From da7add3d7c58434b1af5fe607638228d4d0b5833 Mon Sep 17 00:00:00 2001 From: landmanbester Date: Wed, 21 Jun 2017 10:59:51 +0200 Subject: [PATCH 01/58] Fixed DeconvMachine.Init kwargs for auto-masking --- DDFacet/Imager/ClassDeconvMachine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index aa8a48c0..adf76619 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -1093,7 +1093,8 @@ def main(self,NMajor=None): self._fitAndSavePSF(self.FacetMachinePSF, cycle=iMajor) self.DeconvMachine.Init(PSFVar=self.DicoImagesPSF, PSFAve=self.PSFSidelobesAvg, approx=(sparsify > approximate_psf_above), - cache=not sparsify) + cache=not sparsify, GridFreqs=self.VS.FreqBandCenters, + DegridFreqs=self.VS.FreqBandChannelsDegrid[0], RefFreq=self.VS.RefFreq) # if we reached a sparsification of 1, we shan't be re-making the PSF if sparsify <= 1: From 0508fb0c020a32e6e0aeb0fd7c77591bd3a60759 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Mon, 26 Jun 2017 17:16:17 +0200 Subject: [PATCH 02/58] Adds a minimum size for the FFTs --- DDFacet/Imager/ClassFacetMachine.py | 13 +- DDFacet/ToolsDir/.ModFFTW.py.swp | Bin 0 -> 49152 bytes DDFacet/ToolsDir/ModFFTW.py | 987 +++++++++++++++------------- DDFacet/ToolsDir/ModToolBox.py | 26 +- 4 files changed, 561 insertions(+), 465 deletions(-) create mode 100644 DDFacet/ToolsDir/.ModFFTW.py.swp diff --git a/DDFacet/Imager/ClassFacetMachine.py b/DDFacet/Imager/ClassFacetMachine.py index b1fd26b3..43dbb59f 100644 --- a/DDFacet/Imager/ClassFacetMachine.py +++ b/DDFacet/Imager/ClassFacetMachine.py @@ -663,16 +663,21 @@ def _initcf_worker (self, iFacet, facet_dict, cachepath, cachevalid, wmax): mask2 = mask_flat2.reshape(X.shape) mask[mask2 == 0] = 0 + #NB: this spatial weighting is a bit arbitrary.... 
+ #it may be better to do something like Montage's background + #normalization (http://montage.ipac.caltech.edu/docs/algorithms.html#background) GaussPars = (10, 10, 0) # compute spatial weight term sw = np.float32(mask.reshape((1, 1, Npix, Npix))) # already happening in parallel so make sure the FFT library doesn't spawn its own threads - sw = ModFFTW.ConvolveGaussianFFTW(sw, - CellSizeRad=1, - GaussPars=[GaussPars], - nthreads=1) + sw = ModFFTW.ConvolveGaussian(shareddict = {"in": sw, "out": sw}, + field_in = "in", + field_out = "out", + ch = 0, + CellSizeRad=1, + GaussPars_ch=GaussPars) sw = sw.reshape((Npix, Npix)) sw /= np.max(sw) ## Will speedup degridding NB: will it? diff --git a/DDFacet/ToolsDir/.ModFFTW.py.swp b/DDFacet/ToolsDir/.ModFFTW.py.swp new file mode 100644 index 0000000000000000000000000000000000000000..b8bcfc2c1ed8e3ce3a84b99efc911f6b31298a78 GIT binary patch literal 49152 zcmeI53v?t`d7#_C5@VZp63$_9D61K!rD?UK9@`mY+Cz_KWXAT$+L6YyMu>K)yCjvT zRo&@o%}8S(0_1Q4cmbPNSV$lN)?P3n3j~6_IDj!PJIiYcm~HDK7l0)aR@OtSm^ z_fb{-k~HHP155XrZ(4op*1fm>`~L5LZ!L^ox&MH(D>s_r^Mq9DF%L{%^@f+cwlvy_0U)YewI* zwOTdY>o-OY)GCvcvxjr_t;MpXr1U-YCGhJdfz{^X@UF+EcIL-aRZkC`=REmIhkm_e z(0|o{%_OQ0`-z6AOb=u4n4fxZNO%_Y#RpOyN3WbgD#x?glk|gTnO9YCrrS<2!8>80?V)mitu#!9e99A{9W)F z_%yr+UIRBm04s1c%)mH23LXLf8|A>~A%N$@Ww0N97oHC1!rAaMlpJ4&FT=;-t*{JJ zFa$qB3Gr3<6np^Q0)GOphL^%on1}7~Nfa7?34aXNLmg^xH9QNxg~H^1_&mGxHXHwoSPoHojYZ60!d$SrQeQ9On)CTDuSG(dlBv$tj(Uw_4ZrD?{Gh(t zRB6pR5=-DUO5WmPt9aYQ;4GoRNTe5Oa!HcbuyT6mx7YFQg5zjoMJ9qcI#f&8M`p+s3NEfVjQ1#Ear6i zi~>LC`k|K@sCe~;S9Y6TWt`IBhfbyDg~9fwbF9`l;!tXwaK)`wr8a~ES!b{Dj-A$F zw-NZkQe0t56g|ILDkW--Dp5SZ7}X|SmKy0u5zHiOkyFjN^|}{SGU`%sFiyIon-pIs zS`y>I8Pu_nxg7Mh;+|3sEQ1SzWYp@mK1!nL7QcwDzR$C`_uBIK$00&QcLa>PSXhwBN~sl;@zX zoFsvZqct+GgeB8n8(JlV`KA$F`MxC(y1)mae-Fld1_+%P-&@JTX3ty zGMcW3t4+U}E7w;`<=Se{Bo|T)1PWou&jo61y5jq&hB110~}NhBs-sv9p&zP^-12!h!Y6YfDRBBe&!=^G7W z$}uXDJNDGp><#-ux(*uVsk*Jn>H+3KYz&7<`~pMkw?sp6NL zt-e%1xnj5;l&2dtMjzocsj{~fr>AX75T>_QZ`hwY`?`c>Z%qurT79*oYpOn@QWgcP 
z21E9j%0R~rX>lnW`e9Z1V4%Hyduoz$Q&Ejo*4ZDFb6MwtF=vv_;s~v`Gt+E%UNh@V zQbv}Y$!e|E$U1v!VY3!wodZS38O@K4jSi3P939I#hh~bolv}OQ=pXa(u6o9Lqqfv= zS1xk=MaF{!rN&!Mv&LnQh&<1ksWEm#w5h9Vs}7CnYPI4lyGK2TKGtJ2P;p!}_H}Bs z?c8+9)FI~*j}aQRV0v|->X)6%{jwK?UMki5n(CHU7_2!Juj%^LFqb;8@8HC9i&L}3 zJ^L@;KYNu!S2?+Vc52_uj5B$~L8s_U7Z1+vpEz`R@t`w(=-~7fGy8ImGeeR|MrvWT z=^Wla``jxI%{s-YtDM8dg9i!hs*BXq*SP8(^#YY*#9sAz4xSRpHrIKQlnl@5s+Dvc z8AAABv*8mc{*b&p=yI(tQMpaWXJ|n~G1o{%_B>}5BZFuO)0*CRe)_kDr z;O~N{@e>OB^blHorL3GA9-AME{?5-EC5os>w8nv1x6ySc1Be2*np(Tk5M7YlPjK}h zy}`WROvrQSaSr5Ff)dIvqkYe3hmA@_^mU@3*XqxrQgKEpGa@QR4sELAu`Rx{^`EV&+1=C7YrY#zCI&dd>;tLloV~EAXQrx@Jjhj`Ui*iIuV!o*#vm z?x8wo?kk$_FeiWff>h{1eX1slQld;t$4|Ajs@SiI&Pmn8|F0{e%>R!vM}HT{{GX+1 z^|_rnz7Lne(3e180(}YeC2)EqAjVXD zu*<_zFLV}tuUf(OFr{1_=Q(v3GY#X6B61ufgo?LjSM_nhKvrKC^8{@RBc?D;M%L^x zOc0xhZh`qhA}nQR+btM->s(M?c7qV(VT|bbnp%god3)I;Gmd_orFuyQwheA$9XpW# z;|@_YVKzWQCDCF#ZPKsI9p#zCcu<8oFfcGt3y#*RM=>ODaSxWNo+D`pJWO~3+{&vw z9rvh|>4e#IrDXoUOook$jX3lFBPR1?{r^7rU+_`53El|T!L@K4TzEG8E4qO%!heC6 z!frSho(5;bS#UpV{9k}K!nJT5u7ce#2EPTr2@kTa|7G|i@F5KY@I-h5e3140KZ0X$ zG5n0R`=7%7@IT>pxEWp#e*klEAv_L#j-A7ILG}WC4n6|!ffvINIPg8}{O<*^|9>C6 z7mmX&coO^^+y49DOYjaj4leA4C&ClppHZUR1Ah%S!}DQ3JQps3e-HOy<9`J9fCG19 z+rI_};olyaO1&Fyf!DwyJR6<}|A&0~D7*$rAo-RB`!NN0E49RU-TD_w$4xy4#<(0^ zvzY&G=6?!hCYH^ihks?ID+k@m=AO3_pl)}lj++N( zo2YC`Rtd0h)YWuGRZ(jhsLY?^<{*n6TAYmGj=l#Y$yz`;F`os5wV9z~K7)Bt_w51J z{GSyJ8=|{Oh-$q+EJU{$rdP#UZon8k4@fJdl9aIc+^6zkbciSL zH1aHq)tcMv`6Ln;B8+UpGXdtC($e~C)+SsFTbZ_Bd@;*US!V#FE=KI5TQAj|_NX!elRTbK*+%M(vuu7VaP%n`1uu3R9^RnormQTz&Ky zKZs4x`8`L~8qLa9A7k4avYWxJiixkS78yUl29x`;4mNsTVY{?p+Xr)!)@X4CKZT+n zjH>Xo4Yxc`d0EPs-XTCQC#xeAZQ?E1EGbp}BVI;s)$5M<%1RHNEoDn8M7>C)X$K>pU@6Sa6;S!(!|x#6|ZasEw_UqfTpWLtk~sjQ6_%7d=*IB{}j*BuVcp$w<~@<-gV@guIlYZnf%Fb)sl!)_ImgS?5YEowV6O zbI9xxy3&+wi6*+1y)u>j?PeFUW#v*XWM*b5c&ZGBYZSm8nTgn8T}q_fxlkC+%;aZB zGUw-pjJN1#-hp1e!vfiSp`p!%Ei6Lx)8d`Uq7I*vKdGxV?w)@ z*@=^Ior%209+i{$Q%)+9L)j}6wQBWfSaz%4xcwH{tNLXf?6A=J4R&8nU8pGvkt$c*G9fR;%Q<*ja9inv_soH?gUDJd_y6 
zP^YEpT!PK8Dm1#FBs>(PszenhEM!J=S@mx)+un3%ZBdH0IXdPO*)FKGSXcQj&iOQ+ zK8@$R7%l8cSbJ=+#GDL4{@$ajA02=(`bdV8>gHS7c*Jqry`pB1Tgxud&2iCC@*?O_x67IuS%2GC3`F*B}GmV{Uyv{3*=B*>Ep&>_3JYJR5$JqDO7}x+9cEwVf!ayOoMpQ3*{u3miwV6slc4?5JwW7AtyR)#*A5?0b=oTPuF0f{tv5Gp1%# z=BIQ6YQq!iHWS$X$w_Im-6dD_Xmz~@Dy$Ju=q-p=PA)c`QKl`lLr&8@Lej*Y8Pkl~ zmWW&#a*c|@l2IbhL zawof+T2mW8nogqLm!+FV@!cV{O6?fCxoSK_rd4nzB*(dqhlDlzSK(so)Z)x#bITRv z@vyVgVSGh5N-m-)Kzn(Cn)LcX*mQ%k8F9-T#e|;-j6Y2z>SFYtNrL%pV%sOX*oK|+ z+oQX%Yjh0n7Z4umQk^;}e`GWJSascaFcs5q#;%yZcEPA)d9>$Y-J_JWW?wSp3u zGIPwM(y?K!#`dq!mt9{(Az|^f6ia@{_AH#u)H$e38`IiSQeIA|*sboGr?R>M z>wPO~%@U1OA~X>(F2DN9f1P!w@_(xcv^rbe1fR+nHVEq%8o5o^Lrg?h{c1`AU({Xm zMp{~}hLqu^j1japIqrj(==@aOY3Yb1XbE}K@kPq|Ob!nMTC~y{{T>Zb$*U}RB~%BA zs=S3kf&{z4C!ZS~bcUS#plH6tUlfyQC|2AxlzdH;h&lE}S2LM-V|y;S?_FsbOY4Jk zWAn7zD%IwlX$zwH0N1!NzxB{LAUu2H|GT042W`6%JI0CZ1{!BOnWWN8O;V|q2dA6rO|I?R1 zUjn~y3CM^=J{t^VwTEAV|D!a8RF5M)KoYNjBW# z$XOLz4o!RPP%O?x0j@`}tjDp4=9Zs>@)p7~dmH zgmYp~=Zy1D&elb-nX(b%6_>^0kmyAtS9^7n?+#f;0jN+nN~umLr)Hm!sFe93)~l+c z!v0_@xu0ptU(Vo;RIKFepi%F3c~J3p%&#<;3z?0BNQfFyirQEVj@*i@8;juQwFg}Bj?l+xViE`%BRHK^AAS@KT3U)Hjj|9fV7P-DJ-7>Y0oa_;|E;7*XU06qq{z#Cv5 z9C$kX2Gr04ya1jK=fK$@=li`K-Ucs*D*O&Sh%Vq3cr%n?0d~Q^p%3^H{59MFa%SJP z@NM)0q9+&!vDZHf#Ag3*;Y;vgxEXGOHK;%t4!{VU0pCD(@MX9YJ`Oj*AHo7W6~2!S z;SgK^kA`ocJNPX88HgRhOW?&&f=l5-cq;q=J<8vL*cMy|^RNqkfPfFdGvNvFb!-Vf z0diLVYvEeh4Hv@*%FUy5KGzSG@H$A zgvbIus+VXz1zZ^}Ycn)+Jh?JSV+V>HHc$DLy#eLuWbjgcZ!M^V_o$c1P?l9c zv3vLK!RQE2vurBMp#W|^|cNYxFQdbu!2ijZ}wZR&tdf{&EiJQl|JcTh1O!fb#mwY+jdhwhdJio zCdqzFWTrhy*ltIJ6c9EF(DtJ8ScQ6ssF!fisrq@hJ&aFR*21uu#ay<$>~LyU&zOZPz*#9}(5@ zUI}|&_reRhT#%6A-KcYD39mVaf;_plcK zIOUrf2mX@P6m~5bQ_2W^^rKs)uot^j3wtSUo=OV+3br%?%L$RoM5v}@)aF4Qmeu^R>|*!4sfGl?mPeTPfrUj-)P+s&JH?e+A% z#Kop5F4p!XF7`@#OQ}^A@pLCPWz%%><-&$w^Gv_R#k#5o&_fRM78f~VJn;^KCk*~Jo_e1XLweO#k9yyQdhypO zZiTyMysZB-Wlw$0%>P%+{Qn=A^M4#<{r|Oa3>IJ?jKGh%>B}JJ{(k~Q5Ab@BegFT+ zeE&YU18#zsLJ6io_V@n`?u2*2;~gUt2shi}5&@IiPD%)kZkba(`OjXD2|;A*%4zQUaUcK8qQ 
zR1lj2IT-N=%<;bue+Qq0cf*ZvEnEo`Ap7~Z!*9YP;U4DypN5x102A=ra3=f@bO9fQ zcfu=R2F{0{Gxz@{djll;3^^04J&ORMfc>@O+$`n`LF zUB{%-dgtYw4sXcl8j zQ?Io5D3$H$Z!et3lcn?c6cVuI^Q4Mrv({2LTgs+t(<#!XG^+=hVt09qX{*{YpCJ|T zMr&@#52>4{cIZvna;tW*;*rU|%A?NFn5033mp;6_zI*pL=Bc6XsqEsseuRj}T(OP6Iu@>ieYqZ*LX46cd_BaS#Jw(i%ibhq)M4P4 zY{4Qv%w9O#~a;Uwcc{m6K*Hu7?0 zRHuhXOUmV`wd9!I6z?rWAfs9>23Ul!e zd6v<7-jFD?HWktwPndIkRCw%tk~yY6!XlN{hxypqEz4=s(+n<_*dXpTF0Cy*m(7Az z-u#u>;BcU9uakqOrU$idt))E|$w@5kG1qSr#X&Dztv18VHMIr5vSVOqV9-g^j>~)7 zc+X$K9v;SS!-O7KC37|4c5@%EcFVBXp4~8=Zet0RIKM8#@8^@9r&0ox{fG5$OE(51 z>gVguTzg_9x3bp$Ibll6U_1Ng!jYy;08Dk?q+^}7rYZ(xEiKG&IEW0{De9F z_uyXm8@L-@2d@Pea`3Or`R|6i;4|=7@D{iZ#4g}4TnR;Z4x9&P!@rQw2SDrv-T?B> zzu$*z-~f!nGawCOH}FmneZlMCC9ncjkTVCK4gZXe;P2sUa68-#H-el8Sc3iVEZ7BU z5Zi)9xD=iYavtCh&=>pz+y=M8Tj3S32;1SeK=vuT7v2mngn4)lJOdsD-$76CX}ATh zhh><8XTaHT7W@o-!B62$uop7$Pv{E7w&1gHD|`sv0xt&-#NOacxP!cXH@q5-z-1tI z29oEGgT%-7eOnx^Qr==ylIY%+vd91@uTMd+*IO>fV`f!ctsg12IGo5}^r0Q6U?c8Q zSANxow6p`g4_Q#7o0A;F_UeH|c0C7;C;x)^Zeyzn-7BAD*OIm4$A>->mWi+&XNR!m z_262$(=v2KQ6m=$8TyKNRwD&lxpN}}#RKZ_qfRd^{fj=_tR?V8=}pxA)UW7W;}SR@ zao3l55jn$U#wYDwS~??J_D?~jkCN0Gpr$TO1 z6+{mjvPQ{tVw2oo@fKogT4}xf@+oU6n8c|y8Buh_GdZa=$%&WzHfuoAyc*jKTeZZg z@Vo=LqOd_m>p!-QNfa9y+b05wV^;Sx08>H%ZX;35~fK@uSk=RsUtN- zEP5(j2V3$g4yrA;9+JYXXWu~4il&at8C9Gj7*nDP8!fF$P0&Z9CPca69W6I(DGD_M zj8bJv=Z10UJ9O^E^;#{hMS8};o@z#QpOIE!4;q2U!Kz7>L+c6;dDdEW<;J*b%bhJX zTNI4(<>wvXs##r#uXNOTNmRp?12Cs*&HXF&s<*MCQFHcs;}R2l}(0hUM2O5903wpIylO~(-pM6;&d zk5nHjn$EU6%ip{l82Be<*9m%>P%-Wbj+e^*;qSgPa4n z1X*}8JP95FXFv+x#JqnmOuztqhdI930Lb}(AA+~T+u&vJdm!fpo(ccRocR49dKw)fOAxqZL}Va#({--b&=nRpql zZk8o%wV-Kj$&JmW4M-nLYgT0ATTVl5CLx_msLCcgxiujjo2uzO2~kt36rCXn==X-y=7+1MhTuiP{bzHroI@K{z=#KS@OB(Hyicd;yH%Z#Owh@ZIc8&G^AXb!DScWUI zcs8(y{}LHkNc^2Rdn;b+j5}I~0>v?xGiq%`8;h#YGU#O70OQWAlOG-BAWHe4)fO46 z$r}qeoL`r|&LWzKyfBu|T#y^tc_H`ebF4?kcA4QrRJ01toSbXldSvzB{E$k4jzrF| z-Z`3U4?>@Cts+rzfB_TGh_Il1|wHRXX{hJaKy&W<#Q0>xo7uDJkk{ zJMUF`EWP-WHb>4QVmb9zT^WonDTAy!Cqa5Fw3^qBa7TxomJVIJ8yl^pQ@K{HH5S|k 
z(oZ@!{brR@_-*Pd4#&#LtB>S3O=Oz%YD}?6rC4~GWb&>GR%216(D|Rd z)f#zW;*~wWHPnsMr7951!W$<=?Z!Oul4A4wICt{FJS6;qZDll78|GBK0ak8EZOh3+>|*SXQE zeWe+aQ}0_>7>&h{p3Su0Xtg8k_L5=}C6h3_EIHcXi6hcJXk(-})Kj48HPPsYkb)B> ziyt&B9m#0%BS{y3Z3kJ}@nGxJ-nu?aD?B8=m)TOX_s*(=furs*e*PB2B?(N9z-2TL zwwo?-O=gmjv*gO@GhR~p(@U$sv7N|VREb@!mt^IK9F^_;CH|R&8)cOW_%saFyRH8m zWc#DcyOSBAC*HA`wKg8f|T9Am=Vb`kV^f@w*rZEAXDO_dZ+5U>S*G! YA?1Gag`q^<$)DufP@I_jhHF3n558t)IsgCw literal 0 HcmV?d00001 diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index 0f8862b2..2323a66f 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -29,6 +29,10 @@ from DDFacet.Other.AsyncProcessPool import APP from DDFacet.Array import shared_dict from DDFacet.Other import MyLogger +import ModToolBox +from DDFacet.ToolsDir.ModToolBox import EstimateNpix +from DDFacet.ToolsDir import Gaussian + log=MyLogger.getLogger("ModFFTW") #Fs=pyfftw.interfaces.numpy_fft.fftshift @@ -40,101 +44,99 @@ NCPU_global = 0#psutil.cpu_count() -def test(): - size=20 - dtype=np.complex128 - test_array = np.zeros( (size,size), dtype=dtype) - - test_array[11,11]=1 - #test_array.fill(1) - #test_array[size*3/8:size*5/8, size*3/8:size*5/8] = 1+1j # square aperture oversampling 2... - A=test_array - F=FFTWnp(A) - - f_A=F.fft(A) - if_f_A=F.ifft(f_A) - - import pylab - pylab.clf() - lA=[A,f_A,if_f_A] - iplot=0 - for iA in lA: - pylab.subplot(3,2,iplot+1) - pylab.imshow(iA.real,interpolation="nearest") - pylab.colorbar() - pylab.subplot(3,2,iplot+2) - pylab.imshow(iA.imag,interpolation="nearest") - pylab.colorbar() - iplot+=2 - pylab.draw() - pylab.show(False) - -def test2(): - l=[] - size=2048 - dtype=np.complex128 - test_array = np.zeros( (size,size), dtype=dtype) - test_array[size*3/8:size*5/8, size*3/8:size*5/8] = 1+1j # square aperture oversampling 2... 
- A=test_array - for i in range(5): - print i - l.append(FFTW(A)) - - - - - -class FFTW(): - def __init__(self, A, ncores = 1): - dtype=A.dtype - self.A = pyfftw.n_byte_align_empty( A.shape, 16, dtype=dtype) - - pyfftw.interfaces.cache.enable() - pyfftw.interfaces.cache.set_keepalive_time(30) - self.ncores=ncores - #print "plan" - T= ClassTimeIt.ClassTimeIt("ModFFTW") - T.disable() - - self.A = pyfftw.interfaces.numpy_fft.fft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) - T.timeit("planF") - self.A = pyfftw.interfaces.numpy_fft.ifft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) - T.timeit("planB") - #print "done" - self.ThisType=dtype - - def fft(self, A, norm=True): - axes=(-1,-2) - - T= ClassTimeIt.ClassTimeIt("ModFFTW") - T.disable() - self.A[:,:] = iFs(A.astype(self.ThisType),axes=axes) - T.timeit("shift and copy") - #print "do fft" - self.A = pyfftw.interfaces.numpy_fft.fft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) - T.timeit("fft") - #print "done" - if norm: - out=Fs(self.A,axes=axes)/(A.shape[-1]*A.shape[-2]) - T.timeit("shift") - return out - - - def ifft(self,A): - axes=(-1,-2) - #log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") - self.A[:,:] = iFs(A.astype(self.ThisType),axes=axes) - - #print "do fft" - self.A = out = pyfftw.interfaces.numpy_fft.ifft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) - if norm: - out=Fs(self.A,axes=axes)*(A.shape[-1]*A.shape[-2]) - return out +#def test(): +# size=20 +# dtype=np.complex128 +# test_array = np.zeros( (size,size), dtype=dtype) +# +# test_array[11,11]=1 +# #test_array.fill(1) +# #test_array[size*3/8:size*5/8, size*3/8:size*5/8] = 1+1j # square aperture oversampling 2... 
+# A=test_array +# F=FFTWnp(A) +# +# f_A=F.fft(A) +# if_f_A=F.ifft(f_A) +# +# import pylab +# pylab.clf() +# lA=[A,f_A,if_f_A] +# iplot=0 +# for iA in lA: +# pylab.subplot(3,2,iplot+1) +# pylab.imshow(iA.real,interpolation="nearest") +# pylab.colorbar() +# pylab.subplot(3,2,iplot+2) +# pylab.imshow(iA.imag,interpolation="nearest") +# pylab.colorbar() +# iplot+=2 +# pylab.draw() +# pylab.show(False) + +#def test2(): +# l=[] +# size=2048 +# dtype=np.complex128 +# test_array = np.zeros( (size,size), dtype=dtype) +# test_array[size*3/8:size*5/8, size*3/8:size*5/8] = 1+1j # square aperture oversampling 2... +# A=test_array +# for i in range(5): +# print i +# l.append(FFTW(A)) + +#class FFTW(): +# def __init__(self, A, ncores = 1): +# Raise("deprecated: this doesn't work for small ffts", DeprecationWarning) + +# dtype=A.dtype +# self.A = pyfftw.n_byte_align_empty( A.shape, 16, dtype=dtype) + +# pyfftw.interfaces.cache.enable() +# pyfftw.interfaces.cache.set_keepalive_time(30) +# self.ncores=ncores +# #print "plan" +# T= ClassTimeIt.ClassTimeIt("ModFFTW") +# T.disable() + +# self.A = pyfftw.interfaces.numpy_fft.fft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) +# T.timeit("planF") +# self.A = pyfftw.interfaces.numpy_fft.ifft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) +# T.timeit("planB") +# #print "done" +# self.ThisType=dtype + +# def fft(self, A, norm=True): +# axes=(-1,-2) + +# T= ClassTimeIt.ClassTimeIt("ModFFTW") +# T.disable() +# self.A[:,:] = iFs(A.astype(self.ThisType),axes=axes) +# T.timeit("shift and copy") +# #print "do fft" +# self.A = pyfftw.interfaces.numpy_fft.fft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) +# T.timeit("fft") +# #print "done" +# if norm: +# out=Fs(self.A,axes=axes)/(A.shape[-1]*A.shape[-2]) +# T.timeit("shift") +# return out +# + +# def ifft(self,A): +# axes=(-1,-2) +# 
#log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") +# self.A[:,:] = iFs(A.astype(self.ThisType),axes=axes) + +# #print "do fft" +# self.A = out = pyfftw.interfaces.numpy_fft.ifft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE', threads=self.ncores) +# if norm: +# out=Fs(self.A,axes=axes)*(A.shape[-1]*A.shape[-2]) +# return out def GiveFFTW_aligned(shape, dtype): return pyfftw.n_byte_align_empty( shape[-2::], 16, dtype=dtype) - +# FFTW version of the FFT engine class FFTW_2Donly(): def __init__(self, shape, dtype, norm=True, ncores=1, FromSharedId=None): # if FromSharedId is None: @@ -183,7 +185,6 @@ def fft(self, Ain): A /= (A.shape[-1] * A.shape[-2]) return A.reshape(sin) - def ifft(self, A, norm=True): axes=(-1,-2) @@ -202,8 +203,7 @@ def ifft(self, A, norm=True): A *= (A.shape[-1] * A.shape[-2]) return A.reshape(sin) - - +# LAPACK (or ATLAS???) version of the FFT engine class FFTW_2Donly_np(): def __init__(self, shape=None, dtype=None, ncores = 1): @@ -233,7 +233,6 @@ def fft(self,A,ChanList=None): T.timeit("shift") return A - def ifft(self,A,ChanList=None): axes=(-1,-2) @@ -282,23 +281,24 @@ def GiveGauss(Npix,CellSizeRad=None,GaussPars=(0.,0.,0.),dtype=np.float32,parall #Gauss/=np.sum(Gauss) return Gauss -from DDFacet.ToolsDir import Gaussian -def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): - Npix=int(2*8*Sig) - if Npix%2==0: Npix+=1 - x0=Npix/2 - x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] - #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) - if GaussPar is None: - GaussPar=(Sig,Sig,0) - in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) - - nch,npol,_,_=Ain0.shape - Out=np.zeros_like(Ain0) - for ch in range(nch): - in1=Ain0[ch,0] - Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real - return Out,in2 +#def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): +# warnings.warn("deprecated: this wont work for small ffts...", +# DeprecationWarning) +# Npix=int(2*8*Sig) +# if Npix%2==0: Npix+=1 +# x0=Npix/2 +# 
x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] +# #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) +# if GaussPar is None: +# GaussPar=(Sig,Sig,0) +# in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) + +# nch,npol,_,_=Ain0.shape +# Out=np.zeros_like(Ain0) +# for ch in range(nch): +# in1=Ain0[ch,0] +# Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real +# return Out,in2 def learnFFTWWisdom(npix,dtype=np.float32): @@ -312,28 +312,101 @@ def learnFFTWWisdom(npix,dtype=np.float32): a = pyfftw.interfaces.numpy_fft.fft2(test, overwrite_input=True, threads=1) b = pyfftw.interfaces.numpy_fft.ifft2(a, overwrite_input=True, threads=1) -def _convolveSingleGaussianFFTW(shareddict, field_in, field_out, ch, CellSizeRad, GaussPars_ch, Normalise): +# FFTW-based convolution +def _convolveSingleGaussianFFTW(shareddict, + field_in, + field_out, + ch, + CellSizeRad, + GaussPars_ch, + Normalise = False, + nthreads = 1): + """Convolves a single channel in a cube of nchan, npol, Ny, Nx + @param shareddict: a dictionary containing an input and output array of size + [nchans, npols, Ny, Nx] + @param field_in: index of input field in shareddict + @param field_out: index of the output field in shareddict (can be the + same as field_in + @param ch: index of channel to convolve + @param CellSizeRad: pixel size in radians of the gaussian in image space + @param Normalize: Normalize the guassian amplitude + """ + # The FFT needs to be big enough to avoid spectral leakage in the + # transforms, so we pad both sides of the stack of images with the same + # number of pixels. This preserves the baseband component correctly: + # Even - 4 pixels becomes 6 for instance: + # | | | x | | => | | | | x | | | + # Odd - 3 pixels becomes 5 for instance: + # | | x | | => | | | x | | | + # IFFTShift will shift the central location down to 0 (middle + 1 and + # middle for even and odd respectively). After FFT the baseband is at + # 0 as expected. FFTShift can then recentre the FFT. 
Going back the + # IFF is again applied so baseband is at 0, ifft taken and FFTShift + # brings the central location back to middle + 1 and middle for even and + # odd respectively. The signal can then safely be unpadded + T = ClassTimeIt.ClassTimeIt() T.disable() Ain = shareddict[field_in][ch] Aout = shareddict[field_out][ch] T.timeit("init %d"%ch) - PSF = GiveGauss(Ain.shape[-1], CellSizeRad, GaussPars_ch, parallel=True) - # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) + npol, npix_y, npix_x = Ain.shape + pad_edge_x = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / + 2.0) * 2),0) + pad_edge_y = max(int(np.ceil((ModToolBox.EstimateNpix(npix_y)[1] - npix_y) / + 2.0) * 2),0) + PSF = GiveGauss(npix_x + pad_edge_x, CellSizeRad, GaussPars_ch, parallel=True) + if Normalise: PSF /= np.sum(PSF) T.timeit("givegauss %d"%ch) - fPSF = pyfftw.interfaces.numpy_fft.rfft2(iFs(PSF), overwrite_input=True, threads=1) + fPSF = pyfftw.interfaces.numpy_fft.rfft2(iFs(PSF), + overwrite_input=True, + threads=nthreads) fPSF = np.abs(fPSF) - npol = Ain.shape[0] for pol in range(npol): - A = iFs(Ain[pol]) - fA = pyfftw.interfaces.numpy_fft.rfft2(A, overwrite_input=True, threads=1) + A = iFs(np.pad(Ain[pol], + pad_width=((pad_edge_y//2, pad_edge_x//2), + (pad_edge_y//2, pad_edge_x//2)), + mode="constant")) + fA = pyfftw.interfaces.numpy_fft.rfft2(A, overwrite_input=True, + threads=nthreads) nfA = fA*fPSF - Aout[pol, :, :] = Fs(pyfftw.interfaces.numpy_fft.irfft2(nfA, s=A.shape, overwrite_input=True, threads=1)) + Aout[pol, :, :] = Fs( + pyfftw.interfaces.numpy_fft.irfft2(nfA, + s=A.shape, + overwrite_input=True, + threads=nthreads)[pad_edge_y//2:npix_y-pad_edge_y//2, + pad_edge_x//2:npix_x-pad_edge_x//2]) T.timeit("convolve %d" % ch) + return Aout -def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, GaussPars_ch, Normalise): +# LAPACK / ATLAS-based convolution +def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, 
CellSizeRad, + GaussPars_ch, Normalise = False): + """Convolves a single channel in a cube of nchan, npol, Ny, Nx + @param shareddict: a dictionary containing an input and output array of size + [nchans, npols, Ny, Nx] + @param field_in: index of input field in shareddict + @param field_out: index of the output field in shareddict (can be the + same as field_in + @param ch: index of channel to convolve + @param CellSizeRad: pixel size in radians of the gaussian in image space + @param Normalize: Normalize the guassian amplitude + """ + # The FFT needs to be big enough to avoid spectral leakage in the + # transforms, so we pad both sides of the stack of images with the same + # number of pixels. This preserves the baseband component correctly: + # Even - 4 pixels becomes 6 for instance: + # | | | x | | => | | | | x | | | + # Odd - 3 pixels becomes 5 for instance: + # | | x | | => | | | x | | | + # IFFTShift will shift the central location down to 0 (middle + 1 and + # middle for even and odd respectively). After FFT the baseband is at + # 0 as expected. FFTShift can then recentre the FFT. Going back the + # IFF is again applied so baseband is at 0, ifft taken and FFTShift + # brings the central location back to middle + 1 and middle for even and + # odd respectively. The signal can then safely be unpadded T = ClassTimeIt.ClassTimeIt() Ain = shareddict[field_in][ch] Aout = shareddict[field_out][ch] @@ -352,7 +425,9 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, nfA = fA*fPSF Aout[pol, :, :] = np.fft.irfft2(nfA, s=A.shape) T.timeit("convolve %d" % ch) + return Aout +ConvolveGaussian = _convolveSingleGaussianFFTW def ConvolveGaussianParallel(shareddict, field_in, field_out, CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False): """Convolves images held in a dict, using APP. 
@@ -373,237 +448,245 @@ def ConvolveGaussianParallel(shareddict, field_in, field_out, CellSizeRad=None,G APP.registerJobHandlers(_convolveSingleGaussianFFTW, _convolveSingleGaussianNP) -# FFTW version -def ConvolveGaussianFFTW(Ain0, - CellSizeRad=None, - GaussPars=[(0.,0.,0.)], - Normalise=False, - out=None, - nthreads=1): - nch,npol,_,_=Ain0.shape - Aout = np.zeros_like(Ain0) if out is None else out - T = ClassTimeIt.ClassTimeIt() - T.disable() -# FFTM = FFTW_2Donly(Ain0[0,...].shape, Ain0.dtype, norm=False, ncores=0) # full-parellel if available - T.timeit("init") - - for ch in range(nch): - Ain=Ain0[ch] - ThisGaussPars=GaussPars[ch] - PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) - T.timeit("givegauss") - if Normalise: - PSF/=np.sum(PSF) - PSF = np.fft.ifftshift(PSF) - fPSF = pyfftw.interfaces.numpy_fft.rfft2(PSF, overwrite_input=True, threads=nthreads) - for pol in range(npol): - A = np.fft.ifftshift(Ain[pol]) - fA = pyfftw.interfaces.numpy_fft.rfft2(A, overwrite_input=True, threads=nthreads) - nfA = fA*fPSF - ifA= pyfftw.interfaces.numpy_fft.irfft2(nfA, s=A.shape, overwrite_input=True, threads=nthreads) - Aout[ch, pol, :, :] = np.fft.fftshift(ifA) - T.timeit("conv") - - return Aout - -from DDFacet.ToolsDir.ModToolBox import EstimateNpix -class ZMachine(): - def __init__(self,A): - self.N=A.shape[-1] - zN=2*self.N+1 - zN,_=EstimateNpix(float(zN)) - self.zN=zN - - def toZeroPad(self,A): - zN=self.zN - N=self.N - zA=np.zeros((zN,zN),dtype=A.dtype) - if N%2: - zA[zN/2-N/2:zN/2+N/2+1,zN/2-N/2:zN/2+N/2+1]=A[:,:] - #nx,ny=A.shape - #zA[:nx/2+1,0:ny]=A[:nx/2+1,:] - #zA[-nx/2+1:,0:ny]=A[-nx/2+1:,:] - else: - zA[zN/2-N/2:zN/2+N/2,zN/2-N/2:zN/2+N/2]=A[:,:] - - # import pylab - # pylab.subplot(1,2,1) - # pylab.imshow(A.real,interpolation="nearest") - # pylab.subplot(1,2,2) - # pylab.imshow(zA.real,interpolation="nearest") - # pylab.draw() - # pylab.show(False) - # stop - - return zA - - def fromZeroPad(self,zA): - zN=self.zN - N=self.N - 
A=np.zeros((N,N),dtype=zA.dtype) - if N%2: - A[:,:]=zA[zN/2-N/2:zN/2+N/2+1,zN/2-N/2:zN/2+N/2+1] - else: - A[:,:]=zA[zN/2-N/2:zN/2+N/2,zN/2-N/2:zN/2+N/2] - - return A - -# FFTW version -def ConvolveFFTW2D(Ain0,Bin0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None,ZeroPad=False): - if Ain0.shape != Bin0.shape: - raise NotImplementedError("Arrays should have the same shapes") - - if ZeroPad: - ZM=ZMachine(Ain0) - Ain=ZM.toZeroPad(Ain0) - PSF=ZM.toZeroPad(Bin0) - else: - Ain=Ain0 - PSF=Bin0 - - #Aout = np.zeros_like(Ain) if out is None else out - - fft_forward=pyfftw.interfaces.numpy_fft.rfft2 - fft_bakward=pyfftw.interfaces.numpy_fft.irfft2 - fft_forward=pyfftw.interfaces.numpy_fft.fft2 - fft_bakward=pyfftw.interfaces.numpy_fft.ifft2 - - - if Normalise: - PSF/=np.sum(PSF) - T = ClassTimeIt.ClassTimeIt() - T.disable() - T.timeit("init") - - #print PSF.shape - - PSF = np.fft.fftshift(PSF) - T.timeit("shoft") - #print PSF.shape - - fPSF = fft_forward(PSF, overwrite_input=True, threads=1)#NCPU_global) - T.timeit("fft1") - #print fPSF.shape - - #print Ain.shape - A = np.fft.fftshift(Ain) - #print A.shape - T.timeit("shoft") - - fA = fft_forward(A, overwrite_input=True, threads=1)#NCPU_global), planner_effort='FFTW_MEASURE' - T.timeit("fft2") - #print fA.shape - - nfA = fA*fPSF - T.timeit("mult") +## FFTW version +#def ConvolveGaussianFFTW(Ain0, +# CellSizeRad=None, +# GaussPars=[(0.,0.,0.)], +# Normalise=False, +# out=None, +# nthreads=1, +# min_size_fft=2048): +# warnings.warn("deprecated", DeprecationWarning) + +# assert Ain0.shape == 4, "Expected stack of images: nch, npol, Ny, Nx" +# nch,npol,Ny,Nx=Ain0.shape +# pady = max(Ny, min_size_fft) +# padx = max(Nx, min_size_fft) +# Aout = np.zeros_like(Ain0) if out is None else out +# T = ClassTimeIt.ClassTimeIt() +# T.disable() +# T.timeit("init") + +# for ch in range(nch): +# Ain=Ain0[ch] +# ThisGaussPars=GaussPars[ch] +# PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) +# T.timeit("givegauss") +# if 
Normalise: +# PSF/=np.sum(PSF) +# PSF = np.fft.ifftshift(PSF) +# fPSF = pyfftw.interfaces.numpy_fft.rfft2(PSF, overwrite_input=True, threads=nthreads) +# for pol in range(npol): +# A = np.fft.ifftshift(Ain[pol]) +# fA = pyfftw.interfaces.numpy_fft.rfft2(A, overwrite_input=True, threads=nthreads) +# nfA = fA*fPSF +# ifA= pyfftw.interfaces.numpy_fft.irfft2(nfA, s=A.shape, overwrite_input=True, threads=nthreads) +# Aout[ch, pol, :, :] = np.fft.fftshift(ifA) +# T.timeit("conv") + +# return Aout + +#class ZMachine(): +# #Why??: np.pad... +# def __init__(self,A): +# self.N=A.shape[-1] +# zN=2*self.N+1 +# zN,_=EstimateNpix(float(zN)) +# self.zN=zN + +# def toZeroPad(self,A): +# zN=self.zN +# N=self.N +# zA=np.zeros((zN,zN),dtype=A.dtype) +# if N%2: +# zA[zN/2-N/2:zN/2+N/2+1,zN/2-N/2:zN/2+N/2+1]=A[:,:] +# #nx,ny=A.shape +# #zA[:nx/2+1,0:ny]=A[:nx/2+1,:] +# #zA[-nx/2+1:,0:ny]=A[-nx/2+1:,:] +# else: +# zA[zN/2-N/2:zN/2+N/2,zN/2-N/2:zN/2+N/2]=A[:,:] +# +# # import pylab +# # pylab.subplot(1,2,1) +# # pylab.imshow(A.real,interpolation="nearest") +# # pylab.subplot(1,2,2) +# # pylab.imshow(zA.real,interpolation="nearest") +# # pylab.draw() +# # pylab.show(False) +# # stop + +# return zA + +# def fromZeroPad(self,zA): +# zN=self.zN +# N=self.N +# A=np.zeros((N,N),dtype=zA.dtype) +# if N%2: +# A[:,:]=zA[zN/2-N/2:zN/2+N/2+1,zN/2-N/2:zN/2+N/2+1] +# else: +# A[:,:]=zA[zN/2-N/2:zN/2+N/2,zN/2-N/2:zN/2+N/2] + +# return A + +## FFTW-based convolution version +#def ConvolveFFTW2D(Ain0,Bin0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None,ZeroPad=False): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) + +# if Ain0.shape != Bin0.shape: +# raise NotImplementedError("Arrays should have the same shapes") + +# if ZeroPad: +# ZM=ZMachine(Ain0) +# Ain=ZM.toZeroPad(Ain0) +# PSF=ZM.toZeroPad(Bin0) +# else: +# Ain=Ain0 +# PSF=Bin0 + +# #Aout = np.zeros_like(Ain) if out is None else out + +# fft_forward=pyfftw.interfaces.numpy_fft.rfft2 +# 
fft_bakward=pyfftw.interfaces.numpy_fft.irfft2 +# fft_forward=pyfftw.interfaces.numpy_fft.fft2 +# fft_bakward=pyfftw.interfaces.numpy_fft.ifft2 + + +# if Normalise: +# PSF/=np.sum(PSF) +# T = ClassTimeIt.ClassTimeIt() +# T.disable() +# T.timeit("init") + +# #print PSF.shape + +# PSF = np.fft.fftshift(PSF) +# T.timeit("shoft") +# #print PSF.shape + +# fPSF = fft_forward(PSF, overwrite_input=True, threads=1)#NCPU_global) +# T.timeit("fft1") +# #print fPSF.shape + +# #print Ain.shape +# A = np.fft.fftshift(Ain) +# #print A.shape +# T.timeit("shoft") + +# fA = fft_forward(A, overwrite_input=True, threads=1)#NCPU_global), planner_effort='FFTW_MEASURE' +# T.timeit("fft2") +# #print fA.shape + +# nfA = fA*fPSF +# T.timeit("mult") # if ZeroPad: # nfA=ZM.toZeroPad(nfA) - ifA= fft_bakward(nfA, overwrite_input=True, threads=1)#NCPU_global) - T.timeit("ifft") - - Aout = np.fft.ifftshift(ifA) - T.timeit("shift") - #print Aout.shape - if ZeroPad: - Aout=ZM.fromZeroPad(Aout) - #print Aout.shape - return Aout - -## numpy.fft version -def ConvolveGaussianNPclassic(Ain0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None): +# ifA= fft_bakward(nfA, overwrite_input=True, threads=1)#NCPU_global) +# T.timeit("ifft") - nch,npol,_,_=Ain0.shape - Aout = np.zeros_like(Ain0) if out is None else out - - T = ClassTimeIt.ClassTimeIt() - T.disable() - T.timeit("init %s"%(Ain0.shape,)) +# Aout = np.fft.ifftshift(ifA) +# T.timeit("shift") +# #print Aout.shape +# if ZeroPad: +# Aout=ZM.fromZeroPad(Aout) +# #print Aout.shape +# return Aout - for ch in range(nch): - Ain=Ain0[ch] - ThisGaussPars=GaussPars[ch] - PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) - print PSF - T.timeit("givegauss") - # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) - if Normalise: - PSF/=np.sum(PSF) - FFTM = FFTWnpNonorm(PSF) - fPSF = FFTM.fft(PSF) - fPSF = np.abs(fPSF) - for pol in range(npol): - A = Ain[pol] - FFTM = FFTWnpNonorm(A) - fA = FFTM.fft(A) - nfA = fA*fPSF#Gauss - if_fA = 
FFTM.ifft(nfA) - # if_fA=(nfA) - Aout[ch,pol,:,:] = if_fA.real - T.timeit("conv") - - return Aout - -def ConvolveGaussianC(Ain0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None): - - nch,npol,_,_=Ain0.shape - Aout = np.zeros_like(Ain0) if out is None else out - - T = ClassTimeIt.ClassTimeIt() - T.timeit("init") - - for ch in range(nch): - Ain=Ain0[ch] - ThisGaussPars=GaussPars[ch] - PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) - T.timeit("givegauss") - # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) - if Normalise: - PSF/=np.sum(PSF) - PSF = np.fft.ifftshift(PSF) - fPSF = np.fft.fft2(PSF) - fPSF = np.abs(fPSF) - for pol in range(npol): - A = Ain[pol] - fA = np.fft.fft2(A) - nfA = fA*fPSF#Gauss - if_fA = np.fft.ifft2(nfA) - # if_fA=(nfA) - Aout[ch,pol,:,:] = np.fft.fftshift(if_fA.real) - T.timeit("conv") - - return Aout - - -def ConvolveGaussianR (Ain0, CellSizeRad=None, GaussPars=[(0., 0., 0.)], Normalise=False, out=None): - nch, npol, _, _ = Ain0.shape - Aout = np.zeros_like(Ain0) if out is None else out - - T = ClassTimeIt.ClassTimeIt() - T.disable() - T.timeit("init") - - for ch in range(nch): - Ain = Ain0[ch] - ThisGaussPars = GaussPars[ch] - PSF = GiveGauss(Ain.shape[-1], CellSizeRad, ThisGaussPars) - T.timeit("givegauss") - # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) - if Normalise: - PSF /= np.sum(PSF) - PSF = np.fft.ifftshift(PSF) - fPSF = np.fft.rfft2(PSF) - fPSF = np.abs(fPSF) - for pol in range(npol): - fA = np.fft.rfft2(np.fft.ifftshift(Ain[pol])) - nfA = fA * fPSF - if_fA = np.fft.irfft2(nfA, s=Ain[pol].shape) - Aout[ch, pol, :, :] = np.fft.fftshift(if_fA) - T.timeit("conv") - - return Aout +## numpy.fft version +#def ConvolveGaussianNPclassic(Ain0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) +# nch,npol,_,_=Ain0.shape +# Aout = np.zeros_like(Ain0) if out is None else out + +# T = 
ClassTimeIt.ClassTimeIt() +# T.disable() +# T.timeit("init %s"%(Ain0.shape,)) + +# for ch in range(nch): +# Ain=Ain0[ch] +# ThisGaussPars=GaussPars[ch] +# PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) +# print PSF +# T.timeit("givegauss") +# # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) +# if Normalise: +# PSF/=np.sum(PSF) +# FFTM = FFTWnpNonorm(PSF) +# fPSF = FFTM.fft(PSF) +# fPSF = np.abs(fPSF) +# for pol in range(npol): +# A = Ain[pol] +# FFTM = FFTWnpNonorm(A) +# fA = FFTM.fft(A) +# nfA = fA*fPSF#Gauss +# if_fA = FFTM.ifft(nfA) +# # if_fA=(nfA) +# Aout[ch,pol,:,:] = if_fA.real +# T.timeit("conv") + +# return Aout + +#def ConvolveGaussianC(Ain0,CellSizeRad=None,GaussPars=[(0.,0.,0.)],Normalise=False,out=None): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) +# nch,npol,_,_=Ain0.shape +# Aout = np.zeros_like(Ain0) if out is None else out + +# T = ClassTimeIt.ClassTimeIt() +# T.timeit("init") + +# for ch in range(nch): +# Ain=Ain0[ch] +# ThisGaussPars=GaussPars[ch] +# PSF=GiveGauss(Ain.shape[-1],CellSizeRad,ThisGaussPars) +# T.timeit("givegauss") +# # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) +# if Normalise: +# PSF/=np.sum(PSF) +# PSF = np.fft.ifftshift(PSF) +# fPSF = np.fft.fft2(PSF) +# fPSF = np.abs(fPSF) +# for pol in range(npol): +# A = Ain[pol] +# fA = np.fft.fft2(A) +# nfA = fA*fPSF#Gauss +# if_fA = np.fft.ifft2(nfA) +# # if_fA=(nfA) +# Aout[ch,pol,:,:] = np.fft.fftshift(if_fA.real) +# T.timeit("conv") + +# return Aout + + +#def ConvolveGaussianR (Ain0, CellSizeRad=None, GaussPars=[(0., 0., 0.)], Normalise=False, out=None): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) + +# nch, npol, _, _ = Ain0.shape +# Aout = np.zeros_like(Ain0) if out is None else out + +# T = ClassTimeIt.ClassTimeIt() +# T.disable() +# T.timeit("init") + +# for ch in range(nch): +# Ain = Ain0[ch] +# ThisGaussPars = GaussPars[ch] +# PSF = GiveGauss(Ain.shape[-1], 
CellSizeRad, ThisGaussPars) +# T.timeit("givegauss") +# # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) +# if Normalise: +# PSF /= np.sum(PSF) +# PSF = np.fft.ifftshift(PSF) +# fPSF = np.fft.rfft2(PSF) +# fPSF = np.abs(fPSF) +# for pol in range(npol): +# fA = np.fft.rfft2(np.fft.ifftshift(Ain[pol])) +# nfA = fA * fPSF +# if_fA = np.fft.irfft2(nfA, s=Ain[pol].shape) +# Aout[ch, pol, :, :] = np.fft.fftshift(if_fA) +# T.timeit("conv") + +# return Aout -ConvolveGaussian = ConvolveGaussianR # import pylab @@ -632,110 +715,110 @@ def ConvolveGaussianR (Ain0, CellSizeRad=None, GaussPars=[(0., 0., 0.)], Normali # return if_fA -def testConvolveGaussian(parallel=False): - nchan = 10 - npix = 2000 - T = ClassTimeIt.ClassTimeIt() - if parallel: - learnFFTWWisdom(npix) - T.timeit("learn") - APP.registerJobHandlers(_convolveSingleGaussian) - APP.startWorkers() - sd = shared_dict.attach("test") - A = sd.addSharedArray("A", (nchan,1,npix,npix),np.float32) - A[0,0,10,10]=1 - SigMaj=2#(20/3600.)*np.pi/180 - SigMin=1#(5/3600.)*np.pi/180 - ang=30.*np.pi/180 - GaussPars= [(SigMaj,SigMin,ang)]*nchan - CellSizeRad=1#(5./3600)*np.pi/180 - if parallel: - sd.addSharedArray("B", (nchan, 1, 2000, 2000), np.float32) - ConvolveGaussianInParallel(sd, "A", "B", CellSizeRad=CellSizeRad, GaussPars=GaussPars) - T.timeit("********* parallel") - ConvolveGaussianFFTW(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) - T.timeit("********* serial-fftw") - ConvolveGaussian(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) - T.timeit("********* serial-np") - ConvolveGaussianC(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) - T.timeit("********* serial-npc") - ConvolveGaussianR(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) - T.timeit("********* serial-npr") - - sd.delete() - if parallel: - APP.shutdown() - - - -class FFTWnp(): - def __init__(self, A, ncores = 1): - dtype=A.dtype - self.ThisType=dtype - - - - def fft(self,A): - axes=(-2,-1) - - T= ClassTimeIt.ClassTimeIt("ModFFTW") - T.disable() - - 
A = iFs(A.astype(self.ThisType),axes=axes) - T.timeit("shift and copy") - #print "do fft" - A = np.fft.fft2(A,axes=axes) - T.timeit("fft") - #print "done" - A=Fs(A,axes=axes)/(A.shape[-1]*A.shape[-2]) - T.timeit("shift") - return A - - - def ifft(self,A): - axes=(-2,-1) - #log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") - A = iFs(A.astype(self.ThisType),axes=axes) - - #print "do fft" - A = np.fft.ifft2(A,axes=axes) - out=Fs(A,axes=axes)*(A.shape[-1]*A.shape[-2]) - return out - - -class FFTWnpNonorm(): - def __init__(self, A, ncores = 1): - #dtype=A.dtype - self.ThisType=np.complex64 - - - - def fft(self,A): - axes=(-2,-1) - - T= ClassTimeIt.ClassTimeIt("ModFFTW") - T.disable() - - A = iFs(A.astype(self.ThisType),axes=axes) - T.timeit("shift and copy") - #print "do fft" - A = np.fft.fft2(A,axes=axes) - T.timeit("fft") - #print "done" - A=Fs(A,axes=axes)#/(A.shape[-1]*A.shape[-2]) - T.timeit("shift") - return A - - - def ifft(self,A): - axes=(-2,-1) - #log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") - A = iFs(A.astype(self.ThisType),axes=axes) - - #print "do fft" - A = np.fft.ifft2(A,axes=axes) - out=Fs(A,axes=axes)#*(A.shape[-1]*A.shape[-2]) - return out - - +#def testConvolveGaussian(parallel=False): +# nchan = 10 +# npix = 2000 +# T = ClassTimeIt.ClassTimeIt() +# if parallel: +# learnFFTWWisdom(npix) +# T.timeit("learn") +# APP.registerJobHandlers(_convolveSingleGaussian) +# APP.startWorkers() +# sd = shared_dict.attach("test") +# A = sd.addSharedArray("A", (nchan,1,npix,npix),np.float32) +# A[0,0,10,10]=1 +# SigMaj=2#(20/3600.)*np.pi/180 +# SigMin=1#(5/3600.)*np.pi/180 +# ang=30.*np.pi/180 +# GaussPars= [(SigMaj,SigMin,ang)]*nchan +# CellSizeRad=1#(5./3600)*np.pi/180 +# if parallel: +# sd.addSharedArray("B", (nchan, 1, 2000, 2000), np.float32) +# ConvolveGaussianInParallel(sd, "A", "B", CellSizeRad=CellSizeRad, GaussPars=GaussPars) +# T.timeit("********* parallel") +# ConvolveGaussianFFTW(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) +# T.timeit("********* 
serial-fftw") +# ConvolveGaussian(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) +# T.timeit("********* serial-np") +# ConvolveGaussianC(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) +# T.timeit("********* serial-npc") +# ConvolveGaussianR(A,CellSizeRad=CellSizeRad,GaussPars=GaussPars) +# T.timeit("********* serial-npr") +# +# sd.delete() +# if parallel: +# APP.shutdown() + +#class FFTWnp(): +# def __init__(self, A, ncores = 1): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) + +# dtype=A.dtype +# self.ThisType=dtype + + + +# def fft(self,A): +# axes=(-2,-1) + +# T= ClassTimeIt.ClassTimeIt("ModFFTW") +# T.disable() + +# A = iFs(A.astype(self.ThisType),axes=axes) +# T.timeit("shift and copy") +# #print "do fft" +# A = np.fft.fft2(A,axes=axes) +# T.timeit("fft") +# #print "done" +# A=Fs(A,axes=axes)/(A.shape[-1]*A.shape[-2]) +# T.timeit("shift") +# return A +# + +# def ifft(self,A): +# axes=(-2,-1) +# #log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") +# A = iFs(A.astype(self.ThisType),axes=axes) + +# #print "do fft" +# A = np.fft.ifft2(A,axes=axes) +# out=Fs(A,axes=axes)*(A.shape[-1]*A.shape[-2]) +# return out + +# +#class FFTWnpNonorm(): +# def __init__(self, A, ncores = 1): +# warnings.warn("deprecated: this doesn't work for small ffts", DeprecationWarning) + +# #dtype=A.dtype +# self.ThisType=np.complex64 + + + +# def fft(self,A): +# axes=(-2,-1) + +# T= ClassTimeIt.ClassTimeIt("ModFFTW") +# T.disable() + +# A = iFs(A.astype(self.ThisType),axes=axes) +# T.timeit("shift and copy") +# #print "do fft" +# A = np.fft.fft2(A,axes=axes) +# T.timeit("fft") +# #print "done" +# A=Fs(A,axes=axes)#/(A.shape[-1]*A.shape[-2]) +# T.timeit("shift") +# return A +# + +# def ifft(self,A): +# axes=(-2,-1) +# #log=MyLogger.getLogger("ModToolBox.FFTM2.ifft") +# A = iFs(A.astype(self.ThisType),axes=axes) + +# #print "do fft" +# A = np.fft.ifft2(A,axes=axes) +# out=Fs(A,axes=axes)#*(A.shape[-1]*A.shape[-2]) +# return out diff --git 
a/DDFacet/ToolsDir/ModToolBox.py b/DDFacet/ToolsDir/ModToolBox.py index 16294c19..efb41c65 100644 --- a/DDFacet/ToolsDir/ModToolBox.py +++ b/DDFacet/ToolsDir/ModToolBox.py @@ -41,13 +41,13 @@ # def EstimateNpix(Npix,Padding=1): # Npix=int(round(Npix)) - + # NpixOrig=Npix # if Npix%2!=0: Npix+=1 # Npix=ModToolBox.GiveClosestFastSize(Npix) # NpixOpt=Npix - - + + # Npix*=Padding # Npix=int(round(Npix)) # if Npix%2!=0: Npix+=1 @@ -55,7 +55,13 @@ # print ModColor.Str("(NpixOrig, NpixOpt, NpixOptPadded): %i --> %i --> %i"%(NpixOrig,NpixOpt,Npix)) # return NpixOpt,Npix -def EstimateNpix(Npix,Padding=1): +def EstimateNpix(Npix, + Padding=1.0, + min_size_fft=513): + """ Picks image size from the list of fast FFT sizes. + To avoid spectral leakage the number of taps in the FFT + must not be too small. + """ Npix=int(round(Npix)) Odd=True @@ -64,14 +70,16 @@ def EstimateNpix(Npix,Padding=1): #if Npix%2==0: Npix+=1 Npix=GiveClosestFastSize(Npix,Odd=Odd) NpixOpt=Npix - - - Npix*=Padding + + + Npix *= Padding + if Npix < min_size_fft: + Npix = min_size_fft + print>> log, "Your FFTs are very small. 
We will pad it %2.f x" % (float(Npix)/NpixOpt) Npix=int(round(Npix)) #if Npix%2!=0: Npix+=1 #if Npix%2==0: Npix+=1 Npix=GiveClosestFastSize(Npix,Odd=Odd) - #print>>log, ModColor.Str("With padding=%f: (NpixOrig, NpixOpt, NpixOptPadded): %i --> %i --> %i"%(Padding,NpixOrig,NpixOpt,Npix)) return NpixOpt,Npix class FFTW_Convolve(): @@ -115,7 +123,7 @@ def __init__(self,A): self.fft_FORWARD = pyfftw.FFTW(self.a, self.b,direction="FFTW_FORWARD",flags=('FFTW_ESTIMATE', ))#,threads=4) self.fft_BACKWARD = pyfftw.FFTW(self.a, self.b,direction="FFTW_BACKWARD",flags=('FFTW_ESTIMATE', ))#,threads=4) #print>>log, "done" - + def fft(self,A): #log=MyLogger.getLogger("ModToolBox.FFTM2.fft") self.a[:] = iFs(A.astype(self.ThisType),axes=-1) From a56d7aa2e3500c8cbc652bd6690cc3d5d860c9a5 Mon Sep 17 00:00:00 2001 From: landmanbester Date: Tue, 27 Jun 2017 11:39:26 +0200 Subject: [PATCH 03/58] Passing DegridFreqs to ImageNoiseMachine for FreqMachine initialisation --- DDFacet/Imager/ClassDeconvMachine.py | 6 ++++-- DDFacet/Imager/ClassImageNoiseMachine.py | 9 ++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index bf0679e6..df1b8e1a 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -204,7 +204,8 @@ def Init(self): - self.ImageNoiseMachine=ClassImageNoiseMachine.ClassImageNoiseMachine(self.GD,self.ModelMachine) + self.ImageNoiseMachine=ClassImageNoiseMachine.ClassImageNoiseMachine(self.GD,self.ModelMachine, + DegridFreqs=self.VS.FreqBandChannelsDegrid[0]) self.MaskMachine=ClassMaskMachine.ClassMaskMachine(self.GD) self.MaskMachine.setImageNoiseMachine(self.ImageNoiseMachine) @@ -1093,7 +1094,8 @@ def main(self,NMajor=None): self._fitAndSavePSF(self.FacetMachinePSF, cycle=iMajor) self.DeconvMachine.Init(PSFVar=self.DicoImagesPSF, PSFAve=self.PSFSidelobesAvg, approx=(sparsify > approximate_psf_above), - cache=not sparsify) + cache=not sparsify, 
GridFreqs=self.VS.FreqBandCenters, + DegridFreqs=self.VS.FreqBandChannelsDegrid[0], RefFreq=self.VS.RefFreq) # if we reached a sparsification of 1, we shan't be re-making the PSF if sparsify <= 1: diff --git a/DDFacet/Imager/ClassImageNoiseMachine.py b/DDFacet/Imager/ClassImageNoiseMachine.py index acc389ab..3432bab2 100644 --- a/DDFacet/Imager/ClassImageNoiseMachine.py +++ b/DDFacet/Imager/ClassImageNoiseMachine.py @@ -29,7 +29,7 @@ from DDFacet.ToolsDir import ModFFTW class ClassImageNoiseMachine(): - def __init__(self,GD,ExternalModelMachine=None): + def __init__(self,GD,ExternalModelMachine=None, DegridFreqs=None): self.GD=GD self.NoiseMap=None @@ -37,6 +37,7 @@ def __init__(self,GD,ExternalModelMachine=None): self.NoiseMapReShape=None self._id_InputMap=None self.ExternalModelMachine=ExternalModelMachine + self.DegridFreqs = DegridFreqs def setMainCache(self,MainCache): @@ -170,8 +171,10 @@ def giveBrutalRestored(self,DicoResidual): ParallelMode=False, CacheFileName="HMP_Masking", **self.MinorCycleConfig) - - self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1]) + #print ModelMachine.FreqMachine.Freqs, ModelMachine.FreqMachine.Freqs + self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1], + GridFreqs=DicoVariablePSF["freqs"], DegridFreqs=self.DegridFreqs, + RefFreq=self.RefFreq) if self.NoiseMapReShape is not None: self.DeconvMachine.setNoiseMap(self.NoiseMapReShape,PNRStop=self.GD["Mask"]["SigTh"]) From 13f519dcf4e299d23167897386c816af478db07f Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Wed, 28 Jun 2017 15:50:40 +0200 Subject: [PATCH 04/58] Swap gaussian convolver in ClassDeconvMachine --- DDFacet/Imager/ClassDeconvMachine.py | 41 +++++++++++++++++++++------ DDFacet/ToolsDir/.ModFFTW.py.swp | Bin 49152 -> 0 bytes DDFacet/ToolsDir/ModFFTW.py | 21 ++++++++++---- 3 files changed, 48 insertions(+), 14 deletions(-) delete mode 100644 
DDFacet/ToolsDir/.ModFFTW.py.swp diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index aa8a48c0..3e511f97 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -1611,8 +1611,13 @@ def appconvmodel(): if label not in _images: if havenorm: out = _images.addSharedArray(label, appmodel().shape, np.float32) - ModFFTW.ConvolveGaussian(appmodel(), CellSizeRad=self.CellSizeRad, - GaussPars=[self.PSFGaussParsAvg], out=out) + ModFFTW.ConvolveGaussian(shareddict={"in": appmodel(), + "out": out}, + field_in = "in", + field_out = "out", + ch = 0, + CellSizeRad=self.CellSizeRad, + GaussPars_ch=self.PSFGaussParsAvg) T.timeit(label) else: _images[label] = intconvmodel() @@ -1620,9 +1625,15 @@ def appconvmodel(): def intconvmodel(): label = 'intconvmodel' if label not in _images: - out = _images.addSharedArray(label, intmodel().shape, np.float32) - ModFFTW.ConvolveGaussian(intmodel(), CellSizeRad=self.CellSizeRad, - GaussPars=[self.PSFGaussParsAvg], out=out) + out = _images.addSharedArray(label, intmodel().shape, + np.float32) + ModFFTW.ConvolveGaussian(shareddict={"in": intmodel(), + "out": out}, + field_in = "in", + field_out = "out", + ch = 0, + CellSizeRad=self.CellSizeRad, + GaussPars_ch=self.PSFGaussParsAvg) T.timeit(label) return _images[label] def appconvmodelcube(): @@ -1679,12 +1690,24 @@ def alphaconvmap(): # Get weighted alpha map a = _images.addSharedArray('alphaconvmap', weighted_alphamap().shape, np.float32) # Convolve with Gaussian - ModFFTW.ConvolveGaussian(weighted_alphamap(), CellSizeRad=self.CellSizeRad, - GaussPars=[self.PSFGaussParsAvg], out=a) + ModFFTW.ConvolveGaussian(shareddict={"in": weighted_alphamap(), + "out": a}, + field_in = "in", + field_out = "out", + ch = 0, + CellSizeRad=self.CellSizeRad, + GaussPars_ch=self.PSFGaussParsAvg) + # Get positive part of restored image b = _images.addSharedArray('posconvmod', alphamap().shape, np.float32) - 
ModFFTW.ConvolveGaussian(posintmod(), CellSizeRad=self.CellSizeRad, - GaussPars=[self.PSFGaussParsAvg], out=b) + ModFFTW.ConvolveGaussian(shareddict={"in": alphamap(), + "out": b}, + field_in = "in", + field_out = "out", + ch = 0, + CellSizeRad=self.CellSizeRad, + GaussPars_ch=self.PSFGaussParsAvg) + c = intconvmodel() # Get mask based on restored image and positive restored image RMS = give_final_RMS() diff --git a/DDFacet/ToolsDir/.ModFFTW.py.swp b/DDFacet/ToolsDir/.ModFFTW.py.swp deleted file mode 100644 index b8bcfc2c1ed8e3ce3a84b99efc911f6b31298a78..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 49152 zcmeI53v?t`d7#_C5@VZp63$_9D61K!rD?UK9@`mY+Cz_KWXAT$+L6YyMu>K)yCjvT zRo&@o%}8S(0_1Q4cmbPNSV$lN)?P3n3j~6_IDj!PJIiYcm~HDK7l0)aR@OtSm^ z_fb{-k~HHP155XrZ(4op*1fm>`~L5LZ!L^ox&MH(D>s_r^Mq9DF%L{%^@f+cwlvy_0U)YewI* zwOTdY>o-OY)GCvcvxjr_t;MpXr1U-YCGhJdfz{^X@UF+EcIL-aRZkC`=REmIhkm_e z(0|o{%_OQ0`-z6AOb=u4n4fxZNO%_Y#RpOyN3WbgD#x?glk|gTnO9YCrrS<2!8>80?V)mitu#!9e99A{9W)F z_%yr+UIRBm04s1c%)mH23LXLf8|A>~A%N$@Ww0N97oHC1!rAaMlpJ4&FT=;-t*{JJ zFa$qB3Gr3<6np^Q0)GOphL^%on1}7~Nfa7?34aXNLmg^xH9QNxg~H^1_&mGxHXHwoSPoHojYZ60!d$SrQeQ9On)CTDuSG(dlBv$tj(Uw_4ZrD?{Gh(t zRB6pR5=-DUO5WmPt9aYQ;4GoRNTe5Oa!HcbuyT6mx7YFQg5zjoMJ9qcI#f&8M`p+s3NEfVjQ1#Ear6i zi~>LC`k|K@sCe~;S9Y6TWt`IBhfbyDg~9fwbF9`l;!tXwaK)`wr8a~ES!b{Dj-A$F zw-NZkQe0t56g|ILDkW--Dp5SZ7}X|SmKy0u5zHiOkyFjN^|}{SGU`%sFiyIon-pIs zS`y>I8Pu_nxg7Mh;+|3sEQ1SzWYp@mK1!nL7QcwDzR$C`_uBIK$00&QcLa>PSXhwBN~sl;@zX zoFsvZqct+GgeB8n8(JlV`KA$F`MxC(y1)mae-Fld1_+%P-&@JTX3ty zGMcW3t4+U}E7w;`<=Se{Bo|T)1PWou&jo61y5jq&hB110~}NhBs-sv9p&zP^-12!h!Y6YfDRBBe&!=^G7W z$}uXDJNDGp><#-ux(*uVsk*Jn>H+3KYz&7<`~pMkw?sp6NL zt-e%1xnj5;l&2dtMjzocsj{~fr>AX75T>_QZ`hwY`?`c>Z%qurT79*oYpOn@QWgcP z21E9j%0R~rX>lnW`e9Z1V4%Hyduoz$Q&Ejo*4ZDFb6MwtF=vv_;s~v`Gt+E%UNh@V zQbv}Y$!e|E$U1v!VY3!wodZS38O@K4jSi3P939I#hh~bolv}OQ=pXa(u6o9Lqqfv= zS1xk=MaF{!rN&!Mv&LnQh&<1ksWEm#w5h9Vs}7CnYPI4lyGK2TKGtJ2P;p!}_H}Bs 
z?c8+9)FI~*j}aQRV0v|->X)6%{jwK?UMki5n(CHU7_2!Juj%^LFqb;8@8HC9i&L}3 zJ^L@;KYNu!S2?+Vc52_uj5B$~L8s_U7Z1+vpEz`R@t`w(=-~7fGy8ImGeeR|MrvWT z=^Wla``jxI%{s-YtDM8dg9i!hs*BXq*SP8(^#YY*#9sAz4xSRpHrIKQlnl@5s+Dvc z8AAABv*8mc{*b&p=yI(tQMpaWXJ|n~G1o{%_B>}5BZFuO)0*CRe)_kDr z;O~N{@e>OB^blHorL3GA9-AME{?5-EC5os>w8nv1x6ySc1Be2*np(Tk5M7YlPjK}h zy}`WROvrQSaSr5Ff)dIvqkYe3hmA@_^mU@3*XqxrQgKEpGa@QR4sELAu`Rx{^`EV&+1=C7YrY#zCI&dd>;tLloV~EAXQrx@Jjhj`Ui*iIuV!o*#vm z?x8wo?kk$_FeiWff>h{1eX1slQld;t$4|Ajs@SiI&Pmn8|F0{e%>R!vM}HT{{GX+1 z^|_rnz7Lne(3e180(}YeC2)EqAjVXD zu*<_zFLV}tuUf(OFr{1_=Q(v3GY#X6B61ufgo?LjSM_nhKvrKC^8{@RBc?D;M%L^x zOc0xhZh`qhA}nQR+btM->s(M?c7qV(VT|bbnp%god3)I;Gmd_orFuyQwheA$9XpW# z;|@_YVKzWQCDCF#ZPKsI9p#zCcu<8oFfcGt3y#*RM=>ODaSxWNo+D`pJWO~3+{&vw z9rvh|>4e#IrDXoUOook$jX3lFBPR1?{r^7rU+_`53El|T!L@K4TzEG8E4qO%!heC6 z!frSho(5;bS#UpV{9k}K!nJT5u7ce#2EPTr2@kTa|7G|i@F5KY@I-h5e3140KZ0X$ zG5n0R`=7%7@IT>pxEWp#e*klEAv_L#j-A7ILG}WC4n6|!ffvINIPg8}{O<*^|9>C6 z7mmX&coO^^+y49DOYjaj4leA4C&ClppHZUR1Ah%S!}DQ3JQps3e-HOy<9`J9fCG19 z+rI_};olyaO1&Fyf!DwyJR6<}|A&0~D7*$rAo-RB`!NN0E49RU-TD_w$4xy4#<(0^ zvzY&G=6?!hCYH^ihks?ID+k@m=AO3_pl)}lj++N( zo2YC`Rtd0h)YWuGRZ(jhsLY?^<{*n6TAYmGj=l#Y$yz`;F`os5wV9z~K7)Bt_w51J z{GSyJ8=|{Oh-$q+EJU{$rdP#UZon8k4@fJdl9aIc+^6zkbciSL zH1aHq)tcMv`6Ln;B8+UpGXdtC($e~C)+SsFTbZ_Bd@;*US!V#FE=KI5TQAj|_NX!elRTbK*+%M(vuu7VaP%n`1uu3R9^RnormQTz&Ky zKZs4x`8`L~8qLa9A7k4avYWxJiixkS78yUl29x`;4mNsTVY{?p+Xr)!)@X4CKZT+n zjH>Xo4Yxc`d0EPs-XTCQC#xeAZQ?E1EGbp}BVI;s)$5M<%1RHNEoDn8M7>C)X$K>pU@6Sa6;S!(!|x#6|ZasEw_UqfTpWLtk~sjQ6_%7d=*IB{}j*BuVcp$w<~@<-gV@guIlYZnf%Fb)sl!)_ImgS?5YEowV6O zbI9xxy3&+wi6*+1y)u>j?PeFUW#v*XWM*b5c&ZGBYZSm8nTgn8T}q_fxlkC+%;aZB zGUw-pjJN1#-hp1e!vfiSp`p!%Ei6Lx)8d`Uq7I*vKdGxV?w)@ z*@=^Ior%209+i{$Q%)+9L)j}6wQBWfSaz%4xcwH{tNLXf?6A=J4R&8nU8pGvkt$c*G9fR;%Q<*ja9inv_soH?gUDJd_y6 zP^YEpT!PK8Dm1#FBs>(PszenhEM!J=S@mx)+un3%ZBdH0IXdPO*)FKGSXcQj&iOQ+ 
zK8@$R7%l8cSbJ=+#GDL4{@$ajA02=(`bdV8>gHS7c*Jqry`pB1Tgxud&2iCC@*?O_x67IuS%2GC3`F*B}GmV{Uyv{3*=B*>Ep&>_3JYJR5$JqDO7}x+9cEwVf!ayOoMpQ3*{u3miwV6slc4?5JwW7AtyR)#*A5?0b=oTPuF0f{tv5Gp1%# z=BIQ6YQq!iHWS$X$w_Im-6dD_Xmz~@Dy$Ju=q-p=PA)c`QKl`lLr&8@Lej*Y8Pkl~ zmWW&#a*c|@l2IbhL zawof+T2mW8nogqLm!+FV@!cV{O6?fCxoSK_rd4nzB*(dqhlDlzSK(so)Z)x#bITRv z@vyVgVSGh5N-m-)Kzn(Cn)LcX*mQ%k8F9-T#e|;-j6Y2z>SFYtNrL%pV%sOX*oK|+ z+oQX%Yjh0n7Z4umQk^;}e`GWJSascaFcs5q#;%yZcEPA)d9>$Y-J_JWW?wSp3u zGIPwM(y?K!#`dq!mt9{(Az|^f6ia@{_AH#u)H$e38`IiSQeIA|*sboGr?R>M z>wPO~%@U1OA~X>(F2DN9f1P!w@_(xcv^rbe1fR+nHVEq%8o5o^Lrg?h{c1`AU({Xm zMp{~}hLqu^j1japIqrj(==@aOY3Yb1XbE}K@kPq|Ob!nMTC~y{{T>Zb$*U}RB~%BA zs=S3kf&{z4C!ZS~bcUS#plH6tUlfyQC|2AxlzdH;h&lE}S2LM-V|y;S?_FsbOY4Jk zWAn7zD%IwlX$zwH0N1!NzxB{LAUu2H|GT042W`6%JI0CZ1{!BOnWWN8O;V|q2dA6rO|I?R1 zUjn~y3CM^=J{t^VwTEAV|D!a8RF5M)KoYNjBW# z$XOLz4o!RPP%O?x0j@`}tjDp4=9Zs>@)p7~dmH zgmYp~=Zy1D&elb-nX(b%6_>^0kmyAtS9^7n?+#f;0jN+nN~umLr)Hm!sFe93)~l+c z!v0_@xu0ptU(Vo;RIKFepi%F3c~J3p%&#<;3z?0BNQfFyirQEVj@*i@8;juQwFg}Bj?l+xViE`%BRHK^AAS@KT3U)Hjj|9fV7P-DJ-7>Y0oa_;|E;7*XU06qq{z#Cv5 z9C$kX2Gr04ya1jK=fK$@=li`K-Ucs*D*O&Sh%Vq3cr%n?0d~Q^p%3^H{59MFa%SJP z@NM)0q9+&!vDZHf#Ag3*;Y;vgxEXGOHK;%t4!{VU0pCD(@MX9YJ`Oj*AHo7W6~2!S z;SgK^kA`ocJNPX88HgRhOW?&&f=l5-cq;q=J<8vL*cMy|^RNqkfPfFdGvNvFb!-Vf z0diLVYvEeh4Hv@*%FUy5KGzSG@H$A zgvbIus+VXz1zZ^}Ycn)+Jh?JSV+V>HHc$DLy#eLuWbjgcZ!M^V_o$c1P?l9c zv3vLK!RQE2vurBMp#W|^|cNYxFQdbu!2ijZ}wZR&tdf{&EiJQl|JcTh1O!fb#mwY+jdhwhdJio zCdqzFWTrhy*ltIJ6c9EF(DtJ8ScQ6ssF!fisrq@hJ&aFR*21uu#ay<$>~LyU&zOZPz*#9}(5@ zUI}|&_reRhT#%6A-KcYD39mVaf;_plcK zIOUrf2mX@P6m~5bQ_2W^^rKs)uot^j3wtSUo=OV+3br%?%L$RoM5v}@)aF4Qmeu^R>|*!4sfGl?mPeTPfrUj-)P+s&JH?e+A% z#Kop5F4p!XF7`@#OQ}^A@pLCPWz%%><-&$w^Gv_R#k#5o&_fRM78f~VJn;^KCk*~Jo_e1XLweO#k9yyQdhypO zZiTyMysZB-Wlw$0%>P%+{Qn=A^M4#<{r|Oa3>IJ?jKGh%>B}JJ{(k~Q5Ab@BegFT+ zeE&YU18#zsLJ6io_V@n`?u2*2;~gUt2shi}5&@IiPD%)kZkba(`OjXD2|;A*%4zQUaUcK8qQ zR1lj2IT-N=%<;bue+Qq0cf*ZvEnEo`Ap7~Z!*9YP;U4DypN5x102A=ra3=f@bO9fQ zcfu=R2F{0{Gxz@{djll;3^^04J&ORMfc>@O+$`n`LF 
zUB{%-dgtYw4sXcl8j zQ?Io5D3$H$Z!et3lcn?c6cVuI^Q4Mrv({2LTgs+t(<#!XG^+=hVt09qX{*{YpCJ|T zMr&@#52>4{cIZvna;tW*;*rU|%A?NFn5033mp;6_zI*pL=Bc6XsqEsseuRj}T(OP6Iu@>ieYqZ*LX46cd_BaS#Jw(i%ibhq)M4P4 zY{4Qv%w9O#~a;Uwcc{m6K*Hu7?0 zRHuhXOUmV`wd9!I6z?rWAfs9>23Ul!e zd6v<7-jFD?HWktwPndIkRCw%tk~yY6!XlN{hxypqEz4=s(+n<_*dXpTF0Cy*m(7Az z-u#u>;BcU9uakqOrU$idt))E|$w@5kG1qSr#X&Dztv18VHMIr5vSVOqV9-g^j>~)7 zc+X$K9v;SS!-O7KC37|4c5@%EcFVBXp4~8=Zet0RIKM8#@8^@9r&0ox{fG5$OE(51 z>gVguTzg_9x3bp$Ibll6U_1Ng!jYy;08Dk?q+^}7rYZ(xEiKG&IEW0{De9F z_uyXm8@L-@2d@Pea`3Or`R|6i;4|=7@D{iZ#4g}4TnR;Z4x9&P!@rQw2SDrv-T?B> zzu$*z-~f!nGawCOH}FmneZlMCC9ncjkTVCK4gZXe;P2sUa68-#H-el8Sc3iVEZ7BU z5Zi)9xD=iYavtCh&=>pz+y=M8Tj3S32;1SeK=vuT7v2mngn4)lJOdsD-$76CX}ATh zhh><8XTaHT7W@o-!B62$uop7$Pv{E7w&1gHD|`sv0xt&-#NOacxP!cXH@q5-z-1tI z29oEGgT%-7eOnx^Qr==ylIY%+vd91@uTMd+*IO>fV`f!ctsg12IGo5}^r0Q6U?c8Q zSANxow6p`g4_Q#7o0A;F_UeH|c0C7;C;x)^Zeyzn-7BAD*OIm4$A>->mWi+&XNR!m z_262$(=v2KQ6m=$8TyKNRwD&lxpN}}#RKZ_qfRd^{fj=_tR?V8=}pxA)UW7W;}SR@ zao3l55jn$U#wYDwS~??J_D?~jkCN0Gpr$TO1 z6+{mjvPQ{tVw2oo@fKogT4}xf@+oU6n8c|y8Buh_GdZa=$%&WzHfuoAyc*jKTeZZg z@Vo=LqOd_m>p!-QNfa9y+b05wV^;Sx08>H%ZX;35~fK@uSk=RsUtN- zEP5(j2V3$g4yrA;9+JYXXWu~4il&at8C9Gj7*nDP8!fF$P0&Z9CPca69W6I(DGD_M zj8bJv=Z10UJ9O^E^;#{hMS8};o@z#QpOIE!4;q2U!Kz7>L+c6;dDdEW<;J*b%bhJX zTNI4(<>wvXs##r#uXNOTNmRp?12Cs*&HXF&s<*MCQFHcs;}R2l}(0hUM2O5903wpIylO~(-pM6;&d zk5nHjn$EU6%ip{l82Be<*9m%>P%-Wbj+e^*;qSgPa4n z1X*}8JP95FXFv+x#JqnmOuztqhdI930Lb}(AA+~T+u&vJdm!fpo(ccRocR49dKw)fOAxqZL}Va#({--b&=nRpql zZk8o%wV-Kj$&JmW4M-nLYgT0ATTVl5CLx_msLCcgxiujjo2uzO2~kt36rCXn==X-y=7+1MhTuiP{bzHroI@K{z=#KS@OB(Hyicd;yH%Z#Owh@ZIc8&G^AXb!DScWUI zcs8(y{}LHkNc^2Rdn;b+j5}I~0>v?xGiq%`8;h#YGU#O70OQWAlOG-BAWHe4)fO46 z$r}qeoL`r|&LWzKyfBu|T#y^tc_H`ebF4?kcA4QrRJ01toSbXldSvzB{E$k4jzrF| z-Z`3U4?>@Cts+rzfB_TGh_Il1|wHRXX{hJaKy&W<#Q0>xo7uDJkk{ zJMUF`EWP-WHb>4QVmb9zT^WonDTAy!Cqa5Fw3^qBa7TxomJVIJ8yl^pQ@K{HH5S|k z(oZ@!{brR@_-*Pd4#&#LtB>S3O=Oz%YD}?6rC4~GWb&>GR%216(D|Rd 
z)f#zW;*~wWHPnsMr7951!W$<=?Z!Oul4A4wICt{FJS6;qZDll78|GBK0ak8EZOh3+>|*SXQE zeWe+aQ}0_>7>&h{p3Su0Xtg8k_L5=}C6h3_EIHcXi6hcJXk(-})Kj48HPPsYkb)B> ziyt&B9m#0%BS{y3Z3kJ}@nGxJ-nu?aD?B8=m)TOX_s*(=furs*e*PB2B?(N9z-2TL zwwo?-O=gmjv*gO@GhR~p(@U$sv7N|VREb@!mt^IK9F^_;CH|R&8)cOW_%saFyRH8m zWc#DcyOSBAC*HA`wKg8f|T9Am=Vb`kV^f@w*rZEAXDO_dZ+5U>S*G! YA?1Gag`q^<$)DufP@I_jhHF3n558t)IsgCw diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index 2323a66f..ba1a52c3 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -408,22 +408,33 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, # brings the central location back to middle + 1 and middle for even and # odd respectively. The signal can then safely be unpadded T = ClassTimeIt.ClassTimeIt() + T.disable() Ain = shareddict[field_in][ch] Aout = shareddict[field_out][ch] T.timeit("init %d"%ch) - PSF = GiveGauss(Ain.shape[-1], CellSizeRad, GaussPars_ch, parallel=True) + npol, npix_y, npix_x = Ain.shape + pad_edge_x = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / + 2.0) * 2),0) + pad_edge_y = max(int(np.ceil((ModToolBox.EstimateNpix(npix_y)[1] - npix_y) / + 2.0) * 2),0) + PSF = GiveGauss(npix_x + pad_edge_x, CellSizeRad, GaussPars_ch, parallel=True) + # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) if Normalise: PSF /= np.sum(PSF) T.timeit("givegauss %d"%ch) - fPSF = np.fft.rfft2(PSF) + fPSF = np.fft.rfft2(iFs(PSF)) fPSF = np.abs(fPSF) - npol = Ain.shape[0] for pol in range(npol): - A = Ain[pol] + A = iFs(np.pad(Ain[pol], + pad_width=((pad_edge_y//2, pad_edge_x//2), + (pad_edge_y//2, pad_edge_x//2)), + mode="constant")) fA = np.fft.rfft2(A) nfA = fA*fPSF - Aout[pol, :, :] = np.fft.irfft2(nfA, s=A.shape) + Aout[pol, :, :] = Fs(np.fft.irfft2(nfA, s=A.shape)[pad_edge_y//2:npix_y-pad_edge_y//2, + pad_edge_x//2:npix_x-pad_edge_x//2]) + T.timeit("convolve %d" % ch) return Aout From 359241263ff1af52780b5e4b0e80d3548655a0d5 Mon Sep 17 00:00:00 
2001 From: landmanbester Date: Thu, 6 Jul 2017 17:03:12 +0200 Subject: [PATCH 05/58] Removed multi-pol in ClassImageDeconvMachineHOGBOM. --- DDFacet/Imager/ClassDeconvMachine.py | 2 + .../HOGBOM/ClassImageDeconvMachineHogbom.py | 530 ++++++++---------- 2 files changed, 225 insertions(+), 307 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index bf0679e6..8b3a32c2 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -227,6 +227,8 @@ def Init(self): self.DeconvMachine=ClassImageDeconvMachineSSD.ClassImageDeconvMachine(MainCache=self.VS.maincache, **MinorCycleConfig) print>>log,"Using SSD with %s Minor Cycle algorithm"%self.GD["SSDClean"]["IslandDeconvMode"] elif self.GD["Deconv"]["Mode"] == "Hogbom": + if MinorCycleConfig["ImagePolDescriptor"] != ["I"]: + raise NotImplementedError("Multi-polarization CLEAN is not supported in MSMF") from DDFacet.Imager.HOGBOM import ClassImageDeconvMachineHogbom self.DeconvMachine=ClassImageDeconvMachineHogbom.ClassImageDeconvMachine(**MinorCycleConfig) print>>log,"Using Hogbom algorithm" diff --git a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py index 323f9ef9..83f25e85 100644 --- a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py +++ b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py @@ -34,6 +34,7 @@ from DDFacet.Imager.ClassPSFServer import ClassPSFServer from DDFacet.Other.progressbar import ProgressBar from DDFacet.Imager import ClassGainMachine # Currently required by model machine but fixed to static mode +from DDFacet.ToolsDir import GiveEdges class ClassImageDeconvMachine(): @@ -80,30 +81,31 @@ def __init__(self,Gain=0.3, self.GainMachine = ClassGainMachine.ClassGainMachine(GainMin=Gain) if ModelMachine is None: from DDFacet.Imager import ClassModelMachineHogbom as ClassModelMachine - 
self.ModelMachine=ClassModelMachine.ClassModelMachine(self.GD,GainMachine=self.GainMachine) + self.ModelMachine = ClassModelMachine.ClassModelMachine(self.GD, GainMachine=self.GainMachine) else: self.ModelMachine = ModelMachine self.GainMachine = self.ModelMachine.GainMachine - self.PolarizationDescriptor = ImagePolDescriptor - self.PolarizationCleanTasks = [] - if "I" in self.PolarizationDescriptor: - self.PolarizationCleanTasks.append("I") - print>>log,"Found Stokes I. I will be CLEANed independently" - else: - print>>log, "Stokes I not available. Not performing intensity CLEANing." - if "V" in self.PolarizationDescriptor: - self.PolarizationCleanTasks.append("V") - print>> log, "Found Stokes V. V will be CLEANed independently" - else: - print>>log, "Did not find stokes V image. Will not clean Circular Polarization." - if set(["Q","U"]) < set(self.PolarizationDescriptor): - self.PolarizationCleanTasks.append("Q+iU") #Luke Pratley's complex polarization CLEAN - print>> log, "Found Stokes Q and U. Will perform joint linear (Pratley-Johnston-Hollitt) CLEAN." - else: - print>>log, "Stokes Images Q and U must both be synthesized to CLEAN linear polarization. " \ - "Will not CLEAN these." + self.GiveEdges = GiveEdges.GiveEdges + # self.PolarizationDescriptor = ImagePolDescriptor + # self.PolarizationCleanTasks = [] + # if "I" in self.PolarizationDescriptor: + # self.PolarizationCleanTasks.append("I") + # print>>log,"Found Stokes I. I will be CLEANed independently" + # else: + # print>>log, "Stokes I not available. Not performing intensity CLEANing." + # if "V" in self.PolarizationDescriptor: + # self.PolarizationCleanTasks.append("V") + # print>> log, "Found Stokes V. V will be CLEANed independently" + # else: + # print>>log, "Did not find stokes V image. Will not clean Circular Polarization." 
+ # if set(["Q","U"]) < set(self.PolarizationDescriptor): + # self.PolarizationCleanTasks.append("Q+iU") #Luke Pratley's complex polarization CLEAN + # print>> log, "Found Stokes Q and U. Will perform joint linear (Pratley-Johnston-Hollitt) CLEAN." + # else: + # print>>log, "Stokes Images Q and U must both be synthesized to CLEAN linear polarization. " \ + # "Will not CLEAN these." # reset overall iteration counter - self._niter = np.zeros([len(self.PolarizationCleanTasks)],dtype=np.int64) + self._niter = 0 if CleanMaskImage is not None: print>>log, "Reading mask image: %s"%CleanMaskImage MaskArray=image(CleanMaskImage).getdata() @@ -157,7 +159,7 @@ def SetModelShape(self): """ self.ModelMachine.setModelShape(self._Dirty.shape) - def GiveModelImage(self,*args): return self.ModelMachine.GiveModelImage(*args) + def GiveModelImage(self, *args): return self.ModelMachine.GiveModelImage(*args) def setSideLobeLevel(self,SideLobeLevel,OffsetSideLobe): self.SideLobeLevel=SideLobeLevel @@ -185,47 +187,48 @@ def SetDirty(self,DicoDirty): if self.MaskArray is None: self._MaskArray=np.zeros(self._Dirty.shape,dtype=np.bool8) - def GiveEdges(self,(xc0,yc0),N0,(xc1,yc1),N1): - """ - Each pixel in the image is associated with a different facet each with - a different PSF. - This finds the indices corresponding to the edges of a local psf centered - on a specific pixel, here xc0,yc0. 
- """ - M_xc=xc0 - M_yc=yc0 - NpixMain=N0 - F_xc=xc1 - F_yc=yc1 - NpixFacet=N1 - - ## X - M_x0=M_xc-NpixFacet/2 - x0main=np.max([0,M_x0]) - dx0=x0main-M_x0 - x0facet=dx0 - - M_x1=M_xc+NpixFacet/2 - x1main=np.min([NpixMain-1,M_x1]) - dx1=M_x1-x1main - x1facet=NpixFacet-dx1 - x1main+=1 - - ## Y - M_y0=M_yc-NpixFacet/2 - y0main=np.max([0,M_y0]) - dy0=y0main-M_y0 - y0facet=dy0 - - M_y1=M_yc+NpixFacet/2 - y1main=np.min([NpixMain-1,M_y1]) - dy1=M_y1-y1main - y1facet=NpixFacet-dy1 - y1main+=1 - - Aedge=[x0main,x1main,y0main,y1main] - Bedge=[x0facet,x1facet,y0facet,y1facet] - return Aedge,Bedge + # this is now imported from ToolsDir + # def GiveEdges(self,(xc0,yc0),N0,(xc1,yc1),N1): + # """ + # Each pixel in the image is associated with a different facet each with + # a different PSF. + # This finds the indices corresponding to the edges of a local psf centered + # on a specific pixel, here xc0,yc0. + # """ + # M_xc=xc0 + # M_yc=yc0 + # NpixMain=N0 + # F_xc=xc1 + # F_yc=yc1 + # NpixFacet=N1 + # + # ## X + # M_x0=M_xc-NpixFacet/2 + # x0main=np.max([0,M_x0]) + # dx0=x0main-M_x0 + # x0facet=dx0 + # + # M_x1=M_xc+NpixFacet/2 + # x1main=np.min([NpixMain-1,M_x1]) + # dx1=M_x1-x1main + # x1facet=NpixFacet-dx1 + # x1main+=1 + # + # ## Y + # M_y0=M_yc-NpixFacet/2 + # y0main=np.max([0,M_y0]) + # dy0=y0main-M_y0 + # y0facet=dy0 + # + # M_y1=M_yc+NpixFacet/2 + # y1main=np.min([NpixMain-1,M_y1]) + # dy1=M_y1-y1main + # y1facet=NpixFacet-dy1 + # y1main+=1 + # + # Aedge=[x0main,x1main,y0main,y1main] + # Bedge=[x0facet,x1facet,y0facet,y1facet] + # return Aedge,Bedge def SubStep(self,(dx,dy),LocalSM): """ @@ -282,172 +285,118 @@ def Deconvolve(self, ch=0): exit_msg = "" continue_deconvolution = False update_model = False - for pol_task_id, pol_task in enumerate(self.PolarizationCleanTasks): - if self._niter[pol_task_id] >= self.MaxMinorIter: - print>>log, ModColor.Str("Minor cycle CLEANing of %s has already reached the maximum number of minor " - "cycles... 
won't CLEAN this polarization further." % pol_task) - exit_msg = exit_msg +" " + "MaxIter" - continue_deconvolution = continue_deconvolution or False - update_model = update_model or False - continue #Previous minor clean on this polarization has reached the maximum number of minor cycles.... onwards to the next polarization - PeakMap = None - if pol_task == "I": - indexI = self.PolarizationDescriptor.index("I") - PeakMap = self.Dirty[indexI, :, :] - elif pol_task == "Q+iU": - indexQ = self.PolarizationDescriptor.index("Q") - indexU = self.PolarizationDescriptor.index("U") - PeakMap = np.abs(self.Dirty[indexQ, :, :] + 1.0j * self.Dirty[indexU, :, :]) ** 2 - elif pol_task == "V": - indexV = self.PolarizationDescriptor.index("V") - PeakMap = self.Dirty[indexV, :, :] - else: - raise ValueError("Invalid polarization cleaning task: %s. This is a bug" % pol_task) - - _,npix,_=self.Dirty.shape - xc=(npix)/2 - - npol,_,_=self.Dirty.shape - - m0,m1=PeakMap.min(),PeakMap.max() - - #These options should probably be moved into MinorCycleConfig in parset - DoAbs=int(self.GD["Deconv"]["AllowNegative"]) - print>>log, " Running minor cycle [MinorIter = %i/%i, SearchMaxAbs = %i]"%(self._niter[pol_task_id],self.MaxMinorIter,DoAbs) - - ## Determine which stopping criterion to use for flux limit - #Get RMS stopping criterion - NPixStats = self.GD["Deconv"]["NumRMSSamples"] - if NPixStats: - RandomInd=np.int64(np.random.rand(NPixStats)*npix**2) - if pol_task == "Q+iU": - RMS = np.std(np.sqrt(np.real(PeakMap).ravel()[RandomInd])) - else: - RMS=np.std(np.real(PeakMap.ravel()[RandomInd])) - else: - if pol_task == "Q+iU": - RMS=np.std(np.sqrt(PeakMap)) - else: - RMS = np.std(PeakMap) - self.RMS=RMS - - self.GainMachine.SetRMS(RMS) - - Fluxlimit_RMS = self.RMSFactor*RMS - - #Find position and intensity of first peak - x,y,MaxDirty=NpParallel.A_whereMax(PeakMap,NCPU=self.NCPU,DoAbs=DoAbs,Mask=self.MaskArray) - if pol_task == "I": - pass - elif pol_task == "Q+iU": - MaxDirty = 
np.sqrt(MaxDirty) - elif pol_task == "V": - pass - else: - raise ValueError("Invalid polarization cleaning task: %s. This is a bug" % pol_task) - #Itmp = np.argwhere(self.Dirty[0] == np.max(self.Dirty[0])) - #x, y = Itmp[0] - #MaxDirty = self.Dirty[0][x,y] - - #Get peak factor stopping criterion - Fluxlimit_Peak = MaxDirty*self.PeakFactor - - #Get side lobe stopping criterion - Fluxlimit_Sidelobe = ((self.CycleFactor-1.)/4.*(1.-self.SideLobeLevel)+self.SideLobeLevel)*MaxDirty if self.CycleFactor else 0 - - mm0,mm1=PeakMap.min(),PeakMap.max() - - # Choose whichever threshold is highest - StopFlux = max(Fluxlimit_Peak, Fluxlimit_RMS, Fluxlimit_Sidelobe, self.FluxThreshold) - - print>>log, " Dirty %s image peak flux = %10.6f Jy [(min, max) = (%.3g, %.3g) Jy]"%(pol_task,MaxDirty,mm0,mm1) - print>>log, " RMS-based threshold = %10.6f Jy [rms = %.3g Jy; RMS factor %.1f]"%(Fluxlimit_RMS, RMS, self.RMSFactor) - print>>log, " Sidelobe-based threshold = %10.6f Jy [sidelobe = %.3f of peak; cycle factor %.1f]"%(Fluxlimit_Sidelobe,self.SideLobeLevel,self.CycleFactor) - print>>log, " Peak-based threshold = %10.6f Jy [%.3f of peak]"%(Fluxlimit_Peak,self.PeakFactor) - print>>log, " Absolute threshold = %10.6f Jy"%(self.FluxThreshold) - print>>log, " Stopping flux = %10.6f Jy [%.3f of peak ]"%(StopFlux,StopFlux/MaxDirty) - - T=ClassTimeIt.ClassTimeIt() - T.disable() - - ThisFlux=MaxDirty - #print x,y - - if ThisFlux < StopFlux: - print>>log, ModColor.Str(" Initial maximum peak %g Jy below threshold, we're done CLEANing %s" % (ThisFlux, pol_task),col="green" ) - exit_msg = exit_msg + " " + "FluxThreshold" - continue_deconvolution = False or continue_deconvolution - update_model = False or update_model - continue #onto the next polarization - - #pBAR= ProgressBar(Title="Cleaning %s " % pol_task) - # pBAR.disable() - - self.GainMachine.SetFluxMax(ThisFlux) - #pBAR.render(0,100) # "g=%3.3f"%self.GainMachine.GiveGain()) - - def GivePercentDone(ThisMaxFlux): - 
fracDone=1.-(ThisMaxFlux-StopFlux)/(MaxDirty-StopFlux) - return max(int(round(100*fracDone)),100) - - #Do minor cycle deconvolution loop - try: - for i in range(self._niter[pol_task_id]+1,self.MaxMinorIter+1): - self._niter[pol_task_id] = i - #grab a new peakmap - PeakMap = None - if pol_task == "I": - indexI = self.PolarizationDescriptor.index("I") - PeakMap = self.Dirty[indexI, :, :] - elif pol_task == "Q+iU": - indexQ = self.PolarizationDescriptor.index("Q") - indexU = self.PolarizationDescriptor.index("U") - PeakMap = np.abs(self.Dirty[indexQ, :, :] + 1.0j * self.Dirty[indexU, :, :]) ** 2 - elif pol_task == "V": - indexV = self.PolarizationDescriptor.index("V") - PeakMap = self.Dirty[indexV, :, :] - else: - raise ValueError("Invalid polarization cleaning task: %s. This is a bug" % pol_task) - - x,y,ThisFlux=NpParallel.A_whereMax(PeakMap,NCPU=self.NCPU,DoAbs=DoAbs,Mask=self.MaskArray) - if pol_task == "I": - pass - elif pol_task == "Q+iU": - ThisFlux = np.sqrt(ThisFlux) - elif pol_task == "V": - pass - else: - raise ValueError("Invalid polarization cleaning task: %s. 
This is a bug" % pol_task) - - self.GainMachine.SetFluxMax(ThisFlux) - - T.timeit("max0") - - if ThisFlux <= StopFlux: - #pBAR.render(100,100) #"peak %.3g"%(ThisFlux,)) - print>>log, ModColor.Str(" CLEANing %s [iter=%i] peak of %.3g Jy lower than stopping flux" % (pol_task,i,ThisFlux),col="green") - cont = ThisFlux > self.FluxThreshold - if not cont: - print>>log, ModColor.Str(" CLEANing %s [iter=%i] absolute flux threshold of %.3g Jy has been reached" % (pol_task,i,self.FluxThreshold),col="green",Bold=True) - exit_msg = exit_msg + " " + "MinFluxRms" - continue_deconvolution = cont or continue_deconvolution - update_model = True or update_model - - break # stop cleaning this polariztion and move on to the next polarization job - - rounded_iter_step = 1 if i < 10 else ( - 10 if i < 200 else ( - 100 if i < 2000 - else 1000)) - # min(int(10**math.floor(math.log10(i))), 10000) - if i >= 10 and i % rounded_iter_step == 0: - # if self.GD["Debug"]["PrintMinorCycleRMS"]: - #rms = np.std(np.real(self._CubeDirty.ravel()[self.IndStats])) - print>> log, " [iter=%i] peak residual %.3g" % (i, ThisFlux) - - nch,npol,_,_=self._Dirty.shape - #Fpol contains the intensities at (x,y) per freq and polarisation - Fpol = np.zeros([nch, npol, 1, 1], dtype=np.float32) + + _,npix,_=self.Dirty.shape + xc=(npix)/2 + + npol,_,_=self.Dirty.shape + + # Get the PeakMap (first index will always be 0 because we only support I cleaning) + PeakMap = self.Dirty[0,:,:] + + m0,m1=PeakMap.min(),PeakMap.max() + + #These options should probably be moved into MinorCycleConfig in parset + DoAbs=int(self.GD["Deconv"]["AllowNegative"]) + print>>log, " Running minor cycle [MinorIter = %i/%i, SearchMaxAbs = %i]"%(self._niter, self.MaxMinorIter, DoAbs) + + ## Determine which stopping criterion to use for flux limit + #Get RMS stopping criterion + NPixStats = self.GD["Deconv"]["NumRMSSamples"] + if NPixStats: + RandomInd=np.int64(np.random.rand(NPixStats)*npix**2) + RMS=np.std(np.real(PeakMap.ravel()[RandomInd])) 
+ else: + RMS = np.std(PeakMap) + + self.RMS=RMS + + self.GainMachine.SetRMS(RMS) + + Fluxlimit_RMS = self.RMSFactor*RMS + + #Find position and intensity of first peak + x,y,MaxDirty=NpParallel.A_whereMax(PeakMap,NCPU=self.NCPU,DoAbs=DoAbs,Mask=self.MaskArray) + + #Get peak factor stopping criterion + Fluxlimit_Peak = MaxDirty*self.PeakFactor + + #Get side lobe stopping criterion + Fluxlimit_Sidelobe = ((self.CycleFactor-1.)/4.*(1.-self.SideLobeLevel)+self.SideLobeLevel)*MaxDirty if self.CycleFactor else 0 + + mm0,mm1=PeakMap.min(),PeakMap.max() + + # Choose whichever threshold is highest + StopFlux = max(Fluxlimit_Peak, Fluxlimit_RMS, Fluxlimit_Sidelobe, self.FluxThreshold) + + print>>log, " Dirty image peak flux = %10.6f Jy [(min, max) = (%.3g, %.3g) Jy]"%(MaxDirty,mm0,mm1) + print>>log, " RMS-based threshold = %10.6f Jy [rms = %.3g Jy; RMS factor %.1f]"%(Fluxlimit_RMS, RMS, self.RMSFactor) + print>>log, " Sidelobe-based threshold = %10.6f Jy [sidelobe = %.3f of peak; cycle factor %.1f]"%(Fluxlimit_Sidelobe,self.SideLobeLevel,self.CycleFactor) + print>>log, " Peak-based threshold = %10.6f Jy [%.3f of peak]"%(Fluxlimit_Peak,self.PeakFactor) + print>>log, " Absolute threshold = %10.6f Jy"%(self.FluxThreshold) + print>>log, " Stopping flux = %10.6f Jy [%.3f of peak ]"%(StopFlux,StopFlux/MaxDirty) + + T=ClassTimeIt.ClassTimeIt() + T.disable() + + ThisFlux=MaxDirty + #print x,y + + if ThisFlux < StopFlux: + print>>log, ModColor.Str(" Initial maximum peak %g Jy below threshold, we're done CLEANing" % (ThisFlux),col="green" ) + exit_msg = exit_msg + " " + "FluxThreshold" + continue_deconvolution = False or continue_deconvolution + update_model = False or update_model + # No need to do anything further if we are already at the stopping flux + return exit_msg, continue_deconvolution, update_model + + # set peak in GainMachine (deprecated?) 
+ self.GainMachine.SetFluxMax(ThisFlux) + + # def GivePercentDone(ThisMaxFlux): + # fracDone=1.-(ThisMaxFlux-StopFlux)/(MaxDirty-StopFlux) + # return max(int(round(100*fracDone)),100) + + #Do minor cycle deconvolution loop + try: + for i in range(self._niter+1,self.MaxMinorIter+1): + self._niter = i + #grab a new peakmap + PeakMap = self.Dirty[0, :, :] + + x,y,ThisFlux=NpParallel.A_whereMax(PeakMap,NCPU=self.NCPU,DoAbs=DoAbs,Mask=self.MaskArray) + + # deprecated? + self.GainMachine.SetFluxMax(ThisFlux) + + T.timeit("max0") + + if ThisFlux <= StopFlux: + print>>log, ModColor.Str(" CLEANing [iter=%i] peak of %.3g Jy lower than stopping flux" % (i,ThisFlux),col="green") + cont = ThisFlux > self.FluxThreshold + if not cont: + print>>log, ModColor.Str(" CLEANing [iter=%i] absolute flux threshold of %.3g Jy has been reached" % (i,self.FluxThreshold),col="green",Bold=True) + exit_msg = exit_msg + " " + "MinFluxRms" + continue_deconvolution = cont or continue_deconvolution + update_model = True or update_model + + break # stop cleaning if threshold reached + + # This is used to track Cleaning progress + rounded_iter_step = 1 if i < 10 else ( + 10 if i < 200 else ( + 100 if i < 2000 + else 1000)) + # min(int(10**math.floor(math.log10(i))), 10000) + if i >= 10 and i % rounded_iter_step == 0: + # if self.GD["Debug"]["PrintMinorCycleRMS"]: + #rms = np.std(np.real(self._CubeDirty.ravel()[self.IndStats])) + print>> log, " [iter=%i] peak residual %.3g" % (i, ThisFlux) + + nch,npol,_,_=self._Dirty.shape + #Fpol contains the intensities at (x,y) per freq and polarisation + Fpol = np.zeros([nch, npol, 1, 1], dtype=np.float32) + if self.MultiFreqMode: if self.GD["Hogbom"]["FreqMode"] == "Poly": Ncoeffs = self.GD["Hogbom"]["PolyFitOrder"] elif self.GD["Hogbom"]["FreqMode"] == "GPR": @@ -455,86 +404,53 @@ def GivePercentDone(ThisMaxFlux): else: raise NotImplementedError("FreqMode %s not supported" % self.GD["Hogbom"]["FreqMode"]) Coeffs = np.zeros([npol, Ncoeffs]) + else: + Coeffs = 
np.zeros([npol, Ncoeffs]) # to support per channel cleaning - # Get the JonesNorm - JonesNorm = (self.DicoDirty["JonesNorm"][:, :, x, y]).reshape((nch, npol, 1, 1)) - if pol_task == "I": - indexI = self.PolarizationDescriptor.index("I") - # Get the solution - Fpol[:, indexI, 0, 0] = self._Dirty[:, indexI, x, y]/np.sqrt(JonesNorm[:, indexI, 0, 0]) - # Fit a polynomial to get coeffs - Coeffs[indexI, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, indexI, 0, 0]) - # Overwrite with polynoimial fit - Fpol[:, indexI, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[indexI, :]) - elif pol_task == "Q+iU": - indexQ = self.PolarizationDescriptor.index("Q") - indexU = self.PolarizationDescriptor.index("U") - Fpol[:, indexQ, 0, 0] = self._Dirty[:, indexQ, x, y] - Coeffs[indexQ, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, indexQ, 0, 0]) - Fpol[:, indexQ, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[indexQ, :]) - Fpol[:, indexU, 0, 0] = self._Dirty[:, indexU, x, y] - Coeffs[indexU, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, indexU, 0, 0]) - Fpol[:, indexU, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[indexU, :]) - elif pol_task == "V": - indexV = self.PolarizationDescriptor.index("V") - Fpol[:, indexV, 0, 0] = self._Dirty[:, indexV, x, y] - Coeffs[indexV, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, indexV, 0, 0]) - Fpol[:, indexV, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[indexV, :]) - else: - raise ValueError("Invalid polarization cleaning task: %s. 
This is a bug" % pol_task) - - # if self.ModelMachine.FreqMachine.GP.SolverFlag: - # print " Flag set at location ", x, y - #nchan, npol, _, _ = Fpol.shape - #JonesNorm = (self.DicoDirty["JonesNorm"][:, :, x, y]).reshape((nchan, npol, 1, 1)) - # dx=x-xc - # dy=y-xc - T.timeit("stuff") - - #Find PSF corresponding to location (x,y) - self.PSFServer.setLocation(x,y) #Selects the facet closest to (x,y) - PSF, meanPSF = self.PSFServer.GivePSF() #Gives associated PSF - - T.timeit("FindScale") - - CurrentGain = self.GainMachine.GiveGain() - #Update model - if pol_task == "I": - indexI = self.PolarizationDescriptor.index("I") - self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0, Coeffs[indexI, :], indexI) - elif pol_task == "Q+iU": - indexQ = self.PolarizationDescriptor.index("Q") - indexU = self.PolarizationDescriptor.index("U") - self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0, Coeffs[indexQ, :], indexQ) - self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0, Coeffs[indexU, :], indexU) - elif pol_task == "V": - indexV = self.PolarizationDescriptor.index("V") - self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0, Coeffs[indexV, :], indexV) - else: - raise ValueError("Invalid polarization cleaning task: %s. 
This is a bug" % pol_task) - - # Subtract LocalSM*CurrentGain from dirty image - _,_,PSFnx,PSFny = PSF.shape - PSF /= np.amax(PSF.reshape(nch,npol,PSFnx*PSFny), axis=2, keepdims=True).reshape(nch,npol,1,1) #Normalise PSF in each channel - #tmp = PSF*Fpol*np.sqrt(JonesNorm) - self.SubStep((x,y),PSF*Fpol*CurrentGain*np.sqrt(JonesNorm)) - T.timeit("SubStep") - - T.timeit("End") - - except KeyboardInterrupt: - print>>log, ModColor.Str(" CLEANing %s [iter=%i] minor cycle interrupted with Ctrl+C, peak flux %.3g" % (pol_task,self._niter[pol_task_id], ThisFlux)) - exit_msg = exit_msg + " " + "MaxIter" - continue_deconvolution = False or continue_deconvolution - update_model = True or update_model - break #stop cleaning this polarization and move on to the next one - - if self._niter[pol_task_id] >= self.MaxMinorIter: #Reached maximum iterations for this polarization: - print>> log, ModColor.Str(" CLEANing %s [iter=%i] Reached maximum number of iterations, peak flux %.3g" % (pol_task, self._niter[pol_task_id], ThisFlux)) - exit_msg = exit_msg + " " + "MaxIter" - continue_deconvolution = False or continue_deconvolution - update_model = True or update_model - #onwards to the next polarization <-- + # Get the JonesNorm + JonesNorm = (self.DicoDirty["JonesNorm"][:, :, x, y]).reshape((nch, npol, 1, 1)) + + # Get the solution + Fpol[:, 0, 0, 0] = self._Dirty[:, 0, x, y]/np.sqrt(JonesNorm[:, 0, 0, 0]) + # Fit a polynomial to get coeffs + Coeffs[0, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, 0, 0, 0]) + # Overwrite with polynoimial fit + Fpol[:, 0, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[0, :]) + + T.timeit("stuff") + + #Find PSF corresponding to location (x,y) + self.PSFServer.setLocation(x,y) #Selects the facet closest to (x,y) + PSF, meanPSF = self.PSFServer.GivePSF() #Gives associated PSF + _, _, PSFnx, PSFny = PSF.shape + # Normalise PSF in each channel + PSF /= np.amax(PSF.reshape(nch, npol, PSFnx * PSFny), axis=2, keepdims=True).reshape(nch, npol, 1, 1) + + 
T.timeit("FindScale") + + CurrentGain = self.GainMachine.GiveGain() + + #Update model + self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0, Coeffs[0, :], 0) + + # Subtract LocalSM*CurrentGain from dirty image + self.SubStep((x,y),PSF*Fpol*CurrentGain*np.sqrt(JonesNorm)) + T.timeit("SubStep") + + T.timeit("End") + + except KeyboardInterrupt: + print>>log, ModColor.Str(" CLEANing [iter=%i] minor cycle interrupted with Ctrl+C, peak flux %.3g" % (self._niter, ThisFlux)) + exit_msg = exit_msg + " " + "MaxIter" + continue_deconvolution = False or continue_deconvolution + update_model = True or update_model + return exit_msg, continue_deconvolution, update_model + + if self._niter >= self.MaxMinorIter: #Reached maximum number of iterations: + print>> log, ModColor.Str(" CLEANing [iter=%i] Reached maximum number of iterations, peak flux %.3g" % (self._niter, ThisFlux)) + exit_msg = exit_msg + " " + "MaxIter" + continue_deconvolution = False or continue_deconvolution + update_model = True or update_model return exit_msg, continue_deconvolution, update_model From 91920831dd03a3f9bb6c930b6caea4a3cf3a9d07 Mon Sep 17 00:00:00 2001 From: landmanbester Date: Thu, 6 Jul 2017 17:25:05 +0200 Subject: [PATCH 06/58] Added auto-masking options to ClassImageDeconvMachineHogbom --- .../HOGBOM/ClassImageDeconvMachineHogbom.py | 98 +++++++------------ 1 file changed, 37 insertions(+), 61 deletions(-) diff --git a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py index 83f25e85..f1eda4bf 100644 --- a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py +++ b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py @@ -25,6 +25,7 @@ """ import numpy as np +import numexpr from DDFacet.Other import MyLogger from DDFacet.Other import ModColor log=MyLogger.getLogger("ClassImageDeconvMachine") @@ -86,25 +87,6 @@ def __init__(self,Gain=0.3, self.ModelMachine = ModelMachine self.GainMachine = self.ModelMachine.GainMachine 
self.GiveEdges = GiveEdges.GiveEdges - # self.PolarizationDescriptor = ImagePolDescriptor - # self.PolarizationCleanTasks = [] - # if "I" in self.PolarizationDescriptor: - # self.PolarizationCleanTasks.append("I") - # print>>log,"Found Stokes I. I will be CLEANed independently" - # else: - # print>>log, "Stokes I not available. Not performing intensity CLEANing." - # if "V" in self.PolarizationDescriptor: - # self.PolarizationCleanTasks.append("V") - # print>> log, "Found Stokes V. V will be CLEANed independently" - # else: - # print>>log, "Did not find stokes V image. Will not clean Circular Polarization." - # if set(["Q","U"]) < set(self.PolarizationDescriptor): - # self.PolarizationCleanTasks.append("Q+iU") #Luke Pratley's complex polarization CLEAN - # print>> log, "Found Stokes Q and U. Will perform joint linear (Pratley-Johnston-Hollitt) CLEAN." - # else: - # print>>log, "Stokes Images Q and U must both be synthesized to CLEAN linear polarization. " \ - # "Will not CLEAN these." - # reset overall iteration counter self._niter = 0 if CleanMaskImage is not None: print>>log, "Reading mask image: %s"%CleanMaskImage @@ -171,6 +153,17 @@ def SetPSF(self,DicoVariablePSF): self.PSFServer.setDicoVariablePSF(DicoVariablePSF) self.DicoVariablePSF=DicoVariablePSF + def setNoiseMap(self, NoiseMap, PNRStop=10): + """Sets the noise map. The mean dirty will be divided by the noise map before peak finding. + If PNRStop is set, an additional stopping criterion (peak-to-noisemap) will be applied. + Peaks are reported in units of sigmas. + If PNRStop is not set, NoiseMap is treated as simply an (inverse) weighting that will bias + peak selection in the minor cycle. In this mode, peaks are reported in units of flux. 
+ """ + self._NoiseMap = NoiseMap + self._PNRStop = PNRStop + self._peakMode = "sigma" + def SetDirty(self,DicoDirty): self.DicoDirty=DicoDirty self._Dirty = self.DicoDirty["ImageCube"] @@ -182,53 +175,23 @@ def SetDirty(self,DicoDirty): off=(NPSF-NDirty)/2 self.DirtyExtent=(off,off+NDirty,off,off+NDirty) + if self._peakMode is "sigma": + print>> log, "Will search for the peak in the SNR-weighted dirty map" + a, b = self._MeanDirty, self._NoiseMap.reshape(self._MeanDirty.shape) + self._PeakSearchImage = numexpr.evaluate("a/b") + # elif self._peakMode is "weighted": ######## will need to get a PeakWeightImage from somewhere for this option + # print>> log, "Will search for the peak in the weighted dirty map" + # a, b = self._MeanDirty, self._peakWeightImage + # self._PeakSearchImage = numexpr.evaluate("a*b") + else: + print>> log, "Will search for the peak in the unweighted dirty map" + self._PeakSearchImage = self._MeanDirty + if self.ModelImage is None: self._ModelImage=np.zeros_like(self._Dirty) if self.MaskArray is None: self._MaskArray=np.zeros(self._Dirty.shape,dtype=np.bool8) - # this is now imported from ToolsDir - # def GiveEdges(self,(xc0,yc0),N0,(xc1,yc1),N1): - # """ - # Each pixel in the image is associated with a different facet each with - # a different PSF. - # This finds the indices corresponding to the edges of a local psf centered - # on a specific pixel, here xc0,yc0. 
- # """ - # M_xc=xc0 - # M_yc=yc0 - # NpixMain=N0 - # F_xc=xc1 - # F_yc=yc1 - # NpixFacet=N1 - # - # ## X - # M_x0=M_xc-NpixFacet/2 - # x0main=np.max([0,M_x0]) - # dx0=x0main-M_x0 - # x0facet=dx0 - # - # M_x1=M_xc+NpixFacet/2 - # x1main=np.min([NpixMain-1,M_x1]) - # dx1=M_x1-x1main - # x1facet=NpixFacet-dx1 - # x1main+=1 - # - # ## Y - # M_y0=M_yc-NpixFacet/2 - # y0main=np.max([0,M_y0]) - # dy0=y0main-M_y0 - # y0facet=dy0 - # - # M_y1=M_yc+NpixFacet/2 - # y1main=np.min([NpixMain-1,M_y1]) - # dy1=M_y1-y1main - # y1facet=NpixFacet-dy1 - # y1main+=1 - # - # Aedge=[x0main,x1main,y0main,y1main] - # Bedge=[x0facet,x1facet,y0facet,y1facet] - # return Aedge,Bedge def SubStep(self,(dx,dy),LocalSM): """ @@ -475,3 +438,16 @@ def FromFile(self, fname): Read model dict from file SubtractModel """ self.ModelMachine.FromFile(fname) + + def updateRMS(self): + _,npol,npix,_ = self._MeanDirty.shape + NPixStats = self.GD["Deconv"]["NumRMSSamples"] + if NPixStats: + #self.IndStats=np.int64(np.random.rand(NPixStats)*npix**2) + self.IndStats=np.int64(np.linspace(0,self._PeakSearchImage.size-1,NPixStats)) + else: + self.IndStats = slice(None) + self.RMS=np.std(np.real(self._PeakSearchImage.ravel()[self.IndStats])) + + def resetCounter(self): + self._niter = 0 \ No newline at end of file From 3e1681b53206487c1ffd0259ba46ceb727ceeded Mon Sep 17 00:00:00 2001 From: landmanbester Date: Thu, 6 Jul 2017 17:41:41 +0200 Subject: [PATCH 07/58] Passing DegridFreqs to ClassImageNoiseMachine --- DDFacet/Imager/ClassDeconvMachine.py | 3 ++- DDFacet/Imager/ClassImageNoiseMachine.py | 30 +++++++++++++++++------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index adf76619..0da79474 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -204,7 +204,8 @@ def Init(self): - self.ImageNoiseMachine=ClassImageNoiseMachine.ClassImageNoiseMachine(self.GD,self.ModelMachine) + 
self.ImageNoiseMachine=ClassImageNoiseMachine.ClassImageNoiseMachine(self.GD,self.ModelMachine, + DegridFreqs=self.VS.FreqBandChannelsDegrid[0]) self.MaskMachine=ClassMaskMachine.ClassMaskMachine(self.GD) self.MaskMachine.setImageNoiseMachine(self.ImageNoiseMachine) diff --git a/DDFacet/Imager/ClassImageNoiseMachine.py b/DDFacet/Imager/ClassImageNoiseMachine.py index acc389ab..eb3db7bc 100644 --- a/DDFacet/Imager/ClassImageNoiseMachine.py +++ b/DDFacet/Imager/ClassImageNoiseMachine.py @@ -25,11 +25,11 @@ import scipy.special import copy from DDFacet.Imager.ModModelMachine import ClassModModelMachine -from DDFacet.Imager.MSMF import ClassImageDeconvMachineMSMF + from DDFacet.ToolsDir import ModFFTW class ClassImageNoiseMachine(): - def __init__(self,GD,ExternalModelMachine=None): + def __init__(self,GD,ExternalModelMachine=None, DegridFreqs=None): self.GD=GD self.NoiseMap=None @@ -37,6 +37,7 @@ def __init__(self,GD,ExternalModelMachine=None): self.NoiseMapReShape=None self._id_InputMap=None self.ExternalModelMachine=ExternalModelMachine + self.DegridFreqs = DegridFreqs def setMainCache(self,MainCache): @@ -109,7 +110,8 @@ def setPSF(self,DicoVariablePSF): def giveBrutalRestored(self,DicoResidual): print>>log," Running Brutal HMP..." 
- ListSilentModules=["ClassImageDeconvMachineMSMF","ClassPSFServer","ClassMultiScaleMachine","GiveModelMachine","ClassModelMachineMSMF"] + ListSilentModules=["ClassImageDeconvMachineMSMF","ClassPSFServer","ClassMultiScaleMachine","GiveModelMachine", + "ClassModelMachineMSMF", "ClassImageDeconvMachineHogbom", "ClassModelMachineHogbom"] # MyLogger.setSilent(ListSilentModules) self.DicoDirty=DicoResidual self.Orig_MeanDirty=self.DicoDirty["MeanImage"].copy() @@ -165,13 +167,23 @@ def giveBrutalRestored(self,DicoResidual): MinorCycleConfig["ModelMachine"]=ModelMachine #MinorCycleConfig["CleanMaskImage"]=None self.MinorCycleConfig=MinorCycleConfig - self.DeconvMachine=ClassImageDeconvMachineMSMF.ClassImageDeconvMachine(MainCache=self.MainCache, - CacheSharedMode=True, - ParallelMode=False, - CacheFileName="HMP_Masking", - **self.MinorCycleConfig) + if self.GD["Deconv"]["Mode"]=="HMP": + from DDFacet.Imager.MSMF import ClassImageDeconvMachineMSMF + self.DeconvMachine=ClassImageDeconvMachineMSMF.ClassImageDeconvMachine(MainCache=self.MainCache, + CacheSharedMode=True, + ParallelMode=False, + CacheFileName="HMP_Masking", + **self.MinorCycleConfig) + elif self.GD["Deconv"]["Mode"]=="Hogbom": + from DDFacet.Imager.MOGBOM import ClassImageDeconvMachineHogbom + self.DeconvMachine=ClassImageDeconvMachineHogbom.ClassImageDeconvMachine(MainCache=self.MainCache, + CacheSharedMode=True, + ParallelMode=False, + CacheFileName="HMP_Masking", + **self.MinorCycleConfig) - self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1]) + self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1], + GridFreqs=DicoVariablePSF["freqs"], DegridFreqs=self.DegridFreqs, RefFreq=self.RefFreq) if self.NoiseMapReShape is not None: self.DeconvMachine.setNoiseMap(self.NoiseMapReShape,PNRStop=self.GD["Mask"]["SigTh"]) From 54cb8924ac7dbe68cbc90c3ba192509486b5531d Mon Sep 17 00:00:00 2001 From: landmanbester Date: Fri, 
7 Jul 2017 13:19:17 +0200 Subject: [PATCH 08/58] Seems like auto-masking is working with Hogbom mode --- DDFacet/Imager/ClassDeconvMachine.py | 5 +- DDFacet/Imager/ClassImageNoiseMachine.py | 13 ++-- .../HOGBOM/ClassImageDeconvMachineHogbom.py | 18 ++--- .../Imager/HOGBOM/ClassModelMachineHogbom.py | 73 +++++++++++-------- 4 files changed, 61 insertions(+), 48 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index ee81151e..2a507945 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -205,7 +205,8 @@ def Init(self): self.ImageNoiseMachine=ClassImageNoiseMachine.ClassImageNoiseMachine(self.GD,self.ModelMachine, - DegridFreqs=self.VS.FreqBandChannelsDegrid[0]) + DegridFreqs=self.VS.FreqBandChannelsDegrid[0], + GridFreqs=self.VS.FreqBandCenters) self.MaskMachine=ClassMaskMachine.ClassMaskMachine(self.GD) self.MaskMachine.setImageNoiseMachine(self.ImageNoiseMachine) @@ -229,7 +230,7 @@ def Init(self): print>>log,"Using SSD with %s Minor Cycle algorithm"%self.GD["SSDClean"]["IslandDeconvMode"] elif self.GD["Deconv"]["Mode"] == "Hogbom": if MinorCycleConfig["ImagePolDescriptor"] != ["I"]: - raise NotImplementedError("Multi-polarization CLEAN is not supported in MSMF") + raise NotImplementedError("Multi-polarization CLEAN is not supported in Hogbom") from DDFacet.Imager.HOGBOM import ClassImageDeconvMachineHogbom self.DeconvMachine=ClassImageDeconvMachineHogbom.ClassImageDeconvMachine(**MinorCycleConfig) print>>log,"Using Hogbom algorithm" diff --git a/DDFacet/Imager/ClassImageNoiseMachine.py b/DDFacet/Imager/ClassImageNoiseMachine.py index eb3db7bc..ba790f50 100644 --- a/DDFacet/Imager/ClassImageNoiseMachine.py +++ b/DDFacet/Imager/ClassImageNoiseMachine.py @@ -29,7 +29,7 @@ from DDFacet.ToolsDir import ModFFTW class ClassImageNoiseMachine(): - def __init__(self,GD,ExternalModelMachine=None, DegridFreqs=None): + def __init__(self,GD,ExternalModelMachine=None, 
DegridFreqs=None, GridFreqs=None): self.GD=GD self.NoiseMap=None @@ -38,6 +38,7 @@ def __init__(self,GD,ExternalModelMachine=None, DegridFreqs=None): self._id_InputMap=None self.ExternalModelMachine=ExternalModelMachine self.DegridFreqs = DegridFreqs + self.GridFreqs = GridFreqs def setMainCache(self,MainCache): @@ -124,7 +125,7 @@ def giveBrutalRestored(self,DicoResidual): #self.GD["Parallel"]["NCPU"]=1 #self.GD["HMP"]["Alpha"]=[0,0,1]#-1.,1.,5] self.GD["HMP"]["Alpha"]=[0,0,1] - self.GD["Deconv"]["Mode"]="HMP" + #self.GD["Deconv"]["Mode"]="HMP" #self.GD["Deconv"]["CycleFactor"]=0 #self.GD["Deconv"]["PeakFactor"]=0.0 self.GD["Deconv"]["PSFBox"]="full" @@ -175,15 +176,17 @@ def giveBrutalRestored(self,DicoResidual): CacheFileName="HMP_Masking", **self.MinorCycleConfig) elif self.GD["Deconv"]["Mode"]=="Hogbom": - from DDFacet.Imager.MOGBOM import ClassImageDeconvMachineHogbom + from DDFacet.Imager.HOGBOM import ClassImageDeconvMachineHogbom self.DeconvMachine=ClassImageDeconvMachineHogbom.ClassImageDeconvMachine(MainCache=self.MainCache, CacheSharedMode=True, ParallelMode=False, CacheFileName="HMP_Masking", **self.MinorCycleConfig) - + else: + NotImplementedError("Mode %s not compatible with automasking" % self.GD["Deconv"]["Mode"]) + self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1], - GridFreqs=DicoVariablePSF["freqs"], DegridFreqs=self.DegridFreqs, RefFreq=self.RefFreq) + GridFreqs=self.GridFreqs, DegridFreqs=self.DegridFreqs, RefFreq=self.RefFreq) if self.NoiseMapReShape is not None: self.DeconvMachine.setNoiseMap(self.NoiseMapReShape,PNRStop=self.GD["Mask"]["SigTh"]) diff --git a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py index f1eda4bf..37ccc637 100644 --- a/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py +++ b/DDFacet/Imager/HOGBOM/ClassImageDeconvMachineHogbom.py @@ -97,6 +97,11 @@ def __init__(self,Gain=0.3, for pol in range(npol): 
self._MaskArray[ch,pol,:,:]=np.bool8(1-MaskArray[ch,pol].T[::-1].copy())[:,:] self.MaskArray=self._MaskArray[0] + self._peakMode = "normal" + + self.CurrentNegMask = None + self._NoiseMap = None + self._PNRStop = None # in _peakMode "sigma", provides addiitonal stopping criterion def Init(self, **kwargs): @@ -105,14 +110,7 @@ def Init(self, **kwargs): self.SetModelRefFreq(kwargs["RefFreq"]) self.ModelMachine.setFreqMachine(kwargs["GridFreqs"], kwargs["DegridFreqs"]) self.Freqs = kwargs["GridFreqs"] - # tmp = [{'Alpha': 0.0, 'Scale': 0, 'ModelType': 'Delta'}] - # AlphaMin, AlphaMax, NAlpha = self.GD["HMP"]["Alpha"] - # if NAlpha > 1: - # print>>log, "Multi-frequency synthesis not supported in Hogbom CLEAN. Setting alpha to zero" - # - # self.ModelMachine.setListComponants(tmp) - # Set gridding Freqs def Reset(self): pass @@ -231,7 +229,7 @@ def setChannel(self,ch=0): self.MaskArray = self._MaskArray.view()[ch] - def Deconvolve(self, ch=0): + def Deconvolve(self, ch=0, **kwargs): """ Runs minor cycle over image channel 'ch'. 
initMinor is number of minor iteration (keeps continuous count through major iterations) @@ -368,7 +366,7 @@ def Deconvolve(self, ch=0): raise NotImplementedError("FreqMode %s not supported" % self.GD["Hogbom"]["FreqMode"]) Coeffs = np.zeros([npol, Ncoeffs]) else: - Coeffs = np.zeros([npol, Ncoeffs]) # to support per channel cleaning + Coeffs = np.zeros([npol, nch]) # to support per channel cleaning # Get the JonesNorm JonesNorm = (self.DicoDirty["JonesNorm"][:, :, x, y]).reshape((nch, npol, 1, 1)) @@ -376,6 +374,8 @@ def Deconvolve(self, ch=0): # Get the solution Fpol[:, 0, 0, 0] = self._Dirty[:, 0, x, y]/np.sqrt(JonesNorm[:, 0, 0, 0]) # Fit a polynomial to get coeffs + # tmp = self.ModelMachine.FreqMachine.Fit(Fpol[:, 0, 0, 0]) + # print tmp.shape Coeffs[0, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, 0, 0, 0]) # Overwrite with polynoimial fit Fpol[:, 0, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[0, :]) diff --git a/DDFacet/Imager/HOGBOM/ClassModelMachineHogbom.py b/DDFacet/Imager/HOGBOM/ClassModelMachineHogbom.py index c959b3ce..16fae58b 100644 --- a/DDFacet/Imager/HOGBOM/ClassModelMachineHogbom.py +++ b/DDFacet/Imager/HOGBOM/ClassModelMachineHogbom.py @@ -25,21 +25,21 @@ class ClassModelMachine(ClassModelMachinebase.ClassModelMachine): def __init__(self,*args,**kwargs): ClassModelMachinebase.ClassModelMachine.__init__(self, *args, **kwargs) + self.DicoSMStacked={} + self.DicoSMStacked["Type"]="Hogbom" + def setRefFreq(self, RefFreq, Force=False): + if self.RefFreq is not None and not Force: + print>>log, ModColor.Str("Reference frequency already set to %f MHz" % (self.RefFreq/1e6)) + return - def setRefFreq(self, RefFreq): self.RefFreq = RefFreq self.DicoSMStacked["RefFreq"] = RefFreq - # self.DicoSMStacked["AllFreqs"] = np.array(AllFreqs) - def setFreqMachine(self,GridFreqs, DegridFreqs): # Initiaise the Frequency Machine self.FreqMachine = ClassFrequencyMachine.ClassFrequencyMachine(GridFreqs, DegridFreqs, self.DicoSMStacked["RefFreq"], self.GD) 
self.FreqMachine.set_Method(mode=self.GD["Hogbom"]["FreqMode"]) - #print "Grid freqs size = ", GridFreqs.size - #print "Degrid freqs size =", DegridFreqs.size - def ToFile(self, FileName, DicoIn=None): print>> log, "Saving dico model to %s" % FileName @@ -48,30 +48,26 @@ def ToFile(self, FileName, DicoIn=None): else: D = DicoIn + D["GD"] = self.GD D["Type"] = "Hogbom" D["ListScales"] = "Delta" D["ModelShape"] = self.ModelShape MyPickle.Save(D, FileName) - def FromFile(self, FileName): print>> log, "Reading dico model from %s" % FileName self.DicoSMStacked = MyPickle.Load(FileName) self.FromDico(self.DicoSMStacked) - def FromDico(self, DicoSMStacked): self.DicoSMStacked = DicoSMStacked self.RefFreq = self.DicoSMStacked["RefFreq"] self.ListScales = self.DicoSMStacked["ListScales"] self.ModelShape = self.DicoSMStacked["ModelShape"] - - def setModelShape(self, ModelShape): self.ModelShape = ModelShape - def AppendComponentToDictStacked(self, key, Fpol, Sols, pol_array_index=0): """ Adds component to model dictionary (with key l,m location tupple). Each @@ -92,7 +88,13 @@ def AppendComponentToDictStacked(self, key, Fpol, Sols, pol_array_index=0): if not (pol_array_index >= 0 and pol_array_index < npol): raise ValueError("Pol_array_index must specify the index of the slice in the " "model cube the solution should be stored at. 
Please report this bug.") - DicoComp = self.DicoSMStacked["Comp"] + + try: + DicoComp=self.DicoSMStacked["Comp"] + except: + self.DicoSMStacked["Comp"]={} + DicoComp=self.DicoSMStacked["Comp"] + if not (key in DicoComp.keys()): DicoComp[key] = {} for p in range(npol): @@ -109,31 +111,38 @@ def AppendComponentToDictStacked(self, key, Fpol, Sols, pol_array_index=0): DicoComp[key]["SolsArray"][:, pol_array_index] += Weight * SolNorm - def GiveModelImage(self, FreqIn=None): - RefFreq = self.DicoSMStacked["RefFreq"] - if FreqIn is None: - FreqIn = np.array([RefFreq]) + def GiveModelImage(self, FreqIn=None, DoAbs=False): + try: + if DoAbs: + f_apply = np.abs + else: + f_apply = lambda x: x + + RefFreq=self.DicoSMStacked["RefFreq"] + # Default to reference frequency if no input given + if FreqIn is None: + FreqIn=np.array([RefFreq]) - # if type(FreqIn)==float: - # FreqIn=np.array([FreqIn]).flatten() - # if type(FreqIn)==np.ndarray: + FreqIn = np.array([FreqIn.ravel()]).flatten() - FreqIn = np.array([FreqIn.ravel()]).flatten() + DicoComp = self.DicoSMStacked["Comp"] + _, npol, nx, ny = self.ModelShape - DicoComp = self.DicoSMStacked["Comp"] - _, npol, nx, ny = self.ModelShape + # The model shape has nchan=len(GridFreqs) + nchan = FreqIn.size - nchan = FreqIn.size - ModelImage = np.zeros((nchan, npol, nx, ny), dtype=np.float32) - DicoSM = {} - for key in DicoComp.keys(): - for pol in range(npol): - Sol = DicoComp[key]["SolsArray"][:, pol] # /self.DicoSMStacked[key]["SumWeights"] - x, y = key - #tmp = self.FreqMachine.Eval_Degrid(Sol, FreqIn) - ModelImage[:, pol, x, y] += self.FreqMachine.Eval_Degrid(Sol, FreqIn) + ModelImage = np.zeros((nchan, npol, nx, ny), dtype=np.float32) + DicoSM = {} + for key in DicoComp.keys(): + for pol in range(npol): + Sol = DicoComp[key]["SolsArray"][:, pol] # /self.DicoSMStacked[key]["SumWeights"] + x, y = key + #tmp = self.FreqMachine.Eval_Degrid(Sol, FreqIn) + ModelImage[:, pol, x, y] += f_apply(self.FreqMachine.Eval_Degrid(Sol, FreqIn)) - 
return ModelImage + return ModelImage + except: + return 0 def GiveSpectralIndexMap(self, threshold=0.1, save_dict=True): # Get the model image From f4c5d72b7125e2b0e17abb2a8d1f14a082fd155b Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 7 Jul 2017 13:25:00 +0200 Subject: [PATCH 09/58] Fix unit test. Fix typo in ModFFTW --- DDFacet/Tests/FastUnitTests/TestFitter.py | 12 +++++++-- DDFacet/ToolsDir/ModFFTW.py | 33 ++++++++++++----------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/DDFacet/Tests/FastUnitTests/TestFitter.py b/DDFacet/Tests/FastUnitTests/TestFitter.py index 29916793..adf20936 100644 --- a/DDFacet/Tests/FastUnitTests/TestFitter.py +++ b/DDFacet/Tests/FastUnitTests/TestFitter.py @@ -75,11 +75,19 @@ def restoreFittedBeam(rot): inp = gauss2d([1,imgSize/2,imgSize/2,params[1],params[0],params[2]],circle=0,rotate=1,vheight=0)(xx,yy) inp = inp.reshape(1,1,imgSize,imgSize) #fit - fittedParams = tuple((fitter.FitCleanBeam(inp[0, 0, :, :]) * np.array([cellSize, cellSize, 1])).tolist()) + fittedParams = tuple((fitter.FitCleanBeam(inp[0, 0, :, :]) * + np.array([cellSize, cellSize, 1])).tolist()) #restore fitted clean beam with an FFT convolution: delta = np.zeros([1, 1, imgSize, imgSize]) delta[0, 0, imgSize / 2, imgSize / 2] = 1 - rest = fftconvolve.ConvolveGaussian(delta,cellSize,GaussPars=[fittedParams],Normalise=False) + rest = fftconvolve.ConvolveGaussian(shareddict={"in":delta, + "out":delta}, + field_in="in", + field_out="out", + ch=0, + CellSizeRad=cellSize, + GaussPars_ch=fittedParams, + Normalise=False) assert np.allclose(inp, rest, rtol=1e-2, atol=1e-2) for r in np.linspace(0,360,100): restoreFittedBeam(r) diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index ba1a52c3..8650fc8a 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -351,11 +351,12 @@ def _convolveSingleGaussianFFTW(shareddict, Aout = shareddict[field_out][ch] T.timeit("init %d"%ch) npol, npix_y, npix_x = Ain.shape - 
pad_edge_x = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / + assert npix_y == npix_x, "Only supports square grids at the moment" + pad_edge = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / 2.0) * 2),0) - pad_edge_y = max(int(np.ceil((ModToolBox.EstimateNpix(npix_y)[1] - npix_y) / - 2.0) * 2),0) - PSF = GiveGauss(npix_x + pad_edge_x, CellSizeRad, GaussPars_ch, parallel=True) + PSF = np.pad(GiveGauss(npix_x, CellSizeRad, GaussPars_ch, parallel=True), + ((pad_edge//2,pad_edge//2),(pad_edge//2,pad_edge//2)), + mode="constant") if Normalise: PSF /= np.sum(PSF) @@ -366,8 +367,7 @@ def _convolveSingleGaussianFFTW(shareddict, fPSF = np.abs(fPSF) for pol in range(npol): A = iFs(np.pad(Ain[pol], - pad_width=((pad_edge_y//2, pad_edge_x//2), - (pad_edge_y//2, pad_edge_x//2)), + ((pad_edge//2,pad_edge//2),(pad_edge//2,pad_edge//2)), mode="constant")) fA = pyfftw.interfaces.numpy_fft.rfft2(A, overwrite_input=True, threads=nthreads) @@ -376,8 +376,8 @@ def _convolveSingleGaussianFFTW(shareddict, pyfftw.interfaces.numpy_fft.irfft2(nfA, s=A.shape, overwrite_input=True, - threads=nthreads)[pad_edge_y//2:npix_y-pad_edge_y//2, - pad_edge_x//2:npix_x-pad_edge_x//2]) + threads=nthreads))[pad_edge//2:pad_edge//2+npix_y, + pad_edge//2:pad_edge//2+npix_x] T.timeit("convolve %d" % ch) return Aout @@ -413,11 +413,12 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, Aout = shareddict[field_out][ch] T.timeit("init %d"%ch) npol, npix_y, npix_x = Ain.shape - pad_edge_x = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / + assert npix_y == npix_x, "Only supports square grids at the moment" + pad_edge = max(int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / 2.0) * 2),0) - pad_edge_y = max(int(np.ceil((ModToolBox.EstimateNpix(npix_y)[1] - npix_y) / - 2.0) * 2),0) - PSF = GiveGauss(npix_x + pad_edge_x, CellSizeRad, GaussPars_ch, parallel=True) + PSF = np.pad(GiveGauss(npix_x, CellSizeRad, GaussPars_ch, 
parallel=True), + ((pad_edge//2,pad_edge//2),(pad_edge//2,pad_edge//2)), + mode="constant") # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32) if Normalise: @@ -427,13 +428,13 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, fPSF = np.abs(fPSF) for pol in range(npol): A = iFs(np.pad(Ain[pol], - pad_width=((pad_edge_y//2, pad_edge_x//2), - (pad_edge_y//2, pad_edge_x//2)), + ((pad_edge//2,pad_edge//2),(pad_edge//2,pad_edge//2)), mode="constant")) fA = np.fft.rfft2(A) nfA = fA*fPSF - Aout[pol, :, :] = Fs(np.fft.irfft2(nfA, s=A.shape)[pad_edge_y//2:npix_y-pad_edge_y//2, - pad_edge_x//2:npix_x-pad_edge_x//2]) + Aout[pol, :, :] = Fs(np.fft.irfft2(nfA, + s=A.shape))[pad_edge//2:npix_y+pad_edge//2, + pad_edge//2:npix_x+pad_edge//2] T.timeit("convolve %d" % ch) return Aout From 0602041222d5dec48b680531937a48b9626bdca8 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Mon, 10 Jul 2017 14:38:25 +0200 Subject: [PATCH 10/58] Initial Stokes Residues dumping routine --- DDFacet/Imager/ClassDDEGridMachine.py | 2 +- DDFacet/Imager/ClassDeconvMachine.py | 141 ++++++++++++++++++++++++-- DDFacet/Imager/ClassFacetMachine.py | 17 +++- DDFacet/Parset/DefaultParset.cfg | 7 +- DDFacet/ToolsDir/ModToolBox.py | 25 +++-- setup.cfg | 4 +- 6 files changed, 173 insertions(+), 23 deletions(-) diff --git a/DDFacet/Imager/ClassDDEGridMachine.py b/DDFacet/Imager/ClassDDEGridMachine.py index 4872d533..a5ec1089 100644 --- a/DDFacet/Imager/ClassDDEGridMachine.py +++ b/DDFacet/Imager/ClassDDEGridMachine.py @@ -341,7 +341,7 @@ def __init__(self, # Npix=Npix*2 Precision = GD["RIME"]["Precision"] - PolMode = GD["RIME"]["PolMode"] + PolMode = ExpectedOutputStokes if Precision == "S": self.dtype = np.complex64 diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index df1b8e1a..91d73852 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -47,6 +47,8 @@ from DDFacet.Other import 
ClassTimeIt import numexpr from DDFacet.Imager import ClassImageNoiseMachine +from DDFacet.Data import ClassStokes + # from astropy import wcs # from astropy.io import fits @@ -99,6 +101,7 @@ def __init__(self, GD=None, self.PointingID=PointingID self.do_predict_only = predict_only self.do_data, self.do_psf, self.do_readcol, self.do_deconvolve = data, psf, readcol, deconvolve + self.FacetMachine=None self.FWHMBeam = None self.PSFGaussPars = None @@ -140,6 +143,9 @@ def __init__(self, GD=None, self._saveims = set(saveimages) | set(saveonly) self._savecubes = allchars if savecubes.lower() == "all" else set(savecubes) + self.do_stokes_residue = (self.GD["Output"]["StokesResidues"] != self.GD["RIME"]["PolMode"] and + ("r" in self._saveims or "r" in self._savecubes or + "R" in self._saveims or "R" in self._savecubes)) ## disabling this, as it doesn't play nice with in-place FFTs # self._save_intermediate_grids = self.GD["Debug"]["SaveIntermediateDirtyImages"] @@ -242,6 +248,7 @@ def Init(self): if self.DoSmoothBeam: AverageBeamMachine=ClassBeamMean.ClassBeamMean(self.VS) self.FacetMachine.setAverageBeamMachine(AverageBeamMachine) + self.StokesFacetMachine and self.StokesFacetMachine.setAverageBeamMachine(AverageBeamMachine) # tell VisServer to not load weights if self.do_predict_only: self.VS.IgnoreWeights() @@ -251,14 +258,23 @@ def Init(self): # and proceed with background tasks self.VS.CalcWeightsBackground() self.FacetMachine and self.FacetMachine.initCFInBackground() + self.StokesFacetMachine and self.StokesFacetMachine.initCFInBackground(other_fm=self.FacetMachine) # FacetMachinePSF will skip CF init if they match those of FacetMachine if self.FacetMachinePSF is not None: self.FacetMachinePSF.initCFInBackground(other_fm=self.FacetMachine) def CreateFacetMachines (self): """Creates FacetMachines for data and/or PSF""" - self.FacetMachine = self.FacetMachinePSF = None + self.StokesFacetMachine = self.FacetMachine = self.FacetMachinePSF = None MainFacetOptions = 
self.GiveMainFacetOptions() + if self.do_stokes_residue: + self.StokesFacetMachine = ClassFacetMachine(self.VS, + self.GD, + Precision=self.Precision, + PolMode=self.GD["Output"]["StokesResidues"]) + self.StokesFacetMachine.appendMainField(ImageName="%s.image"%self.BaseName,**MainFacetOptions) + self.StokesFacetMachine.Init() + if self.do_data: self.FacetMachine = ClassFacetMachine(self.VS, self.GD, Precision=self.Precision, PolMode=self.PolMode) @@ -489,7 +505,7 @@ def GiveDirty(self, psf=False, sparsify=0, last_cycle=False): psf_cachepath, psf_valid, psf_writecache = self._checkForCachedPSF(sparsify, key=cache_key) else: psf_valid = psf_writecache = False - + current_model_freqs = np.array([]) ModelImage = None # load from cache @@ -661,7 +677,6 @@ def SaveDirtyProducts(self): self.FacetMachine.ToCasaImage(self.DicoDirty["ImageCube"],ImageName="%s.cube.dirty"%self.BaseName, Fits=True,Freqs=self.VS.FreqBandCenters,Stokes=self.VS.StokesConverter.RequiredStokesProducts()) - if "n" in self._saveims: FacetNormReShape = self.FacetMachine.getNormDict()["FacetNormReShape"] self.FacetMachine.ToCasaImage(FacetNormReShape, @@ -893,7 +908,9 @@ def main(self,NMajor=None): # Polarization clean is not going to be supported. We can only make dirty maps if self.VS.StokesConverter.RequiredStokesProducts() != ['I']: - raise RuntimeError("Unsupported: Polarization cleaning is not defined") + raise RuntimeError("Unsupported: Polarization cleaning is not"\ + " supported. 
Maybe you meant Output-StokesResidues"\ + " instead?") # if we reached a sparsification of 1, we shan't be re-making the PSF if not sparsify: @@ -1172,6 +1189,120 @@ def main(self,NMajor=None): if self.HasDeconvolved: self.Restore() + # Last major cycle may output residues other than Stokes I + # Since the current residue images are for Stokes I only + # we need to redo them in all required stokes + self.do_stokes_residue and self._dump_stokes_residues() + + def _dump_stokes_residues(self): + """ + Precondition: Must have already initialized a Facet Machine in self.FacetMachine + Post-conditions: Dump out stokes residues to disk as requested in + Output-StokesResidues, Stokes residues stored in self.DicoDirty + """ + print>>log, ModColor.Str("============================== Making Stokes residue maps ====================") + print>>log, ModColor.Str ("W.A.R.N.I.N.G: Stokes parameters other than I have not been deconvolved. Use these maps" + " only as a debugging tool.", col="yellow") + + # tell the I/O thread to go load the first chunk + self.VS.ReInitChunkCount() + self.VS.startChunkLoadInBackground() + + # init new grids for Stokes residues + self.FacetMachine.initCFInBackground() + self.StokesFacetMachine.initCFInBackground() + self.StokesFacetMachine.Init() + self.StokesFacetMachine.ReinitDirty() + + current_model_freqs = np.array([]) #invalidate + while True: + # note that collectLoadedChunk() will destroy the current DATA dict, so we must make sure + # the gridding jobs of the previous chunk are finished + self.StokesFacetMachine.collectGriddingResults() + + # get loaded chunk from I/O thread, schedule next chunk + # self.VS.startChunkLoadInBackground() + DATA = self.VS.collectLoadedChunk(start_next=True) + if type(DATA) is str: + print>>log,ModColor.Str("no more data: %s"%DATA, col="red") + break + # None weights indicates an all-flagged chunk: go on to the next chunk + if DATA["Weights"] is None: + continue + + # Stacks average beam if not computed + 
self.StokesFacetMachine.StackAverageBeam(DATA) + self.FacetMachine.StackAverageBeam(DATA) + + # Predict and subtract from current MS vis data + model_freqs = DATA["FreqMappingDegrid"] + + # switch subband if necessary + if not np.array_equal(model_freqs, current_model_freqs): + ModelImage = self.FacetMachine.setModelImage(self.DeconvMachine.GiveModelImage(model_freqs)) + # write out model image, if asked to + current_model_freqs = model_freqs + print>>log,"model image @%s MHz (min,max) = (%f, %f)"%(str(model_freqs/1e6),ModelImage.min(),ModelImage.max()) + else: + print>>log,"reusing model image from previous chunk" + + if self.PredictMode == "BDA-degrid" or self.PredictMode == "DeGridder": + self.FacetMachine.getChunkInBackground(DATA) + elif self.PredictMode == "Montblanc": + from ClassMontblancMachine import ClassMontblancMachine + model = self.ModelMachine.GiveModelList() + mb_machine = ClassMontblancMachine(self.GD, fm_predict.Npix, self.FacetMachine.CellSizeRad) + mb_machine.getChunk(DATA, DATA["data"], model, self.VS.ListMS[DATA["iMS"]]) + mb_machine.close() + else: + raise ValueError("Invalid PredictMode '%s'" % self.PredictMode) + + # Ensure degridding and subtraction has finished before firing up + # gridding + self.FacetMachine.collectDegriddingResults() + + # Grid residue vis + self.StokesFacetMachine.putChunkInBackground(DATA) + + # if Smooth beam enabled, either compute it from the stack, or get it from cache + # else do nothing + self.StokesFacetMachine.finaliseSmoothBeam() + + # fourier transform, stitch facets and release grids + self.DicoDirty = self.StokesFacetMachine.FacetsToIm(NormJones=True) + self.StokesFacetMachine.releaseGrids() + + # All done dump the stokes residues + if "r" in self._saveims: + self.StokesFacetMachine.ToCasaImage(self.DicoDirty["MeanImage"], + ImageName="%s.stokes.residual"%self.BaseName, + Fits=True, + Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) + if "r" in self._savecubes: + 
self.FacetMachine.ToCasaImage(self.DicoDirty["ImageCube"], + ImageName="%s.cube.stokes.residual"%self.BaseName, + Fits=True, + Freqs=self.VS.FreqBandCenters, + Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) + if self.DicoDirty["JonesNorm"] is not None: + # this assumes the matrix squareroot is the same for all + # stokes parameters. This may or may not be true so take this + # with a bag of salt + nch,npol,nx,ny = self.DicoDirty["ImageCube"].shape + DirtyCorr = self.DicoDirty["ImageCube"]/np.sqrt(self.DicoDirty["JonesNorm"]) + if "R" in self._saveims: + MeanCorr = np.mean(DirtyCorr, axis=0).reshape((1, npol, nx, ny)) + self.FacetMachine.ToCasaImage(MeanCorr, + ImageName="%s.stokes.corr.residual"%self.BaseName, + Fits=True, + Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) + if "R" in self._savecubes: + self.FacetMachine.ToCasaImage(DirtyCorr, + ImageName="%s.cube.stokes.corr.residual"%self.BaseName, + Fits=True, + Freqs=self.VS.FreqBandCenters, + Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) + def fitSinglePSF(self, PSF, off, label="mean"): """ Fits a PSF given by argument @@ -1883,8 +2014,6 @@ def alphaconvmap(): APP.runJob("del:sqrtnormcube", self._delSharedImage_worker, io=0, args=[_images.readwrite(), "sqrtnormcube"]) APP.awaitJobResults(["save:*", "del:*"]) - self.FacetMachinePSF = None - self.FacetMachine = None def testDegrid(self): import pylab diff --git a/DDFacet/Imager/ClassFacetMachine.py b/DDFacet/Imager/ClassFacetMachine.py index 2610829c..15b72b4d 100644 --- a/DDFacet/Imager/ClassFacetMachine.py +++ b/DDFacet/Imager/ClassFacetMachine.py @@ -43,6 +43,7 @@ from DDFacet.ToolsDir.GiveEdges import GiveEdges from DDFacet.Imager.ClassImToGrid import ClassImToGrid from DDFacet.cbuild.Gridder import _pyGridderSmearPols +from DDFacet.Data.ClassStokes import ClassStokes #from DDFacet.Array import NpParallel from DDFacet.Other import MyLogger log=MyLogger.getLogger("ClassFacetMachine") 
@@ -70,7 +71,7 @@ def __init__(self, GD, # ParsetFile="ParsetNew.txt", Precision="S", - PolMode="I", + PolMode=["I"], Sols=None, PointingID=0, DoPSF=False, @@ -96,7 +97,9 @@ def __init__(self, self.PointingID = PointingID self.VS, self.GD = VS, GD - self.npol = self.VS.StokesConverter.NStokesInImage() + self.StokesConverter = ClassStokes(self.VS.StokesConverter.AvailableCorrelationProductsIds(), + PolMode) + self.npol = self.StokesConverter.NStokesInImage() self.Parallel = True if APP is not None: APP.registerJobHandlers(self) @@ -276,6 +279,11 @@ def AppendFacet(self, iFacet, l0, m0, diam): NpixFacet, _ = EstimateNpix(diam / self.CellSizeRad, Padding=1) _, NpixPaddedGrid = EstimateNpix(NpixFacet, Padding=self.Padding) + if NpixPaddedGrid / NpixFacet > self.Padding: + print>> log, ModColor.Str("W.A.R.N.I.N.G: Your FFTs are too small. We will pad it %.2f x "\ + "instead of %.2f x" % (float(NpixPaddedGrid)/NpixFacet, self.Padding), + col="yellow") + diam = NpixFacet * self.CellSizeRad diamPadded = NpixPaddedGrid * self.CellSizeRad RadiusFacet = diam * 0.5 @@ -353,6 +361,7 @@ def setFacetsLocs(self): self.Npix = Npix self.OutImShape = (self.nch, self.npol, self.Npix, self.Npix) _, NpixPaddedGrid = EstimateNpix(NpixFacet, Padding=Padding) + self.NpixPaddedFacet = NpixPaddedGrid self.NpixFacet = NpixFacet self.FacetShape = (self.nch, self.npol, NpixFacet, NpixFacet) @@ -733,8 +742,8 @@ def _createGridMachine(self, iFacet, **kw): FacetInfo["DicoConfigGM"]["NPix"], FacetInfo["lmShift"], iFacet, self.SpheNorm, self.VS.NFreqBands, - self.VS.StokesConverter.AvailableCorrelationProductsIds(), - self.VS.StokesConverter.RequiredStokesProductsIds(), + self.StokesConverter.AvailableCorrelationProductsIds(), + self.StokesConverter.RequiredStokesProductsIds(), **kw) def ToCasaImage(self, ImageIn, Fits=True, ImageName=None, diff --git a/DDFacet/Parset/DefaultParset.cfg b/DDFacet/Parset/DefaultParset.cfg index f165ced6..02cc5518 100755 --- a/DDFacet/Parset/DefaultParset.cfg +++ 
b/DDFacet/Parset/DefaultParset.cfg @@ -62,6 +62,11 @@ Images = DdPAMRIikz # Combination of letter codes indicating w alphathreshold = 7 # Multiple of the RMS in final residual which determines threshold for fitting alpha map. #metavar:N #type:int alphamaskthreshold = 15 # Multiple of the RMS in final residual which determines threshold for creating the alpha map mask (i.e. by thresholding the restored image). #metavar:N #type:int +StokesResidues = I # After cleaning Stokes I, output specified residues if [r] or [R] is specified in option Output-Images. Note that the imager + does not perform deconvolution on any Stokes products other than I - it + only outputs residues. + #options:I|V|Q|U|IQ|QI|IU|UI|IV|VI|UQ|QU|QV|VQ|UV|VU|IQU|IUQ|UIQ|UQI|QUI|QIU|IQV|IVQ|VIQ|VQI|QVI|QIV|IUV|IVU|VIU|VUI|UVI|UIV|QUV|QVU|VQU|VUQ|UVQ|UQV|IQUV|IQVU|IUQV|IUVQ|IVQU|IVUQ|QIUV|QIVU|VIUQ|VIQU|UIVQ|UIQV|QUIV|UQIV|UVIQ|VUIQ|VQIU|QVIU|QUVI|UQVI|UVQI|VUQI|VQUI|QVUI + [Image] _Help = General imager settings @@ -214,7 +219,7 @@ PSFBox = auto # determines the size of the PSF subtraction box us Use "auto" (or "sidelobe") for a Clark-CLEAN-style box taken out to a certain sidelobe (faster). Use "full" to subtract the full PSF, Hogbom-style (more accurate, can also combine with --Image-PSFOversize for maximum accuracy). Use an integer number to set an explicit box radius, in pixels. (HMP) #metavar:BOX - + [Mask] _Help = Masking options. The logic being Mask_{i+1} = ExternalMask | ResidualMask | Mask_{i} External = None # External clean mask image (FITS format). 
#metavar:FILENAME diff --git a/DDFacet/ToolsDir/ModToolBox.py b/DDFacet/ToolsDir/ModToolBox.py index 16294c19..99fe269c 100644 --- a/DDFacet/ToolsDir/ModToolBox.py +++ b/DDFacet/ToolsDir/ModToolBox.py @@ -41,13 +41,13 @@ # def EstimateNpix(Npix,Padding=1): # Npix=int(round(Npix)) - + # NpixOrig=Npix # if Npix%2!=0: Npix+=1 # Npix=ModToolBox.GiveClosestFastSize(Npix) # NpixOpt=Npix - - + + # Npix*=Padding # Npix=int(round(Npix)) # if Npix%2!=0: Npix+=1 @@ -55,7 +55,13 @@ # print ModColor.Str("(NpixOrig, NpixOpt, NpixOptPadded): %i --> %i --> %i"%(NpixOrig,NpixOpt,Npix)) # return NpixOpt,Npix -def EstimateNpix(Npix,Padding=1): +def EstimateNpix(Npix, + Padding=1.0, + min_size_fft=513): + """ Picks image size from the list of fast FFT sizes. + To avoid spectral leakage the number of taps in the FFT + must not be too small. + """ Npix=int(round(Npix)) Odd=True @@ -64,14 +70,15 @@ def EstimateNpix(Npix,Padding=1): #if Npix%2==0: Npix+=1 Npix=GiveClosestFastSize(Npix,Odd=Odd) NpixOpt=Npix - - - Npix*=Padding + + + Npix *= Padding + if Npix < min_size_fft: + Npix = min_size_fft Npix=int(round(Npix)) #if Npix%2!=0: Npix+=1 #if Npix%2==0: Npix+=1 Npix=GiveClosestFastSize(Npix,Odd=Odd) - #print>>log, ModColor.Str("With padding=%f: (NpixOrig, NpixOpt, NpixOptPadded): %i --> %i --> %i"%(Padding,NpixOrig,NpixOpt,Npix)) return NpixOpt,Npix class FFTW_Convolve(): @@ -115,7 +122,7 @@ def __init__(self,A): self.fft_FORWARD = pyfftw.FFTW(self.a, self.b,direction="FFTW_FORWARD",flags=('FFTW_ESTIMATE', ))#,threads=4) self.fft_BACKWARD = pyfftw.FFTW(self.a, self.b,direction="FFTW_BACKWARD",flags=('FFTW_ESTIMATE', ))#,threads=4) #print>>log, "done" - + def fft(self,A): #log=MyLogger.getLogger("ModToolBox.FFTM2.fft") self.a[:] = iFs(A.astype(self.ThisType),axes=-1) diff --git a/setup.cfg b/setup.cfg index 916e7c80..8fe81427 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,9 +10,9 @@ compopts="" #compopts=-DENABLE_NATIVE_TUNING=ON -ENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release [build] 
-compopts="" +#compopts="" #See [install] comment -#compopts=-DENABLE_NATIVE_TUNING=ON -ENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release +compopts=-DENABLE_NATIVE_TUNING=ON -DENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release [bdist_wheel] universal=0 From 7eea2ad78e068b1c1112573b3507e5df4d942cb9 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Tue, 11 Jul 2017 08:51:41 +0200 Subject: [PATCH 11/58] Stokes Residue Maps Adds support to image stokes I,Q,U,V residue maps after cleaning. Simply add residual maps to the list of output images and/or cubes and specify which stokes you want to output through Output-StokesResidues --- DDFacet/Imager/ClassDeconvMachine.py | 21 +++++++++++---------- DDFacet/Imager/ClassFacetMachine.py | 5 +++-- setup.cfg | 6 +++--- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index 91d73852..aa616956 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -258,7 +258,6 @@ def Init(self): # and proceed with background tasks self.VS.CalcWeightsBackground() self.FacetMachine and self.FacetMachine.initCFInBackground() - self.StokesFacetMachine and self.StokesFacetMachine.initCFInBackground(other_fm=self.FacetMachine) # FacetMachinePSF will skip CF init if they match those of FacetMachine if self.FacetMachinePSF is not None: self.FacetMachinePSF.initCFInBackground(other_fm=self.FacetMachine) @@ -271,7 +270,8 @@ def CreateFacetMachines (self): self.StokesFacetMachine = ClassFacetMachine(self.VS, self.GD, Precision=self.Precision, - PolMode=self.GD["Output"]["StokesResidues"]) + PolMode=self.GD["Output"]["StokesResidues"], + custom_id="STOKESFM") self.StokesFacetMachine.appendMainField(ImageName="%s.image"%self.BaseName,**MainFacetOptions) self.StokesFacetMachine.Init() @@ -1209,11 +1209,11 @@ def _dump_stokes_residues(self): self.VS.startChunkLoadInBackground() # init new grids for Stokes residues - 
self.FacetMachine.initCFInBackground() - self.StokesFacetMachine.initCFInBackground() self.StokesFacetMachine.Init() + self.FacetMachine.Init() self.StokesFacetMachine.ReinitDirty() - + self.FacetMachine.initCFInBackground() + self.StokesFacetMachine.initCFInBackground(other_fm=self.FacetMachine) #same weighting map and CF support current_model_freqs = np.array([]) #invalidate while True: # note that collectLoadedChunk() will destroy the current DATA dict, so we must make sure @@ -1238,6 +1238,7 @@ def _dump_stokes_residues(self): model_freqs = DATA["FreqMappingDegrid"] # switch subband if necessary + self.FacetMachine.awaitInitCompletion() if not np.array_equal(model_freqs, current_model_freqs): ModelImage = self.FacetMachine.setModelImage(self.DeconvMachine.GiveModelImage(model_freqs)) # write out model image, if asked to @@ -1245,7 +1246,6 @@ def _dump_stokes_residues(self): print>>log,"model image @%s MHz (min,max) = (%f, %f)"%(str(model_freqs/1e6),ModelImage.min(),ModelImage.max()) else: print>>log,"reusing model image from previous chunk" - if self.PredictMode == "BDA-degrid" or self.PredictMode == "DeGridder": self.FacetMachine.getChunkInBackground(DATA) elif self.PredictMode == "Montblanc": @@ -1262,6 +1262,7 @@ def _dump_stokes_residues(self): self.FacetMachine.collectDegriddingResults() # Grid residue vis + self.StokesFacetMachine.awaitInitCompletion() self.StokesFacetMachine.putChunkInBackground(DATA) # if Smooth beam enabled, either compute it from the stack, or get it from cache @@ -1275,12 +1276,12 @@ def _dump_stokes_residues(self): # All done dump the stokes residues if "r" in self._saveims: self.StokesFacetMachine.ToCasaImage(self.DicoDirty["MeanImage"], - ImageName="%s.stokes.residual"%self.BaseName, + ImageName="%s.stokes.app.residual"%self.BaseName, Fits=True, Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) if "r" in self._savecubes: self.FacetMachine.ToCasaImage(self.DicoDirty["ImageCube"], - 
ImageName="%s.cube.stokes.residual"%self.BaseName, + ImageName="%s.cube.stokes.app.residual"%self.BaseName, Fits=True, Freqs=self.VS.FreqBandCenters, Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) @@ -1293,12 +1294,12 @@ def _dump_stokes_residues(self): if "R" in self._saveims: MeanCorr = np.mean(DirtyCorr, axis=0).reshape((1, npol, nx, ny)) self.FacetMachine.ToCasaImage(MeanCorr, - ImageName="%s.stokes.corr.residual"%self.BaseName, + ImageName="%s.stokes.int.residual"%self.BaseName, Fits=True, Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) if "R" in self._savecubes: self.FacetMachine.ToCasaImage(DirtyCorr, - ImageName="%s.cube.stokes.corr.residual"%self.BaseName, + ImageName="%s.cube.stokes.int.residual"%self.BaseName, Fits=True, Freqs=self.VS.FreqBandCenters, Stokes=self.StokesFacetMachine.StokesConverter.RequiredStokesProducts()) diff --git a/DDFacet/Imager/ClassFacetMachine.py b/DDFacet/Imager/ClassFacetMachine.py index 15b72b4d..82479645 100644 --- a/DDFacet/Imager/ClassFacetMachine.py +++ b/DDFacet/Imager/ClassFacetMachine.py @@ -76,6 +76,7 @@ def __init__(self, PointingID=0, DoPSF=False, Oversize=1, # factor by which image is oversized + custom_id=None ): self.HasFourierTransformed = False @@ -104,7 +105,7 @@ def __init__(self, if APP is not None: APP.registerJobHandlers(self) self._fft_job_counter = APP.createJobCounter("fft") - self._app_id = "FMPSF" if DoPSF else "FM" + self._app_id = ("FMPSF" if DoPSF else "FM") if custom_id is None else custom_id DicoConfigGM = {} self.DicoConfigGM = DicoConfigGM @@ -652,7 +653,7 @@ def _initcf_worker (self, iFacet, facet_dict, cachepath, cachevalid, wmax): ClassDDEGridMachine.ClassDDEGridMachine.verifyCFDict(facet_dict, self.GD["CF"]["Nw"]) return "cached",path,iFacet except: - print>>log,traceback.format_exc() + #print>>log,traceback.format_exc() #confusing... 
print>>log, "Error loading %s, will re-generate"%path facet_dict.delete() # ok, regenerate the terms at this point diff --git a/setup.cfg b/setup.cfg index 8fe81427..95f4f56a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,12 +7,12 @@ compopts="" # or enabling architecture dependent optimizations during compile time. # TAKE NOTE::: disable before pusing a sdist and bdist_wheel to pypi - supposed to #be architecture independent!!! -#compopts=-DENABLE_NATIVE_TUNING=ON -ENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release +#compopts=-DENABLE_NATIVE_TUNING=ON -DENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release [build] -#compopts="" +compopts="" #See [install] comment -compopts=-DENABLE_NATIVE_TUNING=ON -DENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release +#compopts=-DENABLE_NATIVE_TUNING=ON -DENABLE_FAST_MATH=ON -DCMAKE_BUILD_TYPE=Release [bdist_wheel] universal=0 From 2626e2b9357f6cb4a1092ce71f955131f7c3aed1 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Wed, 12 Jul 2017 13:33:08 +0200 Subject: [PATCH 12/58] Add stokes residues to 3C147 test. Add another test w/o beam --- .../VeryLongAcceptanceTests/Test3C147.py | 82 ++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/DDFacet/Tests/VeryLongAcceptanceTests/Test3C147.py b/DDFacet/Tests/VeryLongAcceptanceTests/Test3C147.py index 596e7f03..4f0fddb4 100644 --- a/DDFacet/Tests/VeryLongAcceptanceTests/Test3C147.py +++ b/DDFacet/Tests/VeryLongAcceptanceTests/Test3C147.py @@ -24,8 +24,88 @@ class Test3C147(DDFacet.Tests.ShortAcceptanceTests.ClassCompareFITSImage.ClassCompareFITSImage): + @classmethod + def defineImageList(cls): + """ Method to define set of reference images to be tested. + Can be overridden to add additional output products to the test. + These must correspond to whatever is used in writing out the FITS files (eg. 
those in ClassDeconvMachine.py) + Returns: + List of image identifiers to reference and output products + """ + return ['dirty', 'dirty.corr', 'psf', 'NormFacets', 'Norm', + 'int.residual', 'app.residual', 'int.model', 'app.model', + 'int.convmodel', 'app.convmodel', 'int.restored', 'app.restored', + 'restored', 'stokes.app.residual', 'stokes.int.residual'] + + @classmethod + def defineMaxSquaredError(cls): + """ Method defining maximum error tolerance between any pair of corresponding + pixels in the output and corresponding reference FITS images. + Should be overridden if another tolerance is desired + Returns: + constant for maximum tolerance used in test case setup + """ + return [1e-6,1e-6,1e-6,1e-6,1e-6, + 1e-3,1e-4,1e-3,1e-4, + 1e-3,1e-4,1e-3,1e-4, + 1e-1,1e-4,1e-3] #epsilons per image pair, as listed in defineImageList + + @classmethod + def defMeanSquaredErrorLevel(cls): + """ Method defining maximum tolerance for the mean squared error between any + pair of FITS images. Should be overridden if another tolerance is + desired + Returns: + constant for tolerance on mean squared error + """ + return [1e-7,1e-7,1e-7,1e-7,1e-7, + 1e-5,1e-5,1e-5,1e-5, + 1e-5,1e-5,1e-5,1e-5, + 1e-5,1e-5,1e-5] #epsilons per image pair, as listed in defineImageList + pass #check all images +class Test3C147WOBeam(DDFacet.Tests.ShortAcceptanceTests.ClassCompareFITSImage.ClassCompareFITSImage): + @classmethod + def defineImageList(cls): + """ Method to define set of reference images to be tested. + Can be overridden to add additional output products to the test. + These must correspond to whatever is used in writing out the FITS files (eg. 
those in ClassDeconvMachine.py) + Returns: + List of image identifiers to reference and output products + """ + return ['dirty', 'dirty.corr', 'psf', 'NormFacets', 'Norm', + 'app.residual', 'app.model', + 'app.convmodel', 'app.restored', + 'restored', 'stokes.app.residual'] + + @classmethod + def defineMaxSquaredError(cls): + """ Method defining maximum error tolerance between any pair of corresponding + pixels in the output and corresponding reference FITS images. + Should be overridden if another tolerance is desired + Returns: + constant for maximum tolerance used in test case setup + """ + return [1e-6,1e-6,1e-6,1e-6,1e-6, + 1e-4,1e-4, + 1e-4,1e-4, + 1e-1,1e-4] #epsilons per image pair, as listed in defineImageList + + @classmethod + def defMeanSquaredErrorLevel(cls): + """ Method defining maximum tolerance for the mean squared error between any + pair of FITS images. Should be overridden if another tolerance is + desired + Returns: + constant for tolerance on mean squared error + """ + return [1e-7,1e-7,1e-7,1e-7,1e-7, + 1e-5,1e-5, + 1e-5,1e-5, + 1e-5,1e-5] #epsilons per image pair, as listed in defineImageList + + pass #check all images if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() From 308ca5a21d853842a1993519fd41de204ee1d83a Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 09:58:48 +0100 Subject: [PATCH 13/58] Re-enable ConvolveGaussianScipy for SSD --- DDFacet/ToolsDir/ModFFTW.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index 8650fc8a..5f945dc2 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -281,24 +281,24 @@ def GiveGauss(Npix,CellSizeRad=None,GaussPars=(0.,0.,0.),dtype=np.float32,parall #Gauss/=np.sum(Gauss) return Gauss -#def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): -# warnings.warn("deprecated: this wont work for small 
ffts...", -# DeprecationWarning) -# Npix=int(2*8*Sig) -# if Npix%2==0: Npix+=1 -# x0=Npix/2 -# x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] -# #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) -# if GaussPar is None: -# GaussPar=(Sig,Sig,0) -# in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) - -# nch,npol,_,_=Ain0.shape -# Out=np.zeros_like(Ain0) -# for ch in range(nch): -# in1=Ain0[ch,0] -# Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real -# return Out,in2 +def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): + #warnings.warn("deprecated: this wont work for small ffts...", + # DeprecationWarning) + Npix=int(2*8*Sig) + if Npix%2==0: Npix+=1 + x0=Npix/2 + x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] + #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) + if GaussPar is None: + GaussPar=(Sig,Sig,0) + in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) + + nch,npol,_,_=Ain0.shape + Out=np.zeros_like(Ain0) + for ch in range(nch): + in1=Ain0[ch,0] + Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real + return Out,in2 def learnFFTWWisdom(npix,dtype=np.float32): From 1d815790d805eba7a3d6589362e189bec999d9cb Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 09:59:32 +0100 Subject: [PATCH 14/58] Fix up HMP masking --- DDFacet/Imager/ClassImageNoiseMachine.py | 29 ++++++++++++------------ 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/DDFacet/Imager/ClassImageNoiseMachine.py b/DDFacet/Imager/ClassImageNoiseMachine.py index 35b402f1..d6a87bf3 100644 --- a/DDFacet/Imager/ClassImageNoiseMachine.py +++ b/DDFacet/Imager/ClassImageNoiseMachine.py @@ -109,7 +109,7 @@ def setPSF(self,DicoVariablePSF): self.DicoVariablePSF=DicoVariablePSF def giveBrutalRestored(self,DicoResidual): - print>>log," Running Brutal HMP..." + print>>log," Running Brutal deconvolution..." 
ListSilentModules=["ClassImageDeconvMachineMSMF","ClassPSFServer","ClassMultiScaleMachine","GiveModelMachine", "ClassModelMachineMSMF", "ClassImageDeconvMachineHogbom", "ClassModelMachineHogbom"] # MyLogger.setSilent(ListSilentModules) @@ -117,8 +117,6 @@ def giveBrutalRestored(self,DicoResidual): self.Orig_MeanDirty=self.DicoDirty["MeanImage"].copy() self.Orig_Dirty=self.DicoDirty["ImageCube"].copy() GD=copy.deepcopy(self.GD) - # take any reference frequency - doesn't matter - #self.RefFreq=np.mean(self.DicoVariablePSF["freqs"][0]) self.RefFreq=self.ExternalModelMachine.RefFreq self.GD=GD #self.GD["Parallel"]["NCPU"]=1 @@ -161,14 +159,18 @@ def giveBrutalRestored(self,DicoResidual): MinorCycleConfig["NFreqBands"]=self.NFreqBands MinorCycleConfig["GD"] = self.GD #MinorCycleConfig["RefFreq"] = self.RefFreq - ModConstructor = ClassModModelMachine(self.GD) - ModelMachine = ModConstructor.GiveMM(Mode=self.GD["Deconv"]["Mode"]) - ModelMachine.setRefFreq(self.RefFreq) - MinorCycleConfig["ModelMachine"]=ModelMachine #MinorCycleConfig["CleanMaskImage"]=None self.MinorCycleConfig=MinorCycleConfig - if self.GD["Deconv"]["Mode"]=="HMP": + if self.GD["Deconv"]["Mode"] in ["HMP","SSD"]: + # for SSD we need to set up the HMP ModelMachine. 
+ self.GD["Deconv"]["Mode"]="HMP" + ModConstructor = ClassModModelMachine(self.GD) + ModelMachine = ModConstructor.GiveMM(Mode=self.GD["Deconv"]["Mode"]) + ModelMachine.setRefFreq(self.RefFreq) + MinorCycleConfig["ModelMachine"]=ModelMachine + self.MinorCycleConfig=MinorCycleConfig from DDFacet.Imager.MSMF import ClassImageDeconvMachineMSMF + self.DeconvMachine=ClassImageDeconvMachineMSMF.ClassImageDeconvMachine(MainCache=self.MainCache, CacheSharedMode=True, ParallelMode=False, @@ -182,7 +184,7 @@ def giveBrutalRestored(self,DicoResidual): CacheFileName="HMP_Masking", **self.MinorCycleConfig) else: - NotImplementedError("Mode %s not compatible with automasking" % self.GD["Deconv"]["Mode"]) + raise NotImplementedError("Mode %s not compatible with automasking" % self.GD["Deconv"]["Mode"]) self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["EstimatesAvgPSF"][-1], GridFreqs=self.GridFreqs, DegridFreqs=self.DegridFreqs, RefFreq=self.RefFreq) @@ -227,7 +229,7 @@ def giveBrutalRestored(self,DicoResidual): ModelImage=Model[0,0] print>>log," Convolving image with beam %s..."%str(self.DicoVariablePSF["EstimatesAvgPSF"][1]) - from DDFacet.ToolsDir import Gaussian + #from DDFacet.ToolsDir import Gaussian # Sig_rad=np.max(self.DicoVariablePSF["EstimatesAvgPSF"][1][0:2]) @@ -254,9 +256,8 @@ def giveBrutalRestored(self,DicoResidual): # # ModelConv=scipy.signal.convolve2d(ModelImage,G,mode="same") - - ModelConv=ModFFTW.ConvolveGaussian(Model, CellSizeRad=self.DicoDirty["ImageInfo"]["CellSizeRad"], - GaussPars=[self.DicoVariablePSF["EstimatesAvgPSF"][1]]) + ModelConv=ModFFTW.ConvolveGaussian({0:Model},0,0,0, CellSizeRad=self.DicoDirty["ImageInfo"]["CellSizeRad"], + GaussPars_ch=self.DicoVariablePSF["EstimatesAvgPSF"][1]) #GaussPar=[i*5 for i in self.DicoVariablePSF["EstimatesAvgPSF"][1]] #ModelConv+=ModFFTW.ConvolveGaussian(Model, CellSizeRad=self.DicoDirty["ImageInfo"]["CellSizeRad"], @@ -272,5 +273,5 @@ def giveBrutalRestored(self,DicoResidual): 
self.DicoDirty["MeanImage"][...]=self.Orig_MeanDirty[...] self.DicoDirty["ImageCube"][...]=self.Orig_Dirty[...] - MyLogger.setLoud(ListSilentModules) + #MyLogger.setLoud(ListSilentModules) return self.Restored From 54e764fc04b17d5047477273f2c06986d67f77fc Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 13:53:18 +0100 Subject: [PATCH 15/58] Get SSD working --- .../Imager/SSD/ClassImageDeconvMachineSSD.py | 8 +++++- DDFacet/Imager/SSD/ClassInitSSDModel.py | 27 ++++++++++++++++--- DDFacet/Imager/SSD/ClassModelMachineSSD.py | 2 +- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py index 2db0a2dc..70f38d88 100644 --- a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py +++ b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py @@ -134,6 +134,10 @@ def Init(self,**kwargs): self.DicoVariablePSF["PSFSideLobes"]=kwargs["PSFAve"] self.setSideLobeLevel(kwargs["PSFAve"][0], kwargs["PSFAve"][1]) self.ModelMachine.setRefFreq(kwargs["RefFreq"]) + # store grid and degrid freqs for ease of passing to MSMF + print kwargs["GridFreqs"],kwargs["DegridFreqs"] + self.GridFreqs=kwargs["GridFreqs"] + self.DegridFreqs=kwargs["DegridFreqs"] self.ModelMachine.setFreqMachine(kwargs["GridFreqs"], kwargs["DegridFreqs"]) def AdaptArrayShape(self,A,Nout): @@ -288,7 +292,7 @@ def InitMSMF(self): - print>>log," selected %i islands larger that %i pixels for HMP initialisation"%(np.count_nonzero(ListDoMSMFIslandsInit),self.GD["SSDClean"]["MinSizeInitHMP"]) + print>>log," selected %i islands larger than %i pixels for HMP initialisation"%(np.count_nonzero(ListDoMSMFIslandsInit),self.GD["SSDClean"]["MinSizeInitHMP"]) @@ -299,6 +303,8 @@ def InitMSMF(self): self.DicoVariablePSF, self.DicoDirty, self.ModelMachine.RefFreq, + self.GridFreqs, + self.DegridFreqs, MainCache=self.maincache, NCPU=self.NCPU, IdSharedMem=self.IdSharedMem) diff --git 
a/DDFacet/Imager/SSD/ClassInitSSDModel.py b/DDFacet/Imager/SSD/ClassInitSSDModel.py index de69f3a6..3b3aec17 100644 --- a/DDFacet/Imager/SSD/ClassInitSSDModel.py +++ b/DDFacet/Imager/SSD/ClassInitSSDModel.py @@ -19,11 +19,13 @@ class ClassInitSSDModelParallel(): - def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq,MainCache=None,NCPU=1,IdSharedMem=""): + def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq,GridFreqs,DegridFreqs,MainCache=None,NCPU=1,IdSharedMem=""): self.DicoVariablePSF=DicoVariablePSF self.DicoDirty=DicoDirty GD=copy.deepcopy(GD) self.RefFreq=RefFreq + self.GridFreqs=GridFreqs + self.DegridFreqs=DegridFreqs self.MainCache=MainCache self.GD=GD self.NCPU=NCPU @@ -33,6 +35,8 @@ def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq,MainCache=None,NCPU=1,IdS self.DicoVariablePSF, self.DicoDirty, self.RefFreq, + self.GridFreqs, + self.DegridFreqs, MainCache=self.MainCache, IdSharedMem=self.IdSharedMem) @@ -69,6 +73,8 @@ def giveDicoInitIndiv(self,ListIslands,ListDoIsland=None,Parallel=True): self.DicoVariablePSF, self.DicoDirty, self.RefFreq, + self.GridFreqs, + self.DegridFreqs, self.MainCache, self.ModelImage, ListIslands, @@ -126,7 +132,7 @@ def giveDicoInitIndiv(self,ListIslands,ListDoIsland=None,Parallel=True): ###################################################################################################### class ClassInitSSDModel(): - def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq, + def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq,GridFreqs,DegridFreqs, MainCache=None, IdSharedMem="", DoWait=False, @@ -135,6 +141,8 @@ def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq, self.DicoDirty=DicoDirty GD=copy.deepcopy(GD) self.RefFreq=RefFreq + self.GridFreqs=GridFreqs + self.DegridFreqs=DegridFreqs self.GD=GD self.GD["Parallel"]["NCPU"]=1 #self.GD["HMP"]["Alpha"]=[0,0,1]#-1.,1.,5] @@ -166,6 +174,9 @@ def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq, MinorCycleConfig["NCPU"]=self.GD["Parallel"]["NCPU"] 
MinorCycleConfig["NFreqBands"]=self.NFreqBands MinorCycleConfig["GD"] = self.GD + MinorCycleConfig["GridFreqs"] = self.GridFreqs + MinorCycleConfig["DegridFreqs"] = self.DegridFreqs + #MinorCycleConfig["RefFreq"] = self.RefFreq ModConstructor = ClassModModelMachine(self.GD) @@ -195,7 +206,8 @@ def __init__(self,GD,DicoVariablePSF,DicoDirty,RefFreq, self.MeanDirty=DicoDirty["MeanImage"] #print "Start 3" - self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["PSFSideLobes"],DoWait=DoWait) + self.DeconvMachine.Init(PSFVar=self.DicoVariablePSF,PSFAve=self.DicoVariablePSF["PSFSideLobes"], + GridFreqs=self.GridFreqs,DegridFreqs=self.DegridFreqs,DoWait=DoWait) if DoWait: print "IINit3" @@ -338,6 +350,7 @@ def giveModel(self,ListPixParms): self.ModelMachine=ModelMachine #self.ModelMachine.DicoSMStacked=self.DicoBasicModelMachine self.ModelMachine.setRefFreq(self.RefFreq,Force=True) + self.ModelMachine.setFreqMachine(self.GridFreqs,self.DegridFreqs) self.MinorCycleConfig["ModelMachine"] = ModelMachine self.ModelMachine.setModelShape(self.SubDirty.shape) self.ModelMachine.setListComponants(self.DeconvMachine.ModelMachine.ListScales) @@ -413,7 +426,7 @@ def giveModel(self,ListPixParms): fMult=1. 
SModel=ModelImage[0,0,x,y]*fMult - AModel=self.ModelMachine.GiveSpectralIndexMap(DoConv=False,MaxDR=1e3)[0,0,x,y] + AModel=self.ModelMachine.GiveSpectralIndexMap()[0,0,x,y] T.timeit("spec index") return SModel,AModel @@ -434,6 +447,8 @@ def __init__(self, DicoVariablePSF, DicoDirty, RefFreq, + GridFreqs, + DegridFreqs, MainCache, ModelImage, ListIsland, @@ -448,6 +463,8 @@ def __init__(self, self.DicoVariablePSF=DicoVariablePSF self.DicoDirty=DicoDirty self.RefFreq=RefFreq + self.GridFreqs=GridFreqs + self.DegridFreqs=DegridFreqs self.MainCache=MainCache self.ModelImage=ModelImage self.ListIsland=ListIsland @@ -464,6 +481,8 @@ def Init(self): self.DicoVariablePSF, self.DicoDirty, self.RefFreq, + self.GridFreqs, + self.DegridFreqs, MainCache=self.MainCache, IdSharedMem=self.IdSharedMem, DoWait=False, diff --git a/DDFacet/Imager/SSD/ClassModelMachineSSD.py b/DDFacet/Imager/SSD/ClassModelMachineSSD.py index 9dc56919..ce8df57b 100644 --- a/DDFacet/Imager/SSD/ClassModelMachineSSD.py +++ b/DDFacet/Imager/SSD/ClassModelMachineSSD.py @@ -416,7 +416,7 @@ def ToNPYModel(self,FitsFile,SkyModel,BeamImage=None): #ExcludeCat=R.CatExclude - AlphaMap=self.GiveSpectralIndexMap(DoConv=False) + AlphaMap=self.GiveSpectralIndexMap() ModelMap=self.GiveModelImage() nch,npol,_,_=ModelMap.shape From 6abc48fe2d15bdd9aef5de66da7bac001996f654 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 19:03:55 +0100 Subject: [PATCH 16/58] Fix for convexify island bug --- .../Imager/SSD/ClassIslandDistanceMachine.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/DDFacet/Imager/SSD/ClassIslandDistanceMachine.py b/DDFacet/Imager/SSD/ClassIslandDistanceMachine.py index 4a6bf642..f3bc8fc4 100644 --- a/DDFacet/Imager/SSD/ClassIslandDistanceMachine.py +++ b/DDFacet/Imager/SSD/ClassIslandDistanceMachine.py @@ -11,6 +11,48 @@ from scipy.spatial import ConvexHull from matplotlib.path import Path +class Island(object): + ''' + helper class for MergeIsland + ''' + def 
__init__(self,island): + if isinstance(island,set): + self.ilist=[[j[0],j[1]] for j in island] + self.iset=island + else: + self.iset=None + self.ilist=island + self.npa=np.array(self.ilist) + self.minx=np.min(self.npa[:,0]) + self.maxx=np.max(self.npa[:,0]) + self.miny=np.min(self.npa[:,1]) + self.maxy=np.max(self.npa[:,1]) + self.merged=False + self.new=False + def overlap(self,other): + # check if the bounding box overlaps + if (other.maxxself.maxx or other.miny>self.maxy): + return False + return True + def make_set(self): + if self.iset is None: + self.iset=set(((j[0],j[1]) for j in self.ilist)) + def intersect(self,other): + self.make_set() + other.make_set() + inter=self.iset.intersection(other.iset) + return len(inter)>0 + def merge(self,other): + self.make_set() + other.make_set() + merged=self.iset.union(other.iset) + other.merged=True + return Island(merged) + def plot(self,**kwargs): + plt.scatter(self.npa[:,0],self.npa[:,1],**kwargs) + + class ClassIslandDistanceMachine(): def __init__(self,GD,MaskArray,PSFServer,DicoDirty,IdSharedMem=""): self.GD=GD @@ -322,6 +364,27 @@ def ConvexifyIsland(self,ListIslands): return ListConvexIslands + def MergeIslands(self,ListIslands): + print>>log," Merge intersecting islands" + Islands=[Island(i) for i in ListIslands] + + i=0 + while i>log," %i islands remaining after merge" % len(result) + return result + def calcDistanceMatrixMinParallel(self,ListIslands,Parallel=True): NIslands=len(ListIslands) self.D=np.zeros((NIslands,NIslands),np.float32) From 7cfbb86217cf5d20c7ea36ed94addbcbff1e01a4 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 19:04:20 +0100 Subject: [PATCH 17/58] Fix for convexify island bug --- DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py index 70f38d88..19caeb1b 100644 --- 
a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py +++ b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py @@ -231,7 +231,7 @@ def SearchIslands(self,Threshold): ListIslands=IslandDistanceMachine.CalcCrossIslandFlux(ListIslands) ListIslands=IslandDistanceMachine.ConvexifyIsland(ListIslands) - + ListIslands=IslandDistanceMachine.MergeIslands(ListIslands) self.LabelIslandsImage=IslandDistanceMachine.CalcLabelImage(ListIslands) From 040043592ff658cbcd7193f00c6ee2520628554d Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 13 Jul 2017 19:36:40 +0100 Subject: [PATCH 18/58] Freq machine hacks to work with one output channel --- DDFacet/Imager/ClassFrequencyMachine.py | 9 ++++++--- DDFacet/Imager/SSD/ClassModelMachineSSD.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/DDFacet/Imager/ClassFrequencyMachine.py b/DDFacet/Imager/ClassFrequencyMachine.py index aff32ad6..d6b869fb 100644 --- a/DDFacet/Imager/ClassFrequencyMachine.py +++ b/DDFacet/Imager/ClassFrequencyMachine.py @@ -44,7 +44,11 @@ class ClassFrequencyMachine(object): """ def __init__(self, Freqs, Freqsp, ref_freq, GD=None): self.Freqs = np.asarray(Freqs) - self.Freqsp = np.asarray(Freqsp) + # Use the longer of the two frequency arrays + if len(Freqs)>len(Freqsp): + self.Freqsp = np.asarray(Freqs) + else: + self.Freqsp = np.asarray(Freqs) self.nchan = self.Freqs.size self.nchan_degrid = self.Freqsp.size #print "Nchan =", self.nchan @@ -167,7 +171,6 @@ def FitAlphaMap(self, FitCube, threshold=0.1): # Create the design matrix p = 2 #polynomial order XDes = self.setDesMat(self.Freqsp, order=p, mode="log") - # Solve the system Sol = np.dot(np.linalg.inv(XDes.T.dot(XDes)), np.dot(XDes.T, logI)) logIref = Sol[0, :] @@ -314,4 +317,4 @@ def EvalGP(self, coeffs, Freqsp=None): elif np.all(Freqsp == self.ref_freq): return self.GP.RR_From_Coeffs_Degrid_ref(coeffs) else: - raise NotImplementedError('GPR mode only predicts to GridFreqs, DegridFreqs and ref_freq') \ No newline at end of file + 
raise NotImplementedError('GPR mode only predicts to GridFreqs, DegridFreqs and ref_freq') diff --git a/DDFacet/Imager/SSD/ClassModelMachineSSD.py b/DDFacet/Imager/SSD/ClassModelMachineSSD.py index ce8df57b..bbd0ad30 100644 --- a/DDFacet/Imager/SSD/ClassModelMachineSSD.py +++ b/DDFacet/Imager/SSD/ClassModelMachineSSD.py @@ -68,7 +68,7 @@ def setRefFreq(self,RefFreq,Force=False):#,AllFreqs): # print "ModelMachine:",self.RefFreq, self.DicoSMStacked["RefFreq"], self.DicoSMStacked["AllFreqs"] def setFreqMachine(self,GridFreqs, DegridFreqs): - # Initiaise the Frequency Machine + # Initialise the Frequency Machine self.FreqMachine = ClassFrequencyMachine.ClassFrequencyMachine(GridFreqs, DegridFreqs, self.DicoSMStacked["RefFreq"], self.GD) def ToFile(self,FileName,DicoIn=None): From 25f1f255859e6b346985c76be30f8ca2283911eb Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 14 Jul 2017 10:53:45 +0200 Subject: [PATCH 19/58] Verbose logging in tests Fixes #380 --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 1 + 1 file changed, 1 insertion(+) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index d0f85748..5825ddf3 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -204,6 +204,7 @@ def setUpClass(cls): args = ['DDF.py', cls._outputParsetFilename, + '--Debug-APPVerbose 2', '--Output-Name=%s' % cls._imagePrefix] stdout_file = open(cls._stdoutLogFile, 'w') From a5bb62705937d0025493452cca2281cda4810a29 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Tue, 18 Jul 2017 08:59:10 +0200 Subject: [PATCH 20/58] should fix the verbose level parsing --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py 
b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index 5825ddf3..7e801e4f 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -204,7 +204,7 @@ def setUpClass(cls): args = ['DDF.py', cls._outputParsetFilename, - '--Debug-APPVerbose 2', + '--Debug-APPVerbose=2', '--Output-Name=%s' % cls._imagePrefix] stdout_file = open(cls._stdoutLogFile, 'w') From ffa74d7edea7a968947b78d7e3dbc1edd34bb680 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Tue, 18 Jul 2017 14:05:24 +0100 Subject: [PATCH 21/58] Crucial typo fix! --- DDFacet/Imager/ClassFrequencyMachine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Imager/ClassFrequencyMachine.py b/DDFacet/Imager/ClassFrequencyMachine.py index d6b869fb..94f1a189 100644 --- a/DDFacet/Imager/ClassFrequencyMachine.py +++ b/DDFacet/Imager/ClassFrequencyMachine.py @@ -48,7 +48,7 @@ def __init__(self, Freqs, Freqsp, ref_freq, GD=None): if len(Freqs)>len(Freqsp): self.Freqsp = np.asarray(Freqs) else: - self.Freqsp = np.asarray(Freqs) + self.Freqsp = np.asarray(Freqsp) self.nchan = self.Freqs.size self.nchan_degrid = self.Freqsp.size #print "Nchan =", self.nchan From 48b5ddddd991a257637c935c53e33da1d40ea728 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Tue, 5 Sep 2017 07:18:27 +0100 Subject: [PATCH 22/58] Remove reliance on old convolution routine, and tidy up --- DDFacet/Imager/MSMF/ClassMultiScaleMachine.py | 12 +-- DDFacet/ToolsDir/ModFFTW.py | 99 ++++++++++++++----- 2 files changed, 79 insertions(+), 32 deletions(-) diff --git a/DDFacet/Imager/MSMF/ClassMultiScaleMachine.py b/DDFacet/Imager/MSMF/ClassMultiScaleMachine.py index 7844f253..cbabd659 100644 --- a/DDFacet/Imager/MSMF/ClassMultiScaleMachine.py +++ b/DDFacet/Imager/MSMF/ClassMultiScaleMachine.py @@ -432,13 +432,13 @@ def MakeMultiScaleCube(self, verbose=False): Major=Scales[iScales]/(2.*np.sqrt(2.*np.log(2.))) 
Minor=Major/float(MinMajRatio) PSFGaussPars=(Major,Minor,th) - ThisSupport=int(np.max([Support,15*Major])) - if ThisSupport%2==0: ThisSupport+=1 - Gauss=ModFFTW.GiveGauss(ThisSupport,CellSizeRad=1.,GaussPars=PSFGaussPars) - ratio=np.sum(ModFFTW.GiveGauss(101,CellSizeRad=1.,GaussPars=PSFGaussPars))/np.sum(Gauss) + #ThisSupport=int(np.max([Support,15*Major])) + #if ThisSupport%2==0: ThisSupport+=1 + #Gauss=ModFFTW.GiveGauss(ThisSupport,CellSizeRad=1.,GaussPars=PSFGaussPars) + #ratio=np.sum(ModFFTW.GiveGauss(101,CellSizeRad=1.,GaussPars=PSFGaussPars))/np.sum(Gauss) #Gauss*=ratio #ThisPSF=ModFFTW.ConvolveGaussian(ThisMFPSF.reshape((nch,1,nx,ny)),CellSizeRad=1.,GaussPars=[PSFGaussPars]*self.NFreqBands)[:,0,:,:]#[0,0] - ThisPSF,Gauss=ModFFTW.ConvolveGaussianScipy(ThisMFPSF.reshape((nch,1,nx,ny)),Sig=Major,GaussPar=PSFGaussPars)#[0,0] + ThisPSF,Gauss=ModFFTW.ConvolveGaussianWrapper(ThisMFPSF.reshape((nch,1,nx,ny)),Sig=Major,GaussPar=PSFGaussPars)#[0,0] # import pylab # pylab.clf() @@ -456,7 +456,7 @@ def MakeMultiScaleCube(self, verbose=False): #fact=np.max(Gauss)/np.sum(Gauss) SumGauss=np.sum(Gauss) fact=1./SumGauss - fact=1./SumGauss#/(np.mean(np.max(np.max(ThisPSF,axis=-1),axis=-1))) + #fact=1./SumGauss#/(np.mean(np.max(np.max(ThisPSF,axis=-1),axis=-1))) #fact=1./Max Gauss*=fact#*ratio ThisPSF*=fact diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index 5f945dc2..dbc96551 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -281,25 +281,40 @@ def GiveGauss(Npix,CellSizeRad=None,GaussPars=(0.,0.,0.),dtype=np.float32,parall #Gauss/=np.sum(Gauss) return Gauss -def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): - #warnings.warn("deprecated: this wont work for small ffts...", - # DeprecationWarning) - Npix=int(2*8*Sig) - if Npix%2==0: Npix+=1 - x0=Npix/2 - x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] - #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) - if GaussPar is None: - GaussPar=(Sig,Sig,0) - 
in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) - - nch,npol,_,_=Ain0.shape - Out=np.zeros_like(Ain0) - for ch in range(nch): - in1=Ain0[ch,0] - Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real - return Out,in2 - +#def ConvolveGaussianScipy(Ain0,Sig=1.,GaussPar=None): +# #warnings.warn("deprecated: this wont work for small ffts...", +# # DeprecationWarning) +# Npix=int(2*8*Sig) +# if Npix%2==0: Npix+=1 +# x0=Npix/2 +# x,y=np.mgrid[-x0:x0:Npix*1j,-x0:x0:Npix*1j] +# #in2=np.exp(-(x**2+y**2)/(2.*Sig**2)) +# if GaussPar is None: +# GaussPar=(Sig,Sig,0) +# in2=Gaussian.Gaussian2D(x,y,GaussPar=GaussPar) +# +# nch,npol,_,_=Ain0.shape +# Out=np.zeros_like(Ain0) +# for ch in range(nch): +# in1=Ain0[ch,0] +# Out[ch,0,:,:]=scipy.signal.fftconvolve(in1, in2, mode='same').real +# return Out,in2 + +def ConvolveGaussianWrapper(Ain0,Sig=1.0,GaussPar=None): + # a drop-in replacement for ConvolveGaussianScipy which uses + # _convolveSingleGaussianNP . The factor sqrt(2) here in 'pixel + # size' is a fudge to make the two routines agree: the + # Gaussian/Gaussian2D code appears to be missing the factor 2 on + # the denominator of the Gaussian function it computes + nch,npol,_,_=Ain0.shape + Out=np.zeros_like(Ain0) + dict={'in':Ain0,'out':Out} + if GaussPar is None: + GaussPar=(Sig,Sig,0) + for ch in range(nch): + Aout,PSF=_convolveSingleGaussianNP(dict,'in','out',ch,np.sqrt(2),GaussPar,return_gaussian=True) + Out[ch,:,:,:]=Aout + return Out,PSF def learnFFTWWisdom(npix,dtype=np.float32): """Learns FFTW wisdom for real 2D FFT of npix x npix images""" @@ -320,7 +335,8 @@ def _convolveSingleGaussianFFTW(shareddict, CellSizeRad, GaussPars_ch, Normalise = False, - nthreads = 1): + nthreads = 1, + return_gaussian = False): """Convolves a single channel in a cube of nchan, npol, Ny, Nx @param shareddict: a dictionary containing an input and output array of size [nchans, npols, Ny, Nx] @@ -329,7 +345,9 @@ def _convolveSingleGaussianFFTW(shareddict, same as field_in @param 
ch: index of channel to convolve @param CellSizeRad: pixel size in radians of the gaussian in image space - @param Normalize: Normalize the guassian amplitude + @param nthreads: number of threads to use in FFTW + @param Normalize: Normalize the gaussian amplitude + @param return_gaussian: return the convolving Gaussian as well """ # The FFT needs to be big enough to avoid spectral leakage in the # transforms, so we pad both sides of the stack of images with the same @@ -379,11 +397,17 @@ def _convolveSingleGaussianFFTW(shareddict, threads=nthreads))[pad_edge//2:pad_edge//2+npix_y, pad_edge//2:pad_edge//2+npix_x] T.timeit("convolve %d" % ch) - return Aout + + if return_gaussian: + return Aout,PSF + else: + return Aout # LAPACK / ATLAS-based convolution -def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, - GaussPars_ch, Normalise = False): +def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, + CellSizeRad, GaussPars_ch, + Normalise = False, return_gaussian = False): + """Convolves a single channel in a cube of nchan, npol, Ny, Nx @param shareddict: a dictionary containing an input and output array of size [nchans, npols, Ny, Nx] @@ -392,7 +416,8 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, same as field_in @param ch: index of channel to convolve @param CellSizeRad: pixel size in radians of the gaussian in image space - @param Normalize: Normalize the guassian amplitude + @param Normalize: Normalize the Gaussian amplitude + @param return_gaussian: return the convolving Gaussian as well """ # The FFT needs to be big enough to avoid spectral leakage in the # transforms, so we pad both sides of the stack of images with the same @@ -437,7 +462,10 @@ def _convolveSingleGaussianNP(shareddict, field_in, field_out, ch, CellSizeRad, pad_edge//2:npix_x+pad_edge//2] T.timeit("convolve %d" % ch) - return Aout + if return_gaussian: + return Aout,PSF + else: + return Aout ConvolveGaussian = 
_convolveSingleGaussianFFTW @@ -834,3 +862,22 @@ def ConvolveGaussianParallel(shareddict, field_in, field_out, CellSizeRad=None,G # out=Fs(A,axes=axes)#*(A.shape[-1]*A.shape[-2]) # return out +#def test_gaussian(): +# input = np.zeros((1,1,512,512)) +# input[0,0,250:262,250:262]=1 +# out,gaussian=ConvolveGaussianScipy(input,4.0) +# np.save('orig-out.npy',out) +# np.save('orig-gaussian.npy',gaussian) + +#def test_new_gaussian(): +# input = np.zeros((1,1,512,512)) +# input[0,0,250:262,250:262]=1 +# out,gaussian=ConvolveGaussianWrapper(input,4.0) +# np.save('new-out.npy',out) +# np.save('new-gaussian.npy',gaussian) + +#if __name__=='__main__': +# print 'Running test_gaussian' +# test_gaussian() +# print 'Running test_new_gaussian' +# test_new_gaussian() From 83653e6e105e12c444692e498f34707a45816cfc Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Sat, 9 Sep 2017 09:50:02 +0100 Subject: [PATCH 23/58] Fix problem loading CF caches with wmax=[] --- DDFacet/Array/NpShared.py | 2 +- DDFacet/Imager/ClassFacetMachine.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/DDFacet/Array/NpShared.py b/DDFacet/Array/NpShared.py index 65e772df..1948ab28 100644 --- a/DDFacet/Array/NpShared.py +++ b/DDFacet/Array/NpShared.py @@ -79,7 +79,7 @@ def CreateShared(Name, shape, dtype): def ToShared(Name, A): a = CreateShared(Name, A.shape, A.dtype) - a[:] = A[:] + np.copyto(a,A) return a def DelArray(Name): diff --git a/DDFacet/Imager/ClassFacetMachine.py b/DDFacet/Imager/ClassFacetMachine.py index 4d3d5dd4..d3c96bb3 100644 --- a/DDFacet/Imager/ClassFacetMachine.py +++ b/DDFacet/Imager/ClassFacetMachine.py @@ -652,8 +652,9 @@ def _initcf_worker (self, iFacet, facet_dict, cachepath, cachevalid, wmax): # validate dict ClassDDEGridMachine.ClassDDEGridMachine.verifyCFDict(facet_dict, self.GD["CF"]["Nw"]) return "cached",path,iFacet - except: + except Exception as e: #print>>log,traceback.format_exc() #confusing... 
+ print>>log, 'Exception on Cache loading and checking was',str(e) print>>log, "Error loading %s, will re-generate"%path facet_dict.delete() # ok, regenerate the terms at this point From 7b84872c6e1d10ce95418c709946f52e9c3faae3 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Sat, 9 Sep 2017 14:33:36 +0100 Subject: [PATCH 24/58] fits header fix to stop CASA complaining, and add version --- .gitignore | 3 +++ DDFacet/DDF.py | 25 +------------------------ DDFacet/Imager/ClassCasaImage.py | 4 +++- DDFacet/report_version.py | 28 ++++++++++++++++++++++++++++ 4 files changed, 35 insertions(+), 25 deletions(-) create mode 100644 DDFacet/report_version.py diff --git a/.gitignore b/.gitignore index 476e233c..e8af2838 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# emacs backups +*~ + # Object files *.o *.ko diff --git a/DDFacet/DDF.py b/DDFacet/DDF.py index e597eb38..5676ad21 100755 --- a/DDFacet/DDF.py +++ b/DDFacet/DDF.py @@ -53,34 +53,11 @@ from DDFacet.Other import progressbar from DDFacet.Other.AsyncProcessPool import APP, WorkerProcessError import DDFacet.cbuild.Gridder._pyArrays as _pyArrays -from DDFacet.version import __version__ +from DDFacet.report_version import report_version log = None -# from version import __version__ import numpy as np -def report_version(): - # perhaps we are in a github with tags; in that case return describe - path = os.path.dirname(os.path.abspath(__file__)) - try: - # work round possible unavailability of git -C - result = subprocess.check_output('cd %s; git describe --tags' % path, shell=True, stderr=subprocess.STDOUT).rstrip() - except subprocess.CalledProcessError: - result = None - if result is not None: - # will succeed if tags exist - return result - else: - # perhaps we are in a github without tags? 
Cook something up if so - try: - result = subprocess.check_output('cd %s; git rev-parse --short HEAD' % path, shell=True, stderr=subprocess.STDOUT).rstrip() - except subprocess.CalledProcessError: - result = None - if result is not None: - return __version__+'-'+result - else: - # we are probably in an installed version - return __version__ # # ############################## # # Catch numpy warning diff --git a/DDFacet/Imager/ClassCasaImage.py b/DDFacet/Imager/ClassCasaImage.py index b5c1f74c..f9528f7f 100644 --- a/DDFacet/Imager/ClassCasaImage.py +++ b/DDFacet/Imager/ClassCasaImage.py @@ -29,6 +29,7 @@ log= MyLogger.getLogger("ClassCasaImage") from astropy.io import fits from astropy.wcs import WCS +from DDFacet.report_version import report_version def FileToArray(FileName,CorrT): """ Read a FITS FileName file to an array """ @@ -79,9 +80,10 @@ def __init__(self,ImageName,ImShape,Cell,radec,Freqs=None,KeepCasa=False,Stokes= self.imageFlipped = False self.createScratch() # Fill in some standard keywords - self.header['ORIGIN'] = 'DDFacet' + self.header['ORIGIN'] = 'DDFacet '+report_version() self.header['BTYPE'] = 'Intensity' self.header['BUNIT'] = 'Jy/beam' + self.header['SPECSYS'] = 'TOPOCENT' if header_dict is not None: for k in header_dict: self.header[k]=header_dict[k] diff --git a/DDFacet/report_version.py b/DDFacet/report_version.py new file mode 100644 index 00000000..48b25775 --- /dev/null +++ b/DDFacet/report_version.py @@ -0,0 +1,28 @@ +import os +import subprocess + +from DDFacet.version import __version__ + + +def report_version(): + # perhaps we are in a github with tags; in that case return describe + path = os.path.dirname(os.path.abspath(__file__)) + try: + # work round possible unavailability of git -C + result = subprocess.check_output('cd %s; git describe --tags' % path, shell=True, stderr=subprocess.STDOUT).rstrip() + except subprocess.CalledProcessError: + result = None + if result is not None: + # will succeed if tags exist + return result + 
else: + # perhaps we are in a github without tags? Cook something up if so + try: + result = subprocess.check_output('cd %s; git rev-parse --short HEAD' % path, shell=True, stderr=subprocess.STDOUT).rstrip() + except subprocess.CalledProcessError: + result = None + if result is not None: + return __version__+'-'+result + else: + # we are probably in an installed version + return __version__ From 4fa4d71448a9363a0f687e1f778c65293d36ebc1 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Mon, 11 Sep 2017 15:18:57 +0100 Subject: [PATCH 25/58] Work round odd behaviour where an error in git seems not to be picked up by subprocess code --- DDFacet/report_version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DDFacet/report_version.py b/DDFacet/report_version.py index 48b25775..ec077572 100644 --- a/DDFacet/report_version.py +++ b/DDFacet/report_version.py @@ -12,7 +12,7 @@ def report_version(): result = subprocess.check_output('cd %s; git describe --tags' % path, shell=True, stderr=subprocess.STDOUT).rstrip() except subprocess.CalledProcessError: result = None - if result is not None: + if result is not None and 'fatal' not in result: # will succeed if tags exist return result else: @@ -21,7 +21,7 @@ def report_version(): result = subprocess.check_output('cd %s; git rev-parse --short HEAD' % path, shell=True, stderr=subprocess.STDOUT).rstrip() except subprocess.CalledProcessError: result = None - if result is not None: + if result is not None and 'fatal' not in result: return __version__+'-'+result else: # we are probably in an installed version From 49fe5f772bb94e6b4212e36de5f9b60411d0b251 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 14 Sep 2017 13:22:15 +0100 Subject: [PATCH 26/58] Remove debugging print --- DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py index 
19caeb1b..60a4635e 100644 --- a/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py +++ b/DDFacet/Imager/SSD/ClassImageDeconvMachineSSD.py @@ -135,7 +135,7 @@ def Init(self,**kwargs): self.setSideLobeLevel(kwargs["PSFAve"][0], kwargs["PSFAve"][1]) self.ModelMachine.setRefFreq(kwargs["RefFreq"]) # store grid and degrid freqs for ease of passing to MSMF - print kwargs["GridFreqs"],kwargs["DegridFreqs"] + #print kwargs["GridFreqs"],kwargs["DegridFreqs"] self.GridFreqs=kwargs["GridFreqs"] self.DegridFreqs=kwargs["DegridFreqs"] self.ModelMachine.setFreqMachine(kwargs["GridFreqs"], kwargs["DegridFreqs"]) From f7c1cef8881e9102d2d343f0c9769e3bdc6b41e7 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 14 Sep 2017 13:23:41 +0100 Subject: [PATCH 27/58] Fix problem with shift mode --- DDFacet/Imager/ClassFacetMachine.py | 4 ++-- DDFacet/ToolsDir/ModFFTW.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/DDFacet/Imager/ClassFacetMachine.py b/DDFacet/Imager/ClassFacetMachine.py index d3c96bb3..197d440f 100644 --- a/DDFacet/Imager/ClassFacetMachine.py +++ b/DDFacet/Imager/ClassFacetMachine.py @@ -1628,8 +1628,8 @@ def _convolveShift_worker(self, iFacet, d_mat, dl,dm, majax,minax,PA=PSFGaussParsAvg PA+=np.pi/2 - ModelConv=ModFFTW.ConvolveGaussian(Model, CellSizeRad=self.CellSizeRad, - GaussPars=[(majax,minax,PA)]) + ModelConv=ModFFTW.ConvolveGaussianSimpleWrapper(Model, CellSizeRad=self.CellSizeRad, + GaussPars=(majax,minax,PA)) #indx,indy=np.where(self._CF[iFacet]["SW"]!=0) #ModelConv[0,0,indx,indy]=ModelConv[0,0,indx,indy]/self._CF[iFacet]["SW"][indx,indy] diff --git a/DDFacet/ToolsDir/ModFFTW.py b/DDFacet/ToolsDir/ModFFTW.py index dbc96551..b616c56b 100644 --- a/DDFacet/ToolsDir/ModFFTW.py +++ b/DDFacet/ToolsDir/ModFFTW.py @@ -316,6 +316,18 @@ def ConvolveGaussianWrapper(Ain0,Sig=1.0,GaussPar=None): Out[ch,:,:,:]=Aout return Out,PSF +def ConvolveGaussianSimpleWrapper(Ain0, CellSizeRad=1.0, Sig=1.0, GaussPars=None): + 
nch,npol,_,_=Ain0.shape + Out=np.zeros_like(Ain0) + dict={'in':Ain0,'out':Out} + if GaussPars is None: + GaussPars=(Sig,Sig,0) + for ch in range(nch): + Aout=_convolveSingleGaussianNP(dict,'in','out',ch,CellSizeRad,GaussPars) + Out[ch,:,:,:]=Aout + return Out + + def learnFFTWWisdom(npix,dtype=np.float32): """Learns FFTW wisdom for real 2D FFT of npix x npix images""" print>>log, " Computing fftw wisdom FFTs for shape [%i x %i] and dtype %s" % (npix,npix,dtype.__name__) From 65b84b480fc05dc23844b637ba1db0d3fd2df2b5 Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Thu, 14 Sep 2017 16:45:03 +0100 Subject: [PATCH 28/58] Allow a small amount of divergence in automasking --- DDFacet/Imager/ClassImageNoiseMachine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Imager/ClassImageNoiseMachine.py b/DDFacet/Imager/ClassImageNoiseMachine.py index d6a87bf3..87562c13 100644 --- a/DDFacet/Imager/ClassImageNoiseMachine.py +++ b/DDFacet/Imager/ClassImageNoiseMachine.py @@ -150,7 +150,7 @@ def giveBrutalRestored(self,DicoResidual): print>>log,"Deconvolving on SNR map" self.GD["Deconv"]["RMSFactor"]=0. - self.GD["HMP"]["AllowResidIncrease"]=False + self.GD["HMP"]["AllowResidIncrease"]=0.1 #self.GD["HMP"]["SolverMode"]="PI" DicoVariablePSF=self.DicoVariablePSF self.NFreqBands=len(DicoVariablePSF["freqs"]) From 38c714f225bbfb84e0a1ea05c1b6e01984fd2fd1 Mon Sep 17 00:00:00 2001 From: Oleg Smirnov Date: Thu, 5 Oct 2017 12:45:31 +0200 Subject: [PATCH 29/58] fixes #361. I think. 
--- DDFacet/Other/AsyncProcessPool.py | 202 +++++++++++++++--------------- 1 file changed, 104 insertions(+), 98 deletions(-) diff --git a/DDFacet/Other/AsyncProcessPool.py b/DDFacet/Other/AsyncProcessPool.py index 6b925162..ba6b8ec4 100644 --- a/DDFacet/Other/AsyncProcessPool.py +++ b/DDFacet/Other/AsyncProcessPool.py @@ -353,121 +353,124 @@ def startWorkers(self): def restartWorkers(self): if self.ncpu > 1: + if self._termination_event.is_set(): + if self.verbose > 1: + print>> log, "termination event spotted, exiting" + raise WorkerProcessError() print>> log, "asking worker processes to restart" self._workers_started_event.clear() self._taras_restart_event.set() def awaitWorkerStart(self): - if self.ncpu > 1 and not self._workers_started_event.is_set(): - print>>log,"waiting for worker processes to start up" - self._workers_started_event.wait() + if self.ncpu > 1: + while not self._workers_started_event.is_set(): + if self._termination_event.is_set(): + if self.verbose > 1: + print>> log, "termination event spotted, exiting" + raise WorkerProcessError() + print>> log, "waiting for worker processes to start up" + self._workers_started_event.wait(10) def _startBulba (self): """This runs the Taras Bulba process. A Taras Bulba spawns and kills worker processes on demand. The reason for killing workers is to work around potential memory leaks. 
Since a Bulba is forked from the main process early on, it has a very low RAM footprint, so re-forking the workers off a Bulba every so often makes sure their RAM usage is reset.""" - Exceptions.disable_pdb_on_error() - MyLogger.subprocess_id = "TB" - self._oldhandler = None - # self.verbose = 1 - - def sighandler(signum, frame): - #if self.verbose: - # print>> log, "SIGCHLD caught: sincere lament" - self._dead_child = True - # self._taras_restart_event.set() - - # loop until the completion event is raised - # at this stage the workers are dead (or not started) - while not self._taras_exit_event.is_set(): - if self.verbose: - print>>log, "(re)creating worker processes" - # create the workers - self._compute_workers = [] - self._io_workers = [] - for i, core in enumerate(self._cores): - proc_id = "comp%02d" % i - self._compute_workers.append( - multiprocessing.Process(name=proc_id, target=self._start_worker, - args=(self, proc_id, [core], self._compute_queue, - self.pause_on_start))) - for i, queue in enumerate(self._io_queues): - proc_id = "io%02d" % i - self._io_workers.append( - multiprocessing.Process(name=proc_id, target=self._start_worker, - args=(self, proc_id, None, queue, self.pause_on_start))) - - # since the handler gets propagated to children, set it to ignore before starting a worker - signal.signal(signal.SIGCHLD, signal.SIG_IGN) - - # start the workers - if self.verbose: - print>>log, "starting worker processes" - for proc in self._compute_workers + self._io_workers: - proc.start() - - # set the real signal handler - self._dead_child = None - signal.signal(signal.SIGCHLD, sighandler) - - # set event to indicate workers are started - self._workers_started_event.set() + try: + Exceptions.disable_pdb_on_error() + MyLogger.subprocess_id = "TB" - # go to sleep until we're told to do the whole thing again - while not self._taras_restart_event.is_set() and not self._dead_child: + # loop until the completion event is raised + # at this stage the workers are 
dead (or not started) + while not self._taras_exit_event.is_set(): if self.verbose: - print>>log, "waiting for restart signal" - try: - self._taras_restart_event.wait(5) + print>>log, "(re)creating worker processes" + # create the workers + self._compute_workers = [] + self._io_workers = [] + for i, core in enumerate(self._cores): + proc_id = "comp%02d" % i + self._compute_workers.append( + multiprocessing.Process(name=proc_id, target=self._start_worker, + args=(self, proc_id, [core], self._compute_queue, + self.pause_on_start))) + for i, queue in enumerate(self._io_queues): + proc_id = "io%02d" % i + self._io_workers.append( + multiprocessing.Process(name=proc_id, target=self._start_worker, + args=(self, proc_id, None, queue, self.pause_on_start))) + + # start the workers + if self.verbose: + print>>log, "starting worker processes" + worker_map = {} + for proc in self._compute_workers + self._io_workers: + proc.start() + worker_map[proc.pid] = proc + dead_workers = {} + + # set event to indicate workers are started + self._workers_started_event.set() + + # go to sleep until we're told to do the whole thing again + while not self._taras_restart_event.is_set(): if self.verbose: - print>>log, "wait done" - except KeyboardInterrupt: - print>>log,ModColor.Str("Ctrl+C caught, exiting") - self._termination_event.set() - self._taras_exit_event.set() - # check for dead workers. 
This is probably a good time to bail out - if self._dead_child: - self._dead_child = 0 - for p in self._compute_workers + self._io_workers: - if not p.is_alive(): - if p.exitcode < 0: - print>>log,ModColor.Str("worker '%s' killed by signal %s" % (p.name, SIGNALS_TO_NAMES_DICT[-p.exitcode])) + print>>log, "waiting for restart signal" + try: + self._taras_restart_event.wait(5) + if self.verbose: + print>>log, "wait done" + except KeyboardInterrupt: + print>>log,ModColor.Str("Ctrl+C caught, exiting") + self._termination_event.set() + self._taras_exit_event.set() + # check for dead children + for pid, proc in worker_map.iteritems(): + if not proc.is_alive(): + proc.join() + dead_workers[proc.pid] = proc + if proc.exitcode < 0: + print>>log,ModColor.Str("worker '%s' killed by signal %s" % (proc.name, SIGNALS_TO_NAMES_DICT[-proc.exitcode])) else: - print>>log,ModColor.Str("worker '%s' died with exit code %d"%(p.name, p.exitcode)) - self._dead_child += 1 - # if it was really our workers that died, initiate bailout - if self._dead_child: - print>>log,ModColor.Str("%d worker(s) have died. Initiating shutdown."%self._dead_child) - self._taras_restart_event.set() + print>>log,ModColor.Str("worker '%s' died with exit code %d"%(proc.name, proc.exitcode)) + # if workers have died, initiate bailout + if dead_workers: + print>>log,ModColor.Str("%d worker(s) have died. 
Initiating shutdown."%len(dead_workers)) + self._taras_restart_event.set() # to break out of loop self._termination_event.set() self._taras_exit_event.set() - self._taras_restart_event.clear() - if self._termination_event.is_set(): + self._taras_restart_event.clear() + if self._termination_event.is_set(): + if self.verbose: + print>> log, "terminating workers" + for proc in worker_map.itervalues(): + if proc.is_alive(): + proc.terminate() + # else let them all shut down in an orderly way, by giving them a poison pill + else: + if self.verbose: + print>> log, "restart signal received, notifying workers" + for proc in self._compute_workers: + if proc.is_alive(): + self._compute_queue.put("POISON-E") + for proc, queue in zip(self._io_workers, self._io_queues): + if proc.is_alive(): + queue.put("POISON-E") if self.verbose: - print>> log, "terminating workers" - for p in self._compute_workers + self._io_workers: - if p.is_alive(): - p.terminate() - # else let them all shut down in an orderly way, by giving them a poison pill - else: + print>> log, "reaping workers" + # join processes + for proc in worker_map.itervalues(): + proc.join() if self.verbose: - print>> log, "restart signal received, notifying workers" - for p in self._compute_workers: - if p.is_alive(): - self._compute_queue.put("POISON-E") - for p, queue in zip(self._io_workers, self._io_queues): - if p.is_alive(): - queue.put("POISON-E") - if self.verbose: - print>> log, "reaping workers" - # join processes - for p in self._compute_workers + self._io_workers: - p.join() + print>> log, "all workers rejoined" if self.verbose: - print>> log, "all workers rejoined" - if self.verbose: - print>>log, "exiting" + print>>log, "exiting" + except: + print>>log,ModColor.Str("exception raised in Taras Bulba process, see below. 
This is a bug!") + print>>log,traceback.format_exc() + self._workers_started_event.set() + self._termination_event.set() + self._taras_exit_event.set() def runJob (self, job_id, handler=None, io=None, args=(), kwargs={}, event=None, counter=None, @@ -723,9 +726,12 @@ def shutdown(self): self._taras_exit_event.set() self._taras_restart_event.set() if self._taras_bulba: - if self.verbose > 1: - print>>log,"shutdown: waiting for TB to exit" - self._taras_bulba.join() +# if self._taras_bulba.is_alive(): + if self.verbose > 1: + print>> log, "shutdown: waiting for TB to exit" + self._taras_bulba.join() +# else: +# print>> log, "shutdown: TB is already dead" if self.verbose > 1: print>> log, "shutdown: closing queues" # join and close queues From 698e172094d0178aa4c5937b24b2b0dec077588c Mon Sep 17 00:00:00 2001 From: Martin Hardcastle Date: Tue, 10 Oct 2017 11:31:08 +0100 Subject: [PATCH 30/58] Fix for smooth beam in data with non-matching channel numbers --- DDFacet/Data/ClassBeamMean.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/DDFacet/Data/ClassBeamMean.py b/DDFacet/Data/ClassBeamMean.py index b2e243f2..31c837a6 100644 --- a/DDFacet/Data/ClassBeamMean.py +++ b/DDFacet/Data/ClassBeamMean.py @@ -142,6 +142,7 @@ def StackBeam(self,ThisMSData,iDir): A1s=A1[ind] fs=flags[ind] Ws=W[ind] + MSnchan=Ws.shape[1] T.timeit("1") nd,na,nch,_,_=J.shape @@ -172,7 +173,8 @@ def StackBeam(self,ThisMSData,iDir): WW=Ws**2 T.timeit("4") - WW=WW.reshape((1,ind.size,self.MS.Nchan)) + print>>log, 'WW.shape is', WW.shape + WW=WW.reshape((1,ind.size,MSnchan)) T.timeit("5") JJsq=WW*JJ**2 T.timeit("6") @@ -188,8 +190,8 @@ def StackBeam(self,ThisMSData,iDir): for iBand in range(self.VS.NFreqBands): indFreqBand,=np.where(ChanToFreqBand==iBand) if indFreqBand.size==0: continue - self.StackedBeamDict[iDir]["SumJJsq"][iBand]+=np.sum(SumWsqThisRange.reshape((self.MS.Nchan,))[indFreqBand]) - 
self.StackedBeamDict[iDir]["SumWsq"][iBand]+=np.sum(SumWsq.reshape((self.MS.Nchan,))[indFreqBand]) + self.StackedBeamDict[iDir]["SumJJsq"][iBand]+=np.sum(SumWsqThisRange.reshape((MSnchan,))[indFreqBand]) + self.StackedBeamDict[iDir]["SumWsq"][iBand]+=np.sum(SumWsq.reshape((MSnchan,))[indFreqBand]) #print SumWsq,self.SumWsq,self.SumJJsq.shape,J0.shape T.timeit("9") From 5f9c4598a12bc5d737c5886ed61e775eb4ad2dda Mon Sep 17 00:00:00 2001 From: Oleg Smirnov Date: Tue, 10 Oct 2017 14:22:17 +0200 Subject: [PATCH 31/58] another race condition fixed (related to #361). Added brute-force purge of particularly survivalist workers --- DDFacet/Other/AsyncProcessPool.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/DDFacet/Other/AsyncProcessPool.py b/DDFacet/Other/AsyncProcessPool.py index ba6b8ec4..fb2ffc39 100644 --- a/DDFacet/Other/AsyncProcessPool.py +++ b/DDFacet/Other/AsyncProcessPool.py @@ -451,16 +451,20 @@ def _startBulba (self): if self.verbose: print>> log, "restart signal received, notifying workers" for proc in self._compute_workers: - if proc.is_alive(): - self._compute_queue.put("POISON-E") + self._compute_queue.put("POISON-E") for proc, queue in zip(self._io_workers, self._io_queues): - if proc.is_alive(): - queue.put("POISON-E") + queue.put("POISON-E") if self.verbose: print>> log, "reaping workers" # join processes - for proc in worker_map.itervalues(): - proc.join() + for pid, proc in worker_map.iteritems(): + if self.verbose: + print>> log, "joining worker '%s' (%d)"%(proc.name, pid) + proc.join(5) + if proc.is_alive(): + print>> log, ModColor.Str("worker '%s' clinging on to life after 5s, killing it"%proc.name) + proc.terminate() + proc.join(5) if self.verbose: print>> log, "all workers rejoined" if self.verbose: From 2c299176b5d0c0e2dc35377f4e5b690cb1856acd Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 13 Oct 2017 12:30:36 +0200 Subject: [PATCH 32/58] Update ClassDeconvMachine.py Fixes #389. 
See comment in issue. --- DDFacet/Imager/ClassDeconvMachine.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index 95d30289..e30d5e14 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -1944,8 +1944,11 @@ def alphaconvmap(): # beam=self.FWHMBeamAvg, Stokes=self.VS.StokesConverter.RequiredStokesProducts())) # mixed-flux restored image - if havenorm and "x" in self._saveims: - a, b = appres(), intconvmodel() + # (apparent noise + intrinsic model) if intrinsic model is available + # (apparent noise + apparent model) otherwise, (JonesNorm ~= 1) + if "x" in self._saveims: + a, b = appres(), intconvmodel() if havenorm else \ + appres(), appconvmodel() out = _images.addSharedArray('mixrestored', a.shape, a.dtype) numexpr.evaluate('a+b', out=out) APP.runJob("save:mixrestored", self._saveImage_worker, io=0, args=(_images.readwrite(), "mixrestored",), From 215447ecb064ee9e05ba146f5a986f24496d998b Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 13 Oct 2017 14:19:27 +0200 Subject: [PATCH 33/58] Update ClassDeconvMachine.py --- DDFacet/Imager/ClassDeconvMachine.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DDFacet/Imager/ClassDeconvMachine.py b/DDFacet/Imager/ClassDeconvMachine.py index e30d5e14..0615981c 100644 --- a/DDFacet/Imager/ClassDeconvMachine.py +++ b/DDFacet/Imager/ClassDeconvMachine.py @@ -1944,11 +1944,11 @@ def alphaconvmap(): # beam=self.FWHMBeamAvg, Stokes=self.VS.StokesConverter.RequiredStokesProducts())) # mixed-flux restored image - # (apparent noise + intrinsic model) if intrinsic model is available - # (apparent noise + apparent model) otherwise, (JonesNorm ~= 1) + # (apparent noise + intrinsic model) if intrinsic model is available + # (apparent noise + apparent model) otherwise, (JonesNorm ~= 1) if "x" in self._saveims: - a, b = appres(), intconvmodel() if havenorm else \ 
- appres(), appconvmodel() + a, b = (appres(), intconvmodel()) if havenorm else \ + (appres(), appconvmodel()) out = _images.addSharedArray('mixrestored', a.shape, a.dtype) numexpr.evaluate('a+b', out=out) APP.runJob("save:mixrestored", self._saveImage_worker, io=0, args=(_images.readwrite(), "mixrestored",), From d26f13254f6cccb4b3a1dd22f7cfc51060a1b831 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Thu, 2 Nov 2017 09:24:42 +0200 Subject: [PATCH 34/58] Update ClassCompareFITSImage.py --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index 7e801e4f..c6e679c5 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -26,6 +26,7 @@ import numpy as np from DDFacet.Parset.ReadCFG import Parset from astropy.io import fits +from nose.tools import timed class ClassCompareFITSImage(unittest.TestCase): """ Automated assurance test: reference FITS file regression (abstract class) @@ -142,6 +143,7 @@ def setParsetOption(cls, section, option, value): cls._defaultParset.set(section, option, value) @classmethod + @timed(10800) # should really not take longer than 3 hours def setUpClass(cls): unittest.TestCase.setUpClass() cls._inputDir = getenv('DDFACET_TEST_DATA_DIR','./')+"/" From def3056f78b0da2d9747d99e7dd7d021c8f55368 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Thu, 2 Nov 2017 20:59:40 +0200 Subject: [PATCH 35/58] Update ClassCompareFITSImage.py --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index c6e679c5..f8782755 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ 
b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -143,7 +143,7 @@ def setParsetOption(cls, section, option, value): cls._defaultParset.set(section, option, value) @classmethod - @timed(10800) # should really not take longer than 3 hours + @timed(18000) # should really not take longer than 5 hours def setUpClass(cls): unittest.TestCase.setUpClass() cls._inputDir = getenv('DDFACET_TEST_DATA_DIR','./')+"/" From 0b4295f8d7d00a4a9a98bd52e358c7a82405c7e5 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Mon, 6 Nov 2017 16:26:05 +0200 Subject: [PATCH 36/58] Update ClassCompareFITSImage.py Kill DDF pipe after 6 hours --- .../ClassCompareFITSImage.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index f8782755..9e9b3de5 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -26,7 +26,7 @@ import numpy as np from DDFacet.Parset.ReadCFG import Parset from astropy.io import fits -from nose.tools import timed +from subprocess import Popen, TimeoutExpired class ClassCompareFITSImage(unittest.TestCase): """ Automated assurance test: reference FITS file regression (abstract class) @@ -143,7 +143,6 @@ def setParsetOption(cls, section, option, value): cls._defaultParset.set(section, option, value) @classmethod - @timed(18000) # should really not take longer than 5 hours def setUpClass(cls): unittest.TestCase.setUpClass() cls._inputDir = getenv('DDFACET_TEST_DATA_DIR','./')+"/" @@ -213,8 +212,18 @@ def setUpClass(cls): stderr_file = open(cls._stderrLogFile, 'w') with stdout_file, stderr_file: - subprocess.check_call(args, env=os.environ.copy(), - stdout=stdout_file, stderr=stderr_file) + p = Popen(args, + env=os.environ.copy() + stdout=stdout_file, + stderr=stderr_file) + try: + ret = p.wait(21600) + except TimeoutExpired: 
+ p.kill() + raise + if ret != 0: + raise RuntimeError("DDF exited with non-zero return code %d" % ret) + #Finally open up output FITS files for testing and build a dictionary of them for ref_id in cls.defineImageList(): From 54cf87d9e6c59a827536db8d5d0d6bf1d787e89a Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Tue, 7 Nov 2017 14:36:30 +0200 Subject: [PATCH 37/58] Update ClassCompareFITSImage.py --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index 9e9b3de5..901c53bf 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -213,7 +213,7 @@ def setUpClass(cls): with stdout_file, stderr_file: p = Popen(args, - env=os.environ.copy() + env=os.environ.copy(), stdout=stdout_file, stderr=stderr_file) try: From d1413d59c177fb605787463f66c9260054f36edb Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 10 Nov 2017 17:48:26 +0200 Subject: [PATCH 38/58] Python 2.7 compatible timeout polling Previous attempt only works in Python 3. 
This should work with 2.7 --- .../ClassCompareFITSImage.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index 901c53bf..caaea7cc 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -26,7 +26,8 @@ import numpy as np from DDFacet.Parset.ReadCFG import Parset from astropy.io import fits -from subprocess import Popen, TimeoutExpired +from subprocess import Popen +import time class ClassCompareFITSImage(unittest.TestCase): """ Automated assurance test: reference FITS file regression (abstract class) @@ -216,11 +217,17 @@ def setUpClass(cls): env=os.environ.copy(), stdout=stdout_file, stderr=stderr_file) - try: - ret = p.wait(21600) - except TimeoutExpired: + x = 21600 + delay = 1.0 + timeout = int(x / delay) + while task.poll() is None and timeout > 0: + time.sleep(delay) + timeout -= delay + #timeout reached, kill process if it is still rolling + if task.poll() is None: p.kill() - raise + ret = 1 + if ret != 0: raise RuntimeError("DDF exited with non-zero return code %d" % ret) From 1a275a8a15d454a1a1696e143a98a1a89504997c Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 10 Nov 2017 18:24:12 +0200 Subject: [PATCH 39/58] Update README.rst Add directory names --- README.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index e2f8fb85..63792e40 100644 --- a/README.rst +++ b/README.rst @@ -204,7 +204,17 @@ Acceptance test data can be found on the Jenkins server in the **/data/test-data Adding more tests and creating new reference images. --------------------------------------------------------- To resimulate images and add more tests: -In the Jenkins server data directory run **make** to resimulate and set up new reference images. 
This should only be done with the ``origin/master`` branch - not your branch or fork! You should manually verify that all the reference images are correct when you regenerate them. Each time you add a new option to DDFacet also add an option to the makefile in this directory. Once the option is set up in the makefile you can build the reference images on Jenkins. + +In the Jenkins server data directory add a recipe to the makefile simulate and/or set up new reference images. This should only be done with the ``origin/master`` branch - not your branch or fork! Use the ddfacet-generate-refims task +to do this. You should manually verify that all the reference images are correct when you regenerate them. Each time you add a new option to DDFacet also add an option to the makefile in this directory. Once the option is set up in the makefile you can build the reference images on Jenkins. + +Important directories on the CI server: +--------------------------------------------------------- + - Reference data stored here: /var/lib/jenkins/test-data + - /var/lib/jenkins/jobs/ddfacet-pr-build/workspace + - /var/lib/jenkins/jobs/DDFacet_master_cron/workspace + - /var/lib/jenkins/jobs/DDFacet_experimental/workspace + [tf_pip_install]: From 962c9b80883b0482d06c676665c7efd6fc29f901 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 10 Nov 2017 18:41:52 +0200 Subject: [PATCH 40/58] Unfix specific versions for release Now that things are reasonably stable we can make this more flexible. As per discussion with Gijs: set minimum requirements on versions that gets installed on users' machines. 
--- setup.py | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/setup.py b/setup.py index 2a089879..16466756 100755 --- a/setup.py +++ b/setup.py @@ -123,28 +123,28 @@ def define_scripts(): }, packages=[pkg, skymodel_pkg], install_requires=[ - "Cython == 0.25.2", - "numpy == 1.13.0rc2", - "SharedArray == 2.0.2", - "Polygon2 == 2.0.8", - "pyFFTW == 0.10.4", - "nose == 1.3.7", - "astropy == 1.3.3", - "deap == 1.0.1", #invalid release tag on v1.0.2 refuse to use - "ipdb == 0.10.3", - "python-casacore == 2.1.2", - "pyephem == 3.7.6.0", - "numexpr == 2.6.2", - "pyfits == 3.4", - "matplotlib == 2.0.2", - "scipy == 0.19.0", - "astro-kittens == 0.3.3", - "meqtrees-cattery == 1.5.1", - "owlcat == 1.4.2", - "astLib == 0.8.0", - "psutil == 5.2.2", - "astro-tigger == 1.3.5", - "py-cpuinfo == 3.2.0", + "Cython >= 0.25.2", + "numpy >= 1.13.0rc2", + "SharedArray >= 2.0.2", + "Polygon2 >= 2.0.8", + "pyFFTW >= 0.10.4", + "nose >= 1.3.7", + "astropy >= 1.3.3", + "deap >= 1.0.1", + "ipdb >= 0.10.3", + "python-casacore >= 2.1.2", + "pyephem >= 3.7.6.0", + "numexpr >= 2.6.2", + "pyfits >= 3.4", + "matplotlib >= 2.0.2", + "scipy >= 0.19.0", + "astro-kittens >= 0.3.3", + "meqtrees-cattery >= 1.5.1", + "owlcat >= 1.4.2", + "astLib >= 0.8.0", + "psutil >= 5.2.2", + "astro-tigger >= 1.3.5", + "py-cpuinfo >= 3.2.0", ], include_package_data=True, zip_safe=False, From 16d7fc3f3c9c3f85c8bf70f58aa5484419c9afd7 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 10 Nov 2017 18:57:56 +0200 Subject: [PATCH 41/58] fix typo --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index caaea7cc..3b9b2039 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py 
@@ -220,11 +220,11 @@ def setUpClass(cls): x = 21600 delay = 1.0 timeout = int(x / delay) - while task.poll() is None and timeout > 0: + while p.poll() is None and timeout > 0: time.sleep(delay) timeout -= delay #timeout reached, kill process if it is still rolling - if task.poll() is None: + if p.poll() is None: p.kill() ret = 1 From 643a06c90f6241f8e85dafce55b5989500b194c6 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Fri, 10 Nov 2017 19:00:57 +0200 Subject: [PATCH 42/58] fix logic - didn't catch return value --- DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py index 3b9b2039..59ac8777 100644 --- a/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py +++ b/DDFacet/Tests/ShortAcceptanceTests/ClassCompareFITSImage.py @@ -224,7 +224,8 @@ def setUpClass(cls): time.sleep(delay) timeout -= delay #timeout reached, kill process if it is still rolling - if p.poll() is None: + ret = p.poll() + if ret is None: p.kill() ret = 1 From e87f7dedb96a9ff78ae9bdf7175390a9fce37c7d Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 16:10:51 +0200 Subject: [PATCH 43/58] Update Dockerfile Bump up to KERN 3 --- Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index e355295e..141a5e73 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ #From Ubuntu 16.04 -FROM kernsuite/base:2 +FROM kernsuite/base:3 MAINTAINER Ben Hugo "bhugo@ska.ac.za" #Package dependencies @@ -58,7 +58,7 @@ ENV DEB_DEPENCENDIES \ RUN apt-get update && \ apt-get install -y software-properties-common && \ - add-apt-repository -y -s ppa:kernsuite/kern-2 && \ + add-apt-repository -y -s ppa:kernsuite/kern-3 && \ apt-add-repository -y multiverse && \ apt-get update && \ apt-get install -y $DEB_SETUP_DEPENDENCIES && \ @@ -66,13 
+66,13 @@ RUN apt-get update && \ #Setup a virtual environment for the python packages pip install -U pip virtualenv setuptools wheel && \ virtualenv --system-site-packages /ddfvenv && \ + # Install Montblanc and all other optional dependencies + pip install -r /src/DDFacet/requirements.txt && \ cd /src/DDFacet/ && git submodule update --init --recursive && cd / && \ # Activate virtual environment . /ddfvenv/bin/activate && \ - # Install DDFacet + # Finally install DDFacet pip install -I --force-reinstall --no-binary :all: /src/DDFacet/ && \ - # Install Montblanc and all other optional dependencies - pip install -r /src/DDFacet/requirements.txt && \ # Nuke the unused & cached binaries needed for compilation, etc. rm -r /src/DDFacet && \ apt-get remove -y $DEB_SETUP_DEPENDENCIES && \ From 7544160dcd31c4e0096c268e0c0a838818bcc819 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 16:25:12 +0200 Subject: [PATCH 44/58] Update requirements.txt Move optional requirements to requirements.txt --- requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index aa63130f..313344f5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ git+https://github.com/ska-sa/montblanc.git@65ffb611f5 -pymoresane == 0.3.0 +pymoresane >= 0.3.0 +"Cython >= 0.25.2" +"nose >= 1.3.7" From da0e0dccc96ee0093648da61a9c85c37a8e6f30d Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 16:28:58 +0200 Subject: [PATCH 45/58] Update setup.py Remove optional dependencies - moved to requirements.txt --- setup.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 16466756..b7f4373b 100755 --- a/setup.py +++ b/setup.py @@ -123,12 +123,10 @@ def define_scripts(): }, packages=[pkg, skymodel_pkg], install_requires=[ - "Cython >= 0.25.2", - "numpy >= 1.13.0rc2", + "numpy >= 1.11.0", "SharedArray >= 2.0.2", "Polygon2 >= 2.0.8", "pyFFTW >= 0.10.4", - "nose >= 
1.3.7", "astropy >= 1.3.3", "deap >= 1.0.1", "ipdb >= 0.10.3", From 05a00ef07b4d037574200bd6f61390ce00d5cc5d Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 16:33:05 +0200 Subject: [PATCH 46/58] Update setup.py Remove git tag based version generation as this apparently breaks Debian packaging process. --- setup.py | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/setup.py b/setup.py index b7f4373b..498ec263 100755 --- a/setup.py +++ b/setup.py @@ -34,25 +34,11 @@ build_root=os.path.dirname(__file__) def get_version(): - # Versioning code here, based on - # http://blogs.nopcode.org/brainstorm/2013/05/20/pragmatic-python-versioning-via-setuptools-and-git-tags/ - - # Fetch version from git tags, and write to version.py. - # Also, when git is not available (PyPi package), use stored version.py. + # Get the version code from version.py + # version.py must be updated manually before releases + # (git tag-based generation breaks Debian packaging process) version_py = os.path.join(build_root, pkg, 'version.py') - try: - version_git = subprocess.check_output(['git', 'describe', '--tags', - '--abbrev=0']).rstrip() - except: - with open(version_py, 'r') as fh: - version_git = open(version_py).read().strip().split('=')[-1].replace('"','') - - version_msg = "# Do not edit this file, pipeline versioning is governed by git tags" - - with open(version_py, 'w') as fh: - fh.write(version_msg + os.linesep + "__version__=\"" + version_git +"\"") - return version_git def backend(compile_options): From 6e9ab377c4e4cbe22ff31bf7cbc9b39ea43a1c0e Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:09:00 +0200 Subject: [PATCH 47/58] Update setup.py Move version into setup.py --- setup.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/setup.py b/setup.py index 498ec263..9f982a88 100755 --- a/setup.py +++ b/setup.py @@ -31,16 +31,9 @@ pkg='DDFacet' skymodel_pkg='SkyModel' +__version__ = 
"0.2.dev3" build_root=os.path.dirname(__file__) -def get_version(): - # Get the version code from version.py - # version.py must be updated manually before releases - # (git tag-based generation breaks Debian packaging process) - version_py = os.path.join(build_root, pkg, 'version.py') - - return version_git - def backend(compile_options): if compile_options is not None: print >> sys.stderr, "Compiling extension libraries with user defined options: '%s'"%compile_options @@ -90,7 +83,7 @@ def define_scripts(): return [os.path.join(pkg, script_name) for script_name in ['DDF.py', 'CleanSHM.py', 'MemMonitor.py', 'Restore.py', 'SelfCal.py']] setup(name=pkg, - version=get_version(), + version=__version__, description='Facet-based radio astronomy continuum imager', url='http://github.com/cyriltasse/DDFacet', classifiers=[ From dc5620a28662b08b450ee63fd6d74b419ce7e261 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:11:23 +0200 Subject: [PATCH 48/58] Update __init__.py Add version at package init level --- DDFacet/__init__.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/DDFacet/__init__.py b/DDFacet/__init__.py index 6c677ea9..48d7bc5b 100644 --- a/DDFacet/__init__.py +++ b/DDFacet/__init__.py @@ -16,4 +16,10 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-''' \ No newline at end of file +''' + +import pkg_resources +try: + __version__ = pkg_resources.require("kliko")[0].version +except pkg_resources.DistributionNotFound: + __version__ = "dev" From b20b3b93252987d2e5fe49a444dd566ed0bcc019 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:13:20 +0200 Subject: [PATCH 49/58] Update __init__.py --- DDFacet/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/__init__.py b/DDFacet/__init__.py index 48d7bc5b..15251a5b 100644 --- a/DDFacet/__init__.py +++ b/DDFacet/__init__.py @@ -20,6 +20,6 @@ import pkg_resources try: - __version__ = pkg_resources.require("kliko")[0].version + __version__ = pkg_resources.require("DDFacet")[0].version except pkg_resources.DistributionNotFound: __version__ = "dev" From 557735816810efc677a7db85326c0838527a98ae Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:15:16 +0200 Subject: [PATCH 50/58] Update report_version.py Report package level version --- DDFacet/report_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DDFacet/report_version.py b/DDFacet/report_version.py index ec077572..6a57b80e 100644 --- a/DDFacet/report_version.py +++ b/DDFacet/report_version.py @@ -1,7 +1,7 @@ import os import subprocess -from DDFacet.version import __version__ +from DDFacet import __version__ def report_version(): From 59648c9badaad8704b2498ed8e6c95c61854155d Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:16:16 +0200 Subject: [PATCH 51/58] Delete version.py Moved version to package init level --- DDFacet/version.py | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 DDFacet/version.py diff --git a/DDFacet/version.py b/DDFacet/version.py deleted file mode 100644 index a42d30c7..00000000 --- a/DDFacet/version.py +++ /dev/null @@ -1,2 +0,0 @@ -# Do not edit this file, pipeline versioning is governed by git tags -__version__="0.1.dev6" \ No newline at end of file From 
8399c4389816caa5fbd29d7a2a65b2f0f0620b11 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sat, 11 Nov 2017 21:27:54 +0200 Subject: [PATCH 52/58] Update README.rst Add instructions on installing the requirements.txt file for developers (required for compilation, since cython is now an optional requirement). Bump up to KERN 3 --- README.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 63792e40..10878fc7 100644 --- a/README.rst +++ b/README.rst @@ -90,11 +90,11 @@ We prefer that users use DDFacet though Docker. However, if this is not availabl environments) we recommend you use a virtual environment. If you install it directly into your system packages you're on your own -- be warned!! -1. You need to add in the KERN 2 ppa if you don't already have it:: - add-apt-repository -y -s ppa:kernsuite/kern-2 +1. You need to add in the KERN 3 ppa if you don't already have it:: + add-apt-repository -y -s ppa:kernsuite/kern-3 -2. Install each of the debian dependencies. The latest full list of apt dependencies can be be found in the Dockerfile +2. Install each of the debian dependencies. The latest full list of apt dependencies can be found in the Dockerfile 3. Create a virtual environment somewhere on your system and activate:: @@ -143,6 +143,7 @@ To setup your local development environment navigate clone DDFacet and run:: (ddfvenv) $ cd DDFacet (ddfvenv) $ git submodule update --init --recursive (ddfvenv) $ cd ..
+ (ddfvenv) $ pip install -r requirements.txt (ddfvenv) $ pip install -e DDFacet/ #To (re-)build the backend in your checked out folder: (ddfvenv) $ cd DDFacet From e561e97245b60fd8dd8279402657c5a1218a6289 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Sun, 12 Nov 2017 15:56:34 +0200 Subject: [PATCH 53/58] Update README.rst Remove -e specifier from pypi install instructions --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 63792e40..ee1455ce 100644 --- a/README.rst +++ b/README.rst @@ -106,7 +106,7 @@ on your own -- be warned!! 4. Then, install directly from the Python Package Index (PyPI) using pip - **ensure your venv is activated**:: pip install -U pip setuptools - pip install -e DDFacet --force-reinstall -U + pip install DDFacet --force-reinstall -U 5. When you're done with your imaging business:: From b267b642f3a29af3f4bb7b13381f5894b593ef9b Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Wed, 15 Nov 2017 22:20:07 +0200 Subject: [PATCH 54/58] typo fix --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 313344f5..2944bfb2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ git+https://github.com/ska-sa/montblanc.git@65ffb611f5 pymoresane >= 0.3.0 -"Cython >= 0.25.2" +"cython >= 0.25.2" "nose >= 1.3.7" From aaf8648130bdea6adb146a4bc09749b0faab0153 Mon Sep 17 00:00:00 2001 From: Benjamin Hugo Date: Thu, 16 Nov 2017 09:25:00 +0200 Subject: [PATCH 55/58] Typo fix --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2944bfb2..33a464b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ git+https://github.com/ska-sa/montblanc.git@65ffb611f5 pymoresane >= 0.3.0 -"cython >= 0.25.2" -"nose >= 1.3.7" +cython >= 0.25.2 +nose >= 1.3.7 From 37e21e888265898fad8c4c27d19a17de11ad4795 Mon Sep 17 00:00:00 2001 From: Benjamin 
Hugo Date: Fri, 17 Nov 2017 09:58:37 +0200 Subject: [PATCH 56/58] Update README.rst Fix instructions --- README.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README.rst b/README.rst index ee1455ce..8d9d079d 100644 --- a/README.rst +++ b/README.rst @@ -38,6 +38,7 @@ widely supported containerization framework, called Docker. This package is on P $ virtualenv stimelavenv $ source stimelavenv/bin/activate (stimelavenv)$ pip install -U pip wheel setuptools + (stimelavenv)$ pip install stimela 4. Run ``stimela pull`` and ``stimela build`` to pull all the latest astronomy software from DockerHub (this will take a while and is several GiB in size, so ensure you're on a fast link):: From 0a9ed38102cf5fd9ada91fde65592f1706598755 Mon Sep 17 00:00:00 2001 From: Martin Reinecke Date: Thu, 7 Dec 2017 19:07:21 +0100 Subject: [PATCH 57/58] try to fix issues --- DDFacet/Gridder/Arrays.c | 2 +- DDFacet/Gridder/Arrays.h | 4 +--- DDFacet/Gridder/JonesServer.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/DDFacet/Gridder/Arrays.c b/DDFacet/Gridder/Arrays.c index 1be65d02..a75c4c15 100644 --- a/DDFacet/Gridder/Arrays.c +++ b/DDFacet/Gridder/Arrays.c @@ -57,7 +57,7 @@ static PyMethodDef _pyArrays_testMethods[] = { {"pyWhereMax", pyWhereMax, METH_VARARGS}, {"pyWhereMaxMask", pyWhereMaxMask, METH_VARARGS}, {"pySetOMPNumThreads", pySetOMPNumThreads, METH_VARARGS}, - {"pySetOMPDynamicNumThreads", pySetOMPNumThreads, METH_VARARGS}, + {"pySetOMPDynamicNumThreads", pySetOMPDynamicNumThreads, METH_VARARGS}, {NULL, NULL} /* Sentinel - marks the end of this structure */ }; diff --git a/DDFacet/Gridder/Arrays.h b/DDFacet/Gridder/Arrays.h index 7b794f82..e9e99b6c 100644 --- a/DDFacet/Gridder/Arrays.h +++ b/DDFacet/Gridder/Arrays.h @@ -83,6 +83,4 @@ static PyObject *pyDivArray(PyObject *self, PyObject *args); static PyObject *pyWhereMax(PyObject *self, PyObject *args); static PyObject *pyWhereMaxMask(PyObject *self, PyObject *args); static PyObject 
*pySetOMPNumThreads(PyObject *self, PyObject *args); - - - +static PyObject *pySetOMPDynamicNumThreads(PyObject *self, PyObject *args); diff --git a/DDFacet/Gridder/JonesServer.c b/DDFacet/Gridder/JonesServer.c index 1da735b5..e13234f6 100644 --- a/DDFacet/Gridder/JonesServer.c +++ b/DDFacet/Gridder/JonesServer.c @@ -126,7 +126,7 @@ void GiveJones(float complex *ptrJonesMatrices, int *JonesDims, float *ptrCoefs, if(ptrCoefs[idir]==0){continue;} size_t offJ0=i_t*nd_Jones*na_Jones*nch_Jones*4 +i_dir*na_Jones*nch_Jones*4 - +i_ant0*nch_Jones*4; + +i_ant0*nch_Jones*4 +iChJones*4; float coef; From 8b2f2f811eaa87c3292a0a628c9d11629e0851bd Mon Sep 17 00:00:00 2001 From: bennahugo Date: Wed, 13 Dec 2017 22:51:54 +0200 Subject: [PATCH 58/58] Update to latest skymodel --- SkyModel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SkyModel b/SkyModel index 91b24630..2b54a3b2 160000 --- a/SkyModel +++ b/SkyModel @@ -1 +1 @@ -Subproject commit 91b246302f34222759ee07db084d72d3ed5c35d9 +Subproject commit 2b54a3b22875ebf7f4283242658c7783895f4490