diff --git a/docs/source/apis/inputs.rising_bubble.rst b/docs/source/apis/inputs.rising_bubble.rst index 738e1da2..baf5726a 100644 --- a/docs/source/apis/inputs.rising_bubble.rst +++ b/docs/source/apis/inputs.rising_bubble.rst @@ -13,8 +13,6 @@ .. autosummary:: - hydrostatic_state - set_explicit_boundary_data sol_init diff --git a/docs/source/apis/run_scripts.test_suite.rst b/docs/source/apis/run_scripts.test_suite.rst index 6ac6c493..8e5c244f 100644 --- a/docs/source/apis/run_scripts.test_suite.rst +++ b/docs/source/apis/run_scripts.test_suite.rst @@ -13,12 +13,6 @@ - .. rubric:: Classes - - .. autosummary:: - - compare_sol - diff --git a/docs/source/apis/src.data_assimilation.blending.rst b/docs/source/apis/src.data_assimilation.blending.rst index e97bdbb2..a312c6c5 100644 --- a/docs/source/apis/src.data_assimilation.blending.rst +++ b/docs/source/apis/src.data_assimilation.blending.rst @@ -15,18 +15,12 @@ blending_after_timestep blending_before_timestep - colored - compressibility - deepcopy do_comp_to_psinc_conv do_hydro_to_nonhydro_conv do_lake_to_swe_conv do_nonhydro_to_hydro_conv do_psinc_to_comp_conv do_swe_to_lake_conv - is_compressible - is_nonhydrostatic - nonhydrostasy diff --git a/docs/source/apis/src.data_assimilation.letkf.rst b/docs/source/apis/src.data_assimilation.letkf.rst index ebd26456..05580665 100644 --- a/docs/source/apis/src.data_assimilation.letkf.rst +++ b/docs/source/apis/src.data_assimilation.letkf.rst @@ -14,15 +14,9 @@ .. autosummary:: bin_func - convolve2d da_interface - deepcopy - interpn interpolation_func - map_coordinates none_func - sliding_window_view - spsolve @@ -32,8 +26,6 @@ .. autosummary:: - BdryType - ProgressBar analysis prepare_rloc diff --git a/docs/source/apis/src.data_assimilation.localisation.rst b/docs/source/apis/src.data_assimilation.localisation.rst index c6bbbf3c..31f43c59 100644 --- a/docs/source/apis/src.data_assimilation.localisation.rst +++ b/docs/source/apis/src.data_assimilation.localisation.rst @@ -20,12 +20,6 @@ - .. rubric:: Classes - - .. autosummary:: - - BdryType - diff --git a/docs/source/apis/src.data_assimilation.params.rst b/docs/source/apis/src.data_assimilation.params.rst index 95c97c7a..f2e67d88 100644 --- a/docs/source/apis/src.data_assimilation.params.rst +++ b/docs/source/apis/src.data_assimilation.params.rst @@ -9,13 +9,6 @@ - .. rubric:: Functions - - .. autosummary:: - - set_explicit_boundary_data - set_ghostnodes_p2 - diff --git a/docs/source/apis/src.data_assimilation.utils.rst b/docs/source/apis/src.data_assimilation.utils.rst index 68cf40e8..ca0b803c 100644 --- a/docs/source/apis/src.data_assimilation.utils.rst +++ b/docs/source/apis/src.data_assimilation.utils.rst @@ -16,7 +16,6 @@ HSprojector_2t3D HSprojector_3t2D boundary_mask - deepcopy ensemble_inflation obs_noiser set_p2_nodes @@ -32,7 +31,6 @@ .. 
autosummary:: - BdryType ensemble diff --git a/docs/source/apis/src.dycore.discretisation.time_update.rst b/docs/source/apis/src.dycore.discretisation.time_update.rst index 381c04f4..f5d66d4c 100644 --- a/docs/source/apis/src.dycore.discretisation.time_update.rst +++ b/docs/source/apis/src.dycore.discretisation.time_update.rst @@ -18,6 +18,7 @@ colored data_init deepcopy + do dynamic_timestep euler_backward_non_advective_expl_part euler_backward_non_advective_impl_part @@ -25,7 +26,6 @@ is_nonhydrostatic nonhydrostasy recompute_advective_fluxes - time_update diff --git a/inputs/rising_bubble.py b/inputs/rising_bubble.py index e6fbaa1a..9561e052 100644 --- a/inputs/rising_bubble.py +++ b/inputs/rising_bubble.py @@ -1,6 +1,6 @@ import numpy as np -from dycore.physics.hydrostatics import hydrostatic_state -from dycore.utils.boundary import set_explicit_boundary_data +import dycore.physics.hydrostatics as hydro +import dycore.utils.boundary as bdry class UserData(object): # Nsq_ref = grav * 1.3e-05 @@ -51,7 +51,7 @@ def sol_init(Sol, mpv, elem, node, th, ud, seed=None): g = ud.gravity_strength[1] # print(ud.rho_ref) - hydrostatic_state(mpv, elem, node, th, ud) + hydro.hydrostatic_state(mpv, elem, node, th, ud) x = elem.x y = elem.y @@ -94,6 +94,6 @@ def sol_init(Sol, mpv, elem, node, th, ud, seed=None): rhoY = mpv.HydroState_n.rhoY0[0] mpv.p2_nodes[...] = (p - mpv.HydroState_n.p0[0]) / rhoY / ud.Msq - set_explicit_boundary_data(Sol,elem,ud,th,mpv) + bdry.set_explicit_boundary_data(Sol,elem,ud,th,mpv) return Sol \ No newline at end of file diff --git a/run_scripts/test_suite.py b/run_scripts/test_suite.py index f207540e..c5b97e7c 100644 --- a/run_scripts/test_suite.py +++ b/run_scripts/test_suite.py @@ -1,16 +1,16 @@ # %% from run import run_params as rp -from pybella.tests.diagnostics import compare_sol +import pybella.tests.diagnostics as td import json # The first run MUST have gen_targets = True # This is to generate the target outputs # so that a comparison can be made -gen_targets = True +gen_targets = False # Only set this to True if the diagnostic targets # are to be updated. Required for the first run. -updt_targets = True +updt_targets = False # define run instance rp = rp() @@ -39,7 +39,7 @@ rp.queue_run() if updt_targets: - diag = compare_sol("gen_target") + diag = td.compare_sol("gen_target") diag.update_targets() diff --git a/src/data_assimilation/blending.py b/src/data_assimilation/blending.py index 9fbc5335..5ea937c3 100644 --- a/src/data_assimilation/blending.py +++ b/src/data_assimilation/blending.py @@ -1,17 +1,11 @@ import numpy as np from scipy import signal -import scipy.sparse.linalg as la - -from dycore.physics.gas_dynamics.eos import ( - nonhydrostasy, - compressibility, - is_compressible, - is_nonhydrostatic, -) -import logging -from termcolor import colored -from copy import deepcopy +import dycore.physics.gas_dynamics as eos + +import logging +import termcolor +import copy class Blend(object): @@ -112,7 +106,7 @@ def update_p2n(self, Sol, mpv, node, th, ud): def do_comp_to_psinc_conv(Sol, mpv, bld, elem, node, th, ud, label, writer): - logging.info(colored("Converting COMP to PSINC", "blue")) + logging.info(termcolor.colored("Converting COMP to PSINC", "blue")) dp2n = mpv.p2_nodes bld.convert_p2n(dp2n) bld.update_Sol(Sol, elem, node, th, ud, mpv, "bef", label=label, writer=writer) @@ -126,9 +120,9 @@ def do_psinc_to_comp_conv( ): from dycore.discretisation import time_update - logging.info(colored("Blending... 
step = %i" % step, "blue")) - Sol_freeze = deepcopy(Sol) - mpv_freeze = deepcopy(mpv) + logging.info(termcolor.colored("Blending... step = %i" % step, "blue")) + Sol_freeze = copy.deepcopy(Sol) + mpv_freeze = copy.deepcopy(mpv) ret = time_update.time_update( Sol, @@ -169,7 +163,7 @@ def do_psinc_to_comp_conv( if writer != None: writer.populate(str(label) + "_after_full_step", "dp2n", dp2n) - logging.info(colored("Converting PSINC to COMP", "blue")) + logging.info(termcolor.colored("Converting PSINC to COMP", "blue")) bld.convert_p2n(dp2n) bld.update_Sol(Sol, elem, node, th, ud, mpv, "aft", label=label, writer=writer) bld.update_p2n(Sol, mpv, node, th, ud) @@ -183,7 +177,7 @@ def do_psinc_to_comp_conv( def do_swe_to_lake_conv(Sol, mpv, elem, node, ud, th, writer, label, debug): - logging.info(colored("swe to lake conversion...", "blue")) + logging.info(termcolor.colored("swe to lake conversion...", "blue")) H1 = Sol.rho[ :, @@ -227,10 +221,10 @@ def do_lake_to_swe_conv( if debug == True: writer.write_all(Sol, mpv, elem, node, th, str(label) + "_after_lake_time_step") - Sol_freeze = deepcopy(Sol) - mpv_freeze = deepcopy(mpv) + Sol_freeze = copy.deepcopy(Sol) + mpv_freeze = copy.deepcopy(mpv) - logging.info(colored("doing lake-to-swe time-update...", "blue")) + logging.info(termcolor.colored("doing lake-to-swe time-update...", "blue")) ret = time_update.time_update( Sol, flux, @@ -260,13 +254,13 @@ def do_lake_to_swe_conv( else: assert 0, "incorrect ud.blending_type" - Sol = deepcopy(Sol_freeze) - mpv = deepcopy(mpv_freeze) + Sol = copy.deepcopy(Sol_freeze) + mpv = copy.deepcopy(mpv_freeze) mpv.p2_nodes[...] = dp2n H10 = mpv.p2_nodes[:, 2:-2, :].mean(axis=1) - logging.info(colored("lake to swe conversion...", "blue")) + logging.info(termcolor.colored("lake to swe conversion...", "blue")) H10 -= H10.mean() # define 2D kernel @@ -301,7 +295,7 @@ def do_nonhydro_to_hydro_conv( Sol, flux, mpv, bld, elem, node, th, ud, label, writer, step, window_step, t, dt ): - logging.info(colored("nonhydrostatic to hydrostatic conversion...", "blue")) + logging.info(termcolor.colored("nonhydrostatic to hydrostatic conversion...", "blue")) # bld.convert_p2n(mpv.p2_nodes) # bld.update_Sol(Sol,elem,node,th,ud,mpv,'bef',label=label,writer=writer) # Sol.rhov = Sol.rhov_half @@ -331,8 +325,8 @@ def do_hydro_to_nonhydro_conv( Sol, flux, mpv, bld, elem, node, th, ud, label, writer, step, window_step, t, dt ): - logging.info(colored("hydrostatic to nonhydrostatic conversion...", "blue")) - logging.info(colored("Blending... step = %i" % step, "blue")) + logging.info(termcolor.colored("hydrostatic to nonhydrostatic conversion...", "blue")) + logging.info(termcolor.colored("Blending... 
step = %i" % step, "blue")) # Sol_tmp = deepcopy(Sol) # flux_tmp = deepcopy(flux) @@ -542,10 +536,10 @@ def blending_before_timestep( ud.is_nonhydrostatic = 1 ud.nonhydrostasy = 1.0 else: - ud.is_compressible = is_compressible(ud, window_step) - ud.compressibility = compressibility(ud, t, window_step) - ud.is_nonhydrostatic = is_nonhydrostatic(ud, window_step) - ud.nonhydrostasy = nonhydrostasy(ud, t, window_step) + ud.is_compressible = eos.is_compressible(ud, window_step) + ud.compressibility = eos.compressibility(ud, t, window_step) + ud.is_nonhydrostatic = eos.is_nonhydrostatic(ud, window_step) + ud.nonhydrostasy = eos.nonhydrostasy(ud, t, window_step) return swe_to_lake, Sol, mpv, t diff --git a/src/data_assimilation/etpf.py b/src/data_assimilation/etpf.py index 296e8d65..0f0dfbc3 100644 --- a/src/data_assimilation/etpf.py +++ b/src/data_assimilation/etpf.py @@ -1,5 +1,4 @@ import numpy as np - import logging def da_interface(results, obs, obs_attributes, delta, times, tout, N, loc=0): @@ -11,7 +10,6 @@ def da_interface(results, obs, obs_attributes, delta, times, tout, N, loc=0): # local_ens = np.array([mem[inner] for mem in local_ens]) # local_ens = analysis(local_ens,attr) # print(results[:,0,...][0].rho) - attributes = ['rho','rhou','rhov','rhow','rhoY','rhoX'] # attributes = ['rho', 'rhou', 'rhov'] tmp = np.array([getattr(results[:,loc,...][n],obs_attributes[0])[inner] for n in range(N)]) diff --git a/src/data_assimilation/letkf.py b/src/data_assimilation/letkf.py index 0d644a64..7db1063e 100644 --- a/src/data_assimilation/letkf.py +++ b/src/data_assimilation/letkf.py @@ -1,21 +1,17 @@ -from scipy import sparse, linalg -from scipy.sparse.linalg import spsolve -from scipy.ndimage import map_coordinates -from scipy.signal import convolve2d -from scipy.interpolate import interpn -from dycore.utils.options import BdryType -import matplotlib.pyplot as plt +import numpy as np +import numpy.lib.stride_tricks as st + +import scipy.sparse as sp +import scipy.ndimage as ndimage + import dask.array as darr import dask -import numpy as np +import matplotlib.pyplot as plt import logging -import data_assimilation.utils as dau -from numpy.lib.stride_tricks import sliding_window_view -from copy import deepcopy - -from dask.diagnostics import ProgressBar +import dycore.utils.options as opts +import data_assimilation as da debug_cnt = 0 @@ -46,7 +42,7 @@ def da_interface(results,dap,obs,attr,tout,N,ud): local_ens.forward(forward_operator) # local_ens.localisation(localisation_matrix) - obs_covar = sparse.eye((x_obs*y_obs), (x_obs*y_obs), format='csr') + obs_covar = sp.sparse.eye((x_obs*y_obs), (x_obs*y_obs), format='csr') X = local_ens.analyse(obs_current.reshape(-1), obs_covar) local_ens.ensemble = local_ens.to_array(X) @@ -60,7 +56,7 @@ def da_interface(results,dap,obs,attr,tout,N,ud): # let me ust put the forward operator here for now - will need to tidy stuff up.... 
def interpolation_func(ensemble,x_obs,y_obs,ud): - if ud.bdry_type[0] == BdryType.WALL or ud.bdry_type[1] == BdryType.WALL: + if ud.bdry_type[0] == opts.BdryType.WALL or ud.bdry_type[1] == opts.BdryType.WALL: assert("WALL NOT IMPLEMENTED!") x_ens, y_ens = ensemble[0].shape x = np.linspace(0,x_ens,x_obs) @@ -77,7 +73,7 @@ def interpolation_func(ensemble,x_obs,y_obs,ud): # ensemble = [interpn((x,y),mem,pts, method='splinef2d').reshape(x_obs,y_obs) for mem in ensemble] x,y = np.meshgrid(x,y) - ensemble = [map_coordinates(mem,[y,x],mode='wrap', order=3) for mem in ensemble] + ensemble = [ndimage.map_coordinates(mem,[y,x],mode='wrap', order=3) for mem in ensemble] return np.array(ensemble) @@ -147,7 +143,7 @@ def analyse(self,obs,obs_covar): # self.X -= self.X_mean # R in (m x k) # This is step 4 of the algorithm in Hunt et. al., 2007. - C = spsolve(obs_covar, self.Y.T).T # R in (k x l) + C = sp.spsolve(obs_covar, self.Y.T).T # R in (k x l) # This applies the localisation function to the local region. if self.localisation_matrix is not None: C[...] = ((np.array(self.localisation_matrix)) @ C.T).T @@ -155,7 +151,7 @@ def analyse(self,obs,obs_covar): # The next three lines are step 5 of the algorithm. Pa = (self.no_of_members - 1.) * np.eye(self.no_of_members) / self.rho + (C @ self.Y.T) - Lambda, P = linalg.eigh(Pa) + Lambda, P = sp.linalg.eigh(Pa) Pa = P @ (np.diag(1./Lambda) @ P.T) # This is step 6 of the algorithm. @@ -248,7 +244,7 @@ def __init__(self, ud, elem, node, dap, N, obs_X=5, obs_Y=5): self.pad_Y = int((self.obs_Y - 1) / 2) # get mask to handle BC. Periodic mask includes ghost cells/nodes and wall masks takes only the inner domain. - self.cmask, self.nmask = dau.boundary_mask(ud, elem, node, self.pad_X, self.pad_Y) + self.cmask, self.nmask = da.utils.boundary_mask(ud, elem, node, self.pad_X, self.pad_Y) # get from da parameters the localisation matrix and inflation factor self.inf_fac = dap.inflation_factor @@ -319,7 +315,7 @@ def analyse_by_grid_type(self,results,obs,covar,mask,N,tout,grid_type): # covariance handling if covar is None: # use identity for observation covariance - obs_covar_current = sparse.eye(attr_len*obs_X*obs_Y,attr_len*obs_X*obs_Y, format='csr') + obs_covar_current = sp.sparse.eye(attr_len*obs_X*obs_Y,attr_len*obs_X*obs_Y, format='csr') else: # get covar at current time covar = covar[list(self.da_times).index(tout)] @@ -367,7 +363,7 @@ def analyse_by_grid_type(self,results,obs,covar,mask,N,tout,grid_type): logging.info("Progress of the DA procedure:") # Do the computation of the delayed tasks - with ProgressBar(): + with dask.diagnostics.ProgressBar(): analysis_res = analysis_res.compute() logging.info("===================\n") @@ -415,7 +411,7 @@ def do_analysis(self, attr_len, covar, bc_mask, mask_n, obs_p, X, X_bar, Y, Y_ba # get masked covariance in local domain covar_current = np.ma.array(covar,mask=mask_n[n]).filled(fill_value=np.nan) covar_current = covar_current[~np.isnan(covar_current)] - obs_covar_current = sparse.diags(covar_current.flatten(), format='csr') + obs_covar_current = sp.sparse.diags(covar_current.flatten(), format='csr') # get obs according to sparsity structure obs_pn = obs_p[n][~np.isnan(obs_p[n])] @@ -475,13 +471,13 @@ def get_state(self,state,mask,Nx,Ny,attr_len): state -= X_bar # Decompose X[G] and bar{X[G]} into the local regions view - # X = np.array([dau.sliding_window_view(mem, (1,1), (1,1)).reshape(Nx*Ny,attr_len) for mem in state]) - # X_bar = np.array([dau.sliding_window_view(mem, (1,1), (1,1)).reshape(Nx*Ny,attr_len) for mem in 
X_bar]) + # X = np.array([dau.st.sliding_window_view(mem, (1,1), (1,1)).reshape(Nx*Ny,attr_len) for mem in state]) + # X_bar = np.array([dau.st.sliding_window_view(mem, (1,1), (1,1)).reshape(Nx*Ny,attr_len) for mem in X_bar]) state = darr.from_array(state) - X = sliding_window_view(state, (1,1), axis=(2,3)).reshape(self.N,attr_len,Nx*Ny) + X = st.sliding_window_view(state, (1,1), axis=(2,3)).reshape(self.N,attr_len,Nx*Ny) X_bar = darr.from_array(X_bar) - X_bar = sliding_window_view(X_bar, (1,1), axis=(2,3)).reshape(1,attr_len,Nx*Ny) + X_bar = st.sliding_window_view(X_bar, (1,1), axis=(2,3)).reshape(1,attr_len,Nx*Ny) X = np.swapaxes(X,1,2) X_bar = np.swapaxes(X_bar,1,2) @@ -504,9 +500,9 @@ def get_obs(self,obs,mask,obs_X,obs_Y,Nx,Ny,attr_len): obs = np.ma.array(obs,mask=mask).filled(fill_value=np.nan) - # obs = np.array([dau.sliding_window_view(obs_attr, (obs_X,obs_Y), (1,1)) for obs_attr in obs]) + # obs = np.array([dau.st.sliding_window_view(obs_attr, (obs_X,obs_Y), (1,1)) for obs_attr in obs]) obs = darr.from_array(obs) - obs = sliding_window_view(obs, (obs_X,obs_Y), axis=(1,2)) + obs = st.sliding_window_view(obs, (obs_X,obs_Y), axis=(1,2)) obs = np.swapaxes(obs,0,2) obs = np.swapaxes(obs,0,1) @@ -553,13 +549,13 @@ def get_state_in_obs_space(self,state,mask,obs_attr,obs_X,obs_Y,Nx,Ny,attr_len): Y_bar = Y_bar.filled(np.nan) # Decompose Y[G] and bar{y[G]} such that the local regions can be accessed easily - # sios = np.array([sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in sios]) - # Y_bar = np.array([sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in Y_bar]) + # sios = np.array([st.sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in sios]) + # Y_bar = np.array([st.sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in Y_bar]) sios = darr.from_array(sios) - sios = sliding_window_view(sios, (obs_X,obs_Y), axis=(2,3)).reshape(self.N,attr_len,Nx*Ny,obs_X,obs_Y) + sios = st.sliding_window_view(sios, (obs_X,obs_Y), axis=(2,3)).reshape(self.N,attr_len,Nx*Ny,obs_X,obs_Y) Y_bar = darr.from_array(Y_bar) - Y_bar = sliding_window_view(Y_bar, (obs_X,obs_Y), axis=(2,3)).reshape(1,attr_len,Nx*Ny,obs_X,obs_Y) + Y_bar = st.sliding_window_view(Y_bar, (obs_X,obs_Y), axis=(2,3)).reshape(1,attr_len,Nx*Ny,obs_X,obs_Y) ############################### @@ -578,14 +574,14 @@ def get_state_in_obs_space(self,state,mask,obs_attr,obs_X,obs_Y,Nx,Ny,attr_len): # for idx in range(0,self.N): # idx = (self.N - 1) - idx # print(idx) - # sios_tmp[idx] = sliding_window_view(sios[idx], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) + # sios_tmp[idx] = st.sliding_window_view(sios[idx], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) # sios_tmp00 = np.zeros((5,attr_len,Nx*Ny,obs_X,obs_Y)) # for idx in range(0,self.N-hN): # print(idx+hN) - # sios_tmp00[idx] = sliding_window_view(sios[idx+hN], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) + # sios_tmp00[idx] = st.sliding_window_view(sios[idx+hN], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) #################################### @@ -594,11 +590,11 @@ def get_state_in_obs_space(self,state,mask,obs_attr,obs_X,obs_Y,Nx,Ny,attr_len): # Y_bar_tmp[0] = view_as_windows(Y_bar[0], (attr_len,obs_X,obs_Y)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) # sios_tmp = view_as_windows(sios, 
(self.N,attr_len,obs_X,obs_Y)).reshape(self.N,Nx*Ny,attr_len,obs_X,obs_Y) - # Y_bar_tmp[0] = sliding_window_view(Y_bar[0], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) + # Y_bar_tmp[0] = st.sliding_window_view(Y_bar[0], (obs_X,obs_Y),axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) - # sios = np.array([sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) for mem in sios]) - # Y_bar = np.array([sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) for mem in Y_bar]) + # sios = np.array([st.sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) for mem in sios]) + # Y_bar = np.array([st.sliding_window_view(mem, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) for mem in Y_bar]) # sios = np.array([view_as_windows(mem, (attr_len, obs_X,obs_Y)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) for mem in sios]) @@ -608,7 +604,7 @@ def get_state_in_obs_space(self,state,mask,obs_attr,obs_X,obs_Y,Nx,Ny,attr_len): # Y_bar = Y_bar_tmp - # Y_bar = np.array([dau.sliding_window_view(mem, (obs_X,obs_Y), (1,1)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in Y_bar]) + # Y_bar = np.array([dau.st.sliding_window_view(mem, (obs_X,obs_Y), (1,1)).reshape(Nx*Ny,attr_len,obs_X,obs_Y) for mem in Y_bar]) sios = np.swapaxes(sios,1,2) Y_bar = np.swapaxes(Y_bar,1,2) @@ -648,15 +644,15 @@ def get_bc_mask(self, mask, type, Nx, Ny, obs_X, obs_Y,attr_len): else: bc_mask *= self.nmask - # bc_mask = np.array(dau.sliding_window_view(bc_mask, (obs_X,obs_Y), (1,1))).reshape(Nx*Ny,obs_X,obs_Y) + # bc_mask = np.array(dau.st.sliding_window_view(bc_mask, (obs_X,obs_Y), (1,1))).reshape(Nx*Ny,obs_X,obs_Y) - # mask_n = np.array(dau.sliding_window_view(mask, (obs_X,obs_Y), (1,1))).reshape(Nx*Ny,attr_len,obs_X,obs_Y) + # mask_n = np.array(dau.st.sliding_window_view(mask, (obs_X,obs_Y), (1,1))).reshape(Nx*Ny,attr_len,obs_X,obs_Y) bc_mask = darr.from_array(bc_mask) - bc_mask = sliding_window_view(bc_mask, (obs_X,obs_Y)).reshape(Nx*Ny,obs_X,obs_Y) + bc_mask = st.sliding_window_view(bc_mask, (obs_X,obs_Y)).reshape(Nx*Ny,obs_X,obs_Y) mask = darr.from_array(mask) - mask_n = sliding_window_view(mask, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) + mask_n = st.sliding_window_view(mask, (obs_X,obs_Y), axis=(1,2)).reshape(attr_len,Nx*Ny,obs_X,obs_Y) mask_n = np.swapaxes(mask_n,0,1) diff --git a/src/data_assimilation/localisation.py b/src/data_assimilation/localisation.py index e085618c..6069a342 100644 --- a/src/data_assimilation/localisation.py +++ b/src/data_assimilation/localisation.py @@ -1,5 +1,5 @@ import numpy as np -from dycore.utils.options import BdryType +import dycore.utils.options as opts def rlocal_5pt(elem,node,ud): igx = elem.igx @@ -13,11 +13,11 @@ def rlocal_5pt(elem,node,ud): iicxn, iicyn = iicyn, iicxn - x_periodic = ud.bdry_type[1] == BdryType.PERIODIC - y_periodic = ud.bdry_type[0] == BdryType.PERIODIC + x_periodic = ud.bdry_type[1] == opts.BdryType.PERIODIC + y_periodic = ud.bdry_type[0] == opts.BdryType.PERIODIC - x_wall = ud.bdry_type[1] == BdryType.WALL - y_wall = ud.bdry_type[0] == BdryType.WALL + x_wall = ud.bdry_type[1] == opts.BdryType.WALL + y_wall = ud.bdry_type[0] == opts.BdryType.WALL return lambda covar : rlocal_5pt_stencil(covar, iicxn, iicyn, x_periodic, y_periodic, x_wall, y_wall) diff --git a/src/data_assimilation/params.py b/src/data_assimilation/params.py index 8dad80a2..eaead697 100644 --- a/src/data_assimilation/params.py +++ b/src/data_assimilation/params.py @@ -1,9 
+1,8 @@ import numpy as np +import scipy as sp import h5py -from scipy import stats # generate Gaussian localisation matrix -from scipy import signal -from dycore.utils.boundary import set_explicit_boundary_data, set_ghostnodes_p2 # for converter +import dycore.utils.boundary as bdry import logging @@ -136,7 +135,7 @@ def converter(results, N, mpv, elem, node, th, ud): g = ud.g0 for n in range(N): - set_explicit_boundary_data(results[n][0],elem,ud,th,mpv) + bdry.set_explicit_boundary_data(results[n][0],elem,ud,th,mpv) results[n][0].rhoY[...] = (g / 2.0 * results[n][0].rho**2)**th.gamminv igy = elem.igy @@ -144,14 +143,14 @@ def converter(results, N, mpv, elem, node, th, ud): kernel = np.ones((2,2)) kernel /= kernel.sum() - pn = signal.convolve(results[n][0].rhoY[:,igy,:], kernel, mode='valid') + pn = sp.signal.convolve(results[n][0].rhoY[:,igy,:], kernel, mode='valid') - set_explicit_boundary_data(results[n][0],elem,ud,th,mpv) + bdry.set_explicit_boundary_data(results[n][0],elem,ud,th,mpv) pn = np.expand_dims(pn, 1) pn = np.repeat(pn, node.icy, axis=1) results[n][2].p2_nodes[1:-1,:,1:-1] = pn - set_ghostnodes_p2(results[n][2].p2_nodes,node,ud) + bdry.set_ghostnodes_p2(results[n][2].p2_nodes,node,ud) pn = np.expand_dims(results[n][2].p2_nodes[:,igy,:], 1) results[n][2].p2_nodes[...] = np.repeat(pn[...], node.icy, axis=1) @@ -207,7 +206,7 @@ def get_loc_mat(self, type, c=1.0): elif type == 'gaussian': pos = np.array([X.flatten(),Y.flatten()]).T - norm = stats.multivariate_normal([0,0],[[0.05,0.0],[0.0,0.05]]) + norm = sp.stats.multivariate_normal([0,0],[[0.05,0.0],[0.0,0.05]]) Z = norm.pdf(pos).reshape(X.shape[0],X.shape[1]) Z = Z - Z.min() Z /= Z.max() diff --git a/src/data_assimilation/utils.py b/src/data_assimilation/utils.py index cd9a3092..f5b04596 100644 --- a/src/data_assimilation/utils.py +++ b/src/data_assimilation/utils.py @@ -1,10 +1,11 @@ import numpy as np -from scipy import signal -from copy import deepcopy +import scipy as sp +import copy + import matplotlib.pyplot as plt -from dycore.utils import boundary -from dycore.utils.options import BdryType +import dycore.utils.boundary as bdry +import dycore.utils.options as opts class ensemble(object): def __init__(self, input_ensemble=None): @@ -20,7 +21,7 @@ def __init__(self, input_ensemble=None): def initialise_members(self,ic,N): for cnt in range(N): - mem = [deepcopy(arr) for arr in ic] + mem = [copy.deepcopy(arr) for arr in ic] # mem = sampler(mem) setattr(self,'mem_' + str(cnt),mem) @@ -72,13 +73,13 @@ def set_p2_nodes(analysis,results,N,th,node,ud,loc_c=0,loc_n=2): p2_n = getattr(results[n][loc_n],'p2_nodes') rhoY_n = np.zeros_like(p2_n) kernel = np.array([[1.,1.],[1.,1.]]) - rhoY_n[1:-1,1:-1] = signal.fftconvolve(rhoY,kernel,mode='valid') / kernel.sum() - boundary.set_ghostnodes_p2(rhoY_n,node,ud) + rhoY_n[1:-1,1:-1] = sp.signal.fftconvolve(rhoY,kernel,mode='valid') / kernel.sum() + bdry.set_ghostnodes_p2(rhoY_n,node,ud) p2_n = rhoY_n**th.gm1 - 1.0 + (p2_n - p2_n.mean()) # p2_n = rhoY_n**th.gm1 - 1.0 p2_n -= p2_n.mean() # p2_n = np.pad(p2_n,2,mode='wrap') - boundary.set_ghostnodes_p2(p2_n,node,ud) + bdry.set_ghostnodes_p2(p2_n,node,ud) setattr(results[n][loc_n],'p2_nodes',p2_n) def set_rhoY_cells(analysis,results,N,th,ud,loc_c=0,loc_n=2): @@ -86,7 +87,7 @@ def set_rhoY_cells(analysis,results,N,th,ud,loc_c=0,loc_n=2): p2n = analysis[n] rhoYc0 = getattr(results[n][loc_c], 'rhoY') kernel = np.array([[1.,1.],[1.,1.]]) - p2c = signal.fftconvolve(p2n,kernel,mode='valid') / kernel.sum() + p2c = 
sp.signal.fftconvolve(p2n,kernel,mode='valid') / kernel.sum() p2c -= p2c.mean() rhoYc = (rhoYc0**th.gm1 + ud.Msq*p2c)**th.gm1inv @@ -107,11 +108,11 @@ def boundary_mask(ud,elem,node,pad_X,pad_Y): # ghost_padding[dim] = [elem.igs[dim],elem.igs[dim]] ghost_padding[dim] = [pads[dim],pads[dim]] - if ud.bdry_type[dim] == BdryType.PERIODIC: + if ud.bdry_type[dim] == opts.BdryType.PERIODIC: cmask = np.pad(cmask, ghost_padding, mode='constant', constant_values=(1.0)) nmask = np.pad(nmask, ghost_padding, mode='constant', constant_values=(1.0)) - elif ud.bdry_type[dim] == BdryType.WALL: + elif ud.bdry_type[dim] == opts.BdryType.WALL: cmask = np.pad(cmask, ghost_padding, mode='constant', constant_values=(0.0)) nmask = np.pad(nmask, ghost_padding, mode='constant', constant_values=(0.0)) @@ -125,11 +126,11 @@ def boundary_mask(ud,elem,node,pad_X,pad_Y): # ghost_padding[dim] = [elem.igs[dim],elem.igs[dim]] ghost_padding[dim] = [pads[dim],pads[dim]] - if ud.bdry_type[dim] == BdryType.PERIODIC: + if ud.bdry_type[dim] == opts.BdryType.PERIODIC: cmask = np.pad(cmask, ghost_padding, mode='constant', constant_values=(1.0)) nmask = np.pad(nmask, ghost_padding, mode='constant', constant_values=(1.0)) - elif ud.bdry_type[dim] == BdryType.WALL: + elif ud.bdry_type[dim] == opts.BdryType.WALL: cmask = np.pad(cmask, ghost_padding, mode='constant', constant_values=(0.0)) nmask = np.pad(nmask, ghost_padding, mode='constant', constant_values=(0.0)) @@ -236,6 +237,7 @@ def HSprojector_3t2D(results, elem, dap, N): results : nd.array An array of ensemble size k. Each ensemble member has [Sol,flux,mpv,[window_step,step]]. dap : data assimilation input class + . N : int ensemble size. @@ -282,7 +284,7 @@ def sparse_obs_selector(obs, elem, node, ud, dap): sparse_obs = dap.sparse_obs if not sparse_obs or len(dap.da_times) == 0: - mask = deepcopy(obs) + mask = copy.deepcopy(obs) for tt,mask_t in enumerate(mask): for key, _ in mask_t.items(): mask[tt][key][...] = 0.0 @@ -323,7 +325,7 @@ def sparse_obs_selector(obs, elem, node, ud, dap): Xn, Yn = node.x, node.y Xn, Yn = np.meshgrid(Xn, Yn) - mask_arr = deepcopy(obs) + mask_arr = copy.deepcopy(obs) # obs_noisy_interp = deepcopy(obs) # obs is a list of dictionaries, list length da_len, dictionary length attr_len. @@ -389,7 +391,7 @@ def obs_noiser(obs,mask,dap,rloc,elem): obs_covar_c = np.zeros((len(dap.da_times),rloc.cattr_len)) obs_covar_n = np.zeros((len(dap.da_times),rloc.nattr_len)) - obs_noisy = deepcopy(obs) + obs_noisy = copy.deepcopy(obs) std_dev = np.zeros((obs.shape[0],len(dap.obs_noise_seeds)))
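
Reviewer note on the module-qualified import refactor in src/data_assimilation/letkf.py: with the new binding "import scipy.sparse as sp", the patched call sites sp.sparse.eye(...), sp.spsolve(...) and sp.linalg.eigh(...) do not resolve as written; eye would be sp.eye, spsolve lives in scipy.sparse.linalg, and the dense symmetric eigensolver used in the analysis step is scipy.linalg.eigh. Similarly, "import dask" alone does not expose dask.diagnostics.ProgressBar. The sketch below is a minimal, self-contained illustration of bindings under which those call forms work; the arrays R, Y and Pa are toy placeholders standing in for the observation covariance and ensemble perturbations, not the project's API.

# Sketch only: assumed SciPy/Dask bindings matching the patched call sites.
import numpy as np
import scipy.sparse as sparse          # sparse.eye / sparse.diags (i.e. sp.eye, not sp.sparse.eye)
import scipy.sparse.linalg as spla     # spsolve lives here, not in scipy.sparse itself
import scipy.linalg as la              # dense symmetric eigensolver eigh lives here
import dask.array as darr
from dask.diagnostics import ProgressBar   # must be imported explicitly; "import dask" is not enough

rng = np.random.default_rng(0)

# Sparse identity observation covariance, as built in da_interface / analyse_by_grid_type.
R = sparse.eye(4, 4, format="csr")

# Step 4 of the algorithm in Hunt et al. (2007): C = solve(R, Y.T).T, done per member here.
Y = rng.standard_normal((3, 4))                          # toy ensemble perturbations in obs space
C = np.column_stack([spla.spsolve(R, y) for y in Y]).T   # scipy.sparse.linalg.spsolve

# Step 5: symmetric eigendecomposition of Pa via scipy.linalg.eigh
# (scipy.sparse.linalg provides eigsh/eigs, not eigh).
Pa = 2.0 * np.eye(3) + C @ Y.T
Lambda, P = la.eigh(Pa)

# Progress bar around a dask computation, mirroring analyse_by_grid_type.
with ProgressBar():
    total = darr.ones((64, 64), chunks=(16, 16)).sum().compute()

print(Lambda.shape, P.shape, float(total))

The same caveat applies to the package-alias imports elsewhere in the patch: eos.is_compressible(...) and friends in blending.py resolve only if dycore.physics.gas_dynamics re-exports the eos module's functions, and the direct replacement for the removed import would be "import dycore.physics.gas_dynamics.eos as eos"; da.utils.boundary_mask in letkf.py requires the data_assimilation package to import its utils submodule; and sp.signal / sp.stats under "import scipy as sp" in params.py and utils.py rely on SciPy loading those subpackages lazily, so importing scipy.signal and scipy.stats explicitly is the safer spelling. These replacement spellings are offered as assumptions about the intended bindings, not as the project's own conventions.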