diff --git a/examples/all_sky_example.ipynb b/examples/all_sky_example.ipynb new file mode 100644 index 0000000..6754663 --- /dev/null +++ b/examples/all_sky_example.ipynb @@ -0,0 +1,197 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import numpy as np\n", + "import xarray as xr\n", + "\n", + "from pyrte_rrtmgp.rrtmgp_gas_optics import GasOpticsFiles, load_gas_optics\n", + "from pyrte_rrtmgp.rrtmgp_data import download_rrtmgp_data\n", + "from pyrte_rrtmgp.rte_solver import RTESolver\n", + "from pyrte_rrtmgp.all_skys_funcs import (\n", + " compute_clouds,\n", + " compute_cloud_optics,\n", + " combine_optical_props,\n", + ")\n", + "from pyrte_rrtmgp.utils import compute_profiles, create_gas_dataset\n", + "\n", + "# Set up paths and dimensions\n", + "rte_rrtmgp_dir = download_rrtmgp_data()\n", + "rfmip_dir = os.path.join(rte_rrtmgp_dir, \"examples\", \"all-sky\")\n", + "ref_dir = os.path.join(rfmip_dir, \"reference\")\n", + "lw_clouds = os.path.join(rte_rrtmgp_dir, \"rrtmgp-clouds-lw-bnd.nc\")\n", + "\n", + "ncol = 24\n", + "nlay = 72\n", + "\n", + "# Create atmospheric profiles and gas concentrations\n", + "profiles = compute_profiles(300, ncol, nlay)\n", + "gas_values = {\n", + " \"co2\": 348e-6,\n", + " \"ch4\": 1650e-9,\n", + " \"n2o\": 306e-9,\n", + " \"n2\": 0.7808,\n", + " \"o2\": 0.2095,\n", + " \"co\": 0.0,\n", + "}\n", + "gases = create_gas_dataset(gas_values, dims={\"site\": ncol, \"layer\": nlay})\n", + "\n", + "# Set up atmosphere dataset\n", + "atmosphere = xr.merge([profiles, gases])\n", + "top_at_1 = (\n", + " atmosphere[\"pres_layer\"].values[0, 0] < atmosphere[\"pres_layer\"].values[0, -1]\n", + ")\n", + "t_sfc = profiles[\"temp_level\"][:, nlay if top_at_1 else 0]\n", + "atmosphere[\"surface_temperature\"] = xr.DataArray(t_sfc, dims=[\"site\"])\n", + "\n", + "# Calculate gas optical properties\n", + "gas_optics_lw = load_gas_optics(gas_optics_file=GasOpticsFiles.LW_G256)\n", + "clear_sky_optical_props = gas_optics_lw.gas_optics.compute(\n", + " atmosphere, problem_type=\"absorption\", add_to_input=False\n", + ")\n", + "clear_sky_optical_props[\"surface_emissivity\"] = 0.98\n", + "\n", + "# Calculate cloud properties and optical properties\n", + "cloud_optics = xr.load_dataset(lw_clouds)\n", + "cloud_properties = compute_clouds(\n", + " cloud_optics, ncol, nlay, profiles[\"pres_layer\"], profiles[\"temp_layer\"]\n", + ")\n", + "clouds_optical_props = compute_cloud_optics(cloud_properties, cloud_optics)\n", + "\n", + "# Combine optical properties and solve RTE\n", + "combined_optical_props = combine_optical_props(clouds_optical_props, clear_sky_optical_props)\n", + "solver = RTESolver()\n", + "fluxes = solver.solve(combined_optical_props, add_to_input=False)\n", + "\n", + "# Load reference data and verify results\n", + "ref_data = xr.load_dataset(\n", + " os.path.join(ref_dir, \"rrtmgp-allsky-lw-no-aerosols.nc\"),\n", + " decode_cf=False,\n", + ")\n", + "assert np.isclose(fluxes[\"lw_flux_up\"].values, ref_data[\"lw_flux_up\"].values.T, atol=1e-7).all()\n", + "assert np.isclose(fluxes[\"lw_flux_down\"].values, ref_data[\"lw_flux_dn\"].values.T, atol=1e-7).all()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import numpy as np\n", + "import xarray as xr\n", + "from pyrte_rrtmgp.rrtmgp_gas_optics import GasOpticsFiles, load_gas_optics\n", + "from pyrte_rrtmgp.rrtmgp_data import 
download_rrtmgp_data\n", + "from pyrte_rrtmgp.rte_solver import RTESolver\n", + "from pyrte_rrtmgp.all_skys_funcs import (\n", + " compute_clouds,\n", + " compute_cloud_optics,\n", + " combine_optical_props,\n", + " delta_scale_optical_props,\n", + ")\n", + "from pyrte_rrtmgp.utils import compute_profiles, create_gas_dataset\n", + "\n", + "# Set up paths and dimensions\n", + "rte_rrtmgp_dir = download_rrtmgp_data()\n", + "rfmip_dir = os.path.join(rte_rrtmgp_dir, \"examples\", \"all-sky\")\n", + "ref_dir = os.path.join(rfmip_dir, \"reference\")\n", + "sw_clouds = os.path.join(rte_rrtmgp_dir, \"rrtmgp-clouds-sw-bnd.nc\")\n", + "\n", + "ncol = 24\n", + "nlay = 72\n", + "\n", + "# Create atmospheric profiles and gas concentrations\n", + "profiles = compute_profiles(300, ncol, nlay)\n", + "gas_values = {\n", + " \"co2\": 348e-6,\n", + " \"ch4\": 1650e-9,\n", + " \"n2o\": 306e-9,\n", + " \"n2\": 0.7808,\n", + " \"o2\": 0.2095,\n", + " \"co\": 0.0,\n", + "}\n", + "gases = create_gas_dataset(gas_values, dims={\"site\": ncol, \"layer\": nlay})\n", + "\n", + "# Set up atmosphere dataset\n", + "atmosphere = xr.merge([profiles, gases])\n", + "top_at_1 = (\n", + " atmosphere[\"pres_layer\"].values[0, 0] < atmosphere[\"pres_layer\"].values[0, -1]\n", + ")\n", + "\n", + "# Add SW-specific surface and angle properties\n", + "gas_optics_sw = load_gas_optics(gas_optics_file=GasOpticsFiles.SW_G224)\n", + "ngpt = gas_optics_sw.sizes[\"gpt\"]\n", + "\n", + "atmosphere[\"surface_albedo_dir\"] = xr.DataArray(\n", + " 0.06,\n", + " dims=[\"site\", \"gpt\"],\n", + " coords={\"site\": np.arange(ncol), \"gpt\": np.arange(ngpt)},\n", + ")\n", + "atmosphere[\"surface_albedo_dif\"] = xr.DataArray(\n", + " 0.06,\n", + " dims=[\"site\", \"gpt\"],\n", + " coords={\"site\": np.arange(ncol), \"gpt\": np.arange(ngpt)},\n", + ")\n", + "atmosphere[\"mu0\"] = xr.DataArray(\n", + " 0.86,\n", + " dims=[\"site\", \"layer\"],\n", + " coords={\"site\": np.arange(ncol), \"layer\": np.arange(nlay)},\n", + ")\n", + "\n", + "# Calculate gas optical properties\n", + "clear_sky_optical_props = gas_optics_sw.gas_optics.compute(\n", + " atmosphere, problem_type=\"two-stream\", add_to_input=False\n", + ")\n", + "\n", + "# Calculate cloud properties and optical properties\n", + "cloud_optics = xr.load_dataset(sw_clouds)\n", + "cloud_properties = compute_clouds(\n", + " cloud_optics, ncol, nlay, profiles[\"pres_layer\"], profiles[\"temp_layer\"]\n", + ")\n", + "clouds_optical_props = compute_cloud_optics(cloud_properties, cloud_optics, lw=False)\n", + "clouds_optical_props = delta_scale_optical_props(clouds_optical_props)\n", + "\n", + "# Combine optical properties and solve RTE\n", + "combined_optical_props = combine_optical_props(clouds_optical_props, clear_sky_optical_props)\n", + "solver = RTESolver()\n", + "fluxes = solver.solve(combined_optical_props, add_to_input=False)\n", + "\n", + "# Load reference data and verify results\n", + "ref_data = xr.load_dataset(\n", + " os.path.join(ref_dir, \"rrtmgp-allsky-sw-no-aerosols.nc\"),\n", + " decode_cf=False,\n", + ")\n", + "assert np.isclose(fluxes[\"sw_flux_up\"].values, ref_data[\"sw_flux_up\"].values.T, atol=1e-7).all()\n", + "assert np.isclose(fluxes[\"sw_flux_down\"].values, ref_data[\"sw_flux_dn\"].values.T, atol=1e-7).all()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pyrte", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + 
"name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/lw_example.ipynb b/examples/lw_example.ipynb index 03ac7cb..0a2107c 100644 --- a/examples/lw_example.ipynb +++ b/examples/lw_example.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -40,10 +40,10 @@ "rld = xr.load_dataset(rld_reference, decode_cf=False)\n", "\n", "assert np.isclose(\n", - " fluxes[\"lw_flux_up_broadband\"], rlu[\"rlu\"], atol=ERROR_TOLERANCE\n", + " fluxes[\"lw_flux_up\"], rlu[\"rlu\"], atol=ERROR_TOLERANCE\n", ").all()\n", "assert np.isclose(\n", - " fluxes[\"lw_flux_down_broadband\"], rld[\"rld\"], atol=ERROR_TOLERANCE\n", + " fluxes[\"lw_flux_down\"], rld[\"rld\"], atol=ERROR_TOLERANCE\n", ").all()" ] } diff --git a/examples/sw_example.ipynb b/examples/sw_example.ipynb index a83c75f..d843690 100644 --- a/examples/sw_example.ipynb +++ b/examples/sw_example.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ diff --git a/pybind_interface.cpp b/pybind_interface.cpp index 0ef489b..9cf745c 100644 --- a/pybind_interface.cpp +++ b/pybind_interface.cpp @@ -1960,7 +1960,7 @@ PYBIND11_MODULE(pyrte_rrtmgp, m) { []( int ncol, int nlay, - int nbnd, + int ngpt, py::array_t mask, py::array_t lwp, py::array_t re, @@ -1974,19 +1974,19 @@ PYBIND11_MODULE(pyrte_rrtmgp, m) { py::array_t taussa, py::array_t taussag ) { - if (ncol <= 0 || nlay <= 0 || nbnd <= 0 || nsteps <= 0) { - throw std::runtime_error("ncol, nlay, nbnd and nsteps must be positive integers"); + if (ncol <= 0 || nlay <= 0 || ngpt <= 0 || nsteps <= 0) { + throw std::runtime_error("ncol, nlay, ngpt and nsteps must be positive integers"); } if (mask.size() != ncol * nlay) throw std::runtime_error("Invalid size for input array 'mask'"); if (lwp.size() != ncol * nlay) throw std::runtime_error("Invalid size for input array 'lwp'"); if (re.size() != ncol * nlay) throw std::runtime_error("Invalid size for input array 're'"); - if (tau_table.size() != nsteps * nbnd) throw std::runtime_error("Invalid size for input array 'tau_table'"); - if (ssa_table.size() != nsteps * nbnd) throw std::runtime_error("Invalid size for input array 'ssa_table'"); - if (asy_table.size() != nsteps * nbnd) throw std::runtime_error("Invalid size for input array 'asy_table'"); - if (tau.size() != ncol * nlay * nbnd) throw std::runtime_error("Invalid size for input array 'tau'"); - if (taussa.size() != ncol * nlay * nbnd) throw std::runtime_error("Invalid size for input array 'taussa'"); - if (taussag.size() != ncol * nlay * nbnd) throw std::runtime_error("Invalid size for input array 'taussag'"); + if (tau_table.size() != nsteps * ngpt) throw std::runtime_error("Invalid size for input array 'tau_table'"); + if (ssa_table.size() != nsteps * ngpt) throw std::runtime_error("Invalid size for input array 'ssa_table'"); + if (asy_table.size() != nsteps * ngpt) throw std::runtime_error("Invalid size for input array 'asy_table'"); + if (tau.size() != ncol * nlay * ngpt) throw std::runtime_error("Invalid size for input array 'tau'"); + if (taussa.size() != ncol * nlay * ngpt) throw std::runtime_error("Invalid size for input array 'taussa'"); + if (taussag.size() != ncol * nlay * ngpt) throw std::runtime_error("Invalid size for input array 'taussag'"); py::buffer_info buf_mask = mask.request(); 
py::buffer_info buf_lwp = lwp.request(); @@ -2001,7 +2001,7 @@ PYBIND11_MODULE(pyrte_rrtmgp, m) { fortran::rrtmgp_compute_cld_from_table( ncol, nlay, - nbnd, + ngpt, reinterpret_cast(buf_mask.ptr), reinterpret_cast(buf_lwp.ptr), reinterpret_cast(buf_re.ptr), diff --git a/pyproject.toml b/pyproject.toml index f0c90a4..776a120 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,7 @@ extend_skip_glob = ["!pyrte_rrtmgp/tests/test_python_frontend/*"] [tool.flake8] max-line-length = 88 -extend-ignore = ["E203", "W503", "D100", "D101", "D102", "D103", "D104", "D107", "E501"] +extend-ignore = ["E203", "W503", "F401", "D100", "D101", "D102", "D103", "D104", "D107", "E501"] exclude = [ ".git", "__pycache__", diff --git a/pyrte_rrtmgp/all_skys_funcs.py b/pyrte_rrtmgp/all_skys_funcs.py new file mode 100644 index 0000000..52e7bf1 --- /dev/null +++ b/pyrte_rrtmgp/all_skys_funcs.py @@ -0,0 +1,542 @@ +import numpy as np +import xarray as xr + +from pyrte_rrtmgp.kernels.rrtmgp import compute_cld_from_table +from pyrte_rrtmgp.kernels.rte import ( + delta_scale_2str, + delta_scale_2str_f, + inc_1scalar_by_1scalar_bybnd, + inc_1scalar_by_2stream_bybnd, + inc_2stream_by_1scalar_bybnd, + inc_2stream_by_2stream_bybnd, + increment_1scalar_by_1scalar, + increment_1scalar_by_2stream, + increment_2stream_by_1scalar, + increment_2stream_by_2stream, +) + + +def compute_clouds(cloud_optics, ncol, nlay, p_lay, t_lay): + """Compute cloud properties for radiative transfer calculations. + + Args: + cloud_optics: Dataset containing cloud optics data + ncol: Number of columns + nlay: Number of layers + p_lay: Pressure profile + t_lay: Temperature profile + """ + # Get min/max radii values for liquid and ice + rel_val = 0.5 * (cloud_optics["radliq_lwr"] + cloud_optics["radliq_upr"]) + rei_val = 0.5 * (cloud_optics["diamice_lwr"] + cloud_optics["diamice_upr"]) + + # Create coordinate arrays + site = np.arange(ncol) + layer = np.arange(nlay) + + # Convert inputs to xarray DataArrays if they aren't already + p_lay = xr.DataArray( + p_lay, dims=["site", "layer"], coords={"site": site, "layer": layer} + ) + t_lay = xr.DataArray( + t_lay, dims=["site", "layer"], coords={"site": site, "layer": layer} + ) + + # Create cloud mask using xarray operations + cloud_mask = ( + (p_lay > 100 * 100) & (p_lay < 900 * 100) & ((site + 1) % 3 != 0).reshape(-1, 1) + ) + + # Initialize arrays as DataArrays with zeros + lwp = xr.zeros_like(p_lay) + iwp = xr.zeros_like(p_lay) + rel = xr.zeros_like(p_lay) + rei = xr.zeros_like(p_lay) + + # Set values where clouds exist using xarray where operations + lwp = lwp.where(~(cloud_mask & (t_lay > 263)), 10.0) + rel = rel.where(~(cloud_mask & (t_lay > 263)), rel_val) + + iwp = iwp.where(~(cloud_mask & (t_lay < 273)), 10.0) + rei = rei.where(~(cloud_mask & (t_lay < 273)), rei_val) + + return xr.Dataset( + { + "lwp": lwp, + "iwp": iwp, + "rel": rel, + "rei": rei, + } + ) + + +def compute_cloud_optics(cloud_properties, cloud_optics, lw=True): + """ + Compute cloud optical properties for liquid and ice clouds. 
+ + Args: + cloud_properties: Dataset containing cloud properties + cloud_optics: Dataset containing cloud optics data + lw (bool): Whether to compute liquid water phase (True) or ice water phase (False) + + Returns: + tuple: Arrays of optical properties for both liquid and ice phases + """ + # Get dimensions + ncol, nlay = cloud_properties.sizes["site"], cloud_properties.sizes["layer"] + + # Create cloud masks + liq_mask = cloud_properties.lwp > 0 + ice_mask = cloud_properties.iwp > 0 + + # Check if cloud optics data is initialized + if not hasattr(cloud_optics, "extliq"): + raise ValueError("Cloud optics: no data has been initialized") + + # Validate particle sizes are within bounds + if np.any( + (cloud_properties.rel.where(liq_mask) < cloud_optics.radliq_lwr.values) + | (cloud_properties.rel.where(liq_mask) > cloud_optics.radliq_upr.values) + ): + raise ValueError("Cloud optics: liquid effective radius is out of bounds") + + if np.any( + (cloud_properties.rei.where(ice_mask) < cloud_optics.diamice_lwr.values) + | (cloud_properties.rei.where(ice_mask) > cloud_optics.diamice_upr.values) + ): + raise ValueError("Cloud optics: ice effective radius is out of bounds") + + # Check for negative water paths + if np.any(cloud_properties.lwp.where(liq_mask) < 0) or np.any( + cloud_properties.iwp.where(ice_mask) < 0 + ): + raise ValueError( + "Cloud optics: negative lwp or iwp where clouds are supposed to be" + ) + + # Determine if we're using band-averaged or spectral properties + gpt_dim = "nband" if "gpt" not in cloud_optics.sizes else "gpt" + gpt_out_dim = "bnd" if gpt_dim == "nband" else "gpt" + ngpt = cloud_optics.sizes["nband" if gpt_dim == "nband" else "gpt"] + + # Compute optical properties using lookup tables + # Liquid phase + step_size = (cloud_optics.radliq_upr - cloud_optics.radliq_lwr) / ( + cloud_optics.sizes["nsize_liq"] - 1 + ) + + ltau, ltaussa, ltaussag = xr.apply_ufunc( + compute_cld_from_table, + ncol, + nlay, + ngpt, + liq_mask, + cloud_properties.lwp, + cloud_properties.rel, + cloud_optics.sizes["nsize_liq"], + step_size.values, + cloud_optics.radliq_lwr.values, + cloud_optics.extliq, + cloud_optics.ssaliq, + cloud_optics.asyliq, + input_core_dims=[ + [], # ncol + [], # nlay + [], + ["site", "layer"], # liq_mask + ["site", "layer"], # lwp + ["site", "layer"], # rel + [], # nsize_liq + [], # step_size + [], # radliq_lwr + ["nsize_liq", gpt_dim], # extliq + ["nsize_liq", gpt_dim], # ssaliq + ["nsize_liq", gpt_dim], # asyliq + ], + output_core_dims=[ + ["site", "layer", gpt_out_dim], # ltau + ["site", "layer", gpt_out_dim], # ltaussa + ["site", "layer", gpt_out_dim], # ltaussag + ], + vectorize=True, + dask="allowed", + ) + + # Ice phase + step_size = (cloud_optics.diamice_upr - cloud_optics.diamice_lwr) / ( + cloud_optics.sizes["nsize_ice"] - 1 + ) + ice_roughness = 1 + + itau, itaussa, itaussag = xr.apply_ufunc( + compute_cld_from_table, + ncol, + nlay, + ngpt, + ice_mask, + cloud_properties.iwp, + cloud_properties.rei, + cloud_optics.sizes["nsize_ice"], + step_size.values, + cloud_optics.diamice_lwr.values, + cloud_optics.extice[ice_roughness, :, :], + cloud_optics.ssaice[ice_roughness, :, :], + cloud_optics.asyice[ice_roughness, :, :], + input_core_dims=[ + [], # ncol + [], # nlay + [], # ngpt + ["site", "layer"], # ice_mask + ["site", "layer"], # iwp + ["site", "layer"], # rei + [], # nsize_ice + [], # step_size + [], # diamice_lwr + ["nsize_ice", gpt_dim], # extice + ["nsize_ice", gpt_dim], # ssaice + ["nsize_ice", gpt_dim], # asyice + ], + output_core_dims=[ + ["site", 
"layer", gpt_out_dim], # itau + ["site", "layer", gpt_out_dim], # itaussa + ["site", "layer", gpt_out_dim], # itaussag + ], + vectorize=True, + dask="allowed", + ) + + # Combine liquid and ice contributions + if lw: + tau = (ltau - ltaussa) + (itau - itaussa) + return tau.to_dataset(name="tau") + else: + tau = ltau + itau + taussa = ltaussa + itaussa + taussag = ltaussag + itaussag + + # Calculate derived quantities + ssa = xr.where(tau > np.finfo(float).eps, taussa / tau, 0.0) + g = xr.where(taussa > np.finfo(float).eps, taussag / taussa, 0.0) + + return xr.Dataset({"tau": tau, "ssa": ssa, "g": g}) + + +def combine_optical_props(op1, op2): + """Combine two sets of optical properties, modifying op1 in place. + + Args: + op1: First set of optical properties, will be modified. + op2: Second set of optical properties to add. + """ + ncol = op2.sizes["site"] + nlay = op2.sizes["layer"] + ngpt = op2.sizes["gpt"] + + # Check if input has only tau (1-stream) or tau, ssa, g (2-stream) + + is_1stream_1 = "tau" in list(op1.data_vars) and "ssa" not in list(op1.data_vars) + is_1stream_2 = "tau" in list(op2.data_vars) and "ssa" not in list(op2.data_vars) + + if "gpt" in op1["tau"].sizes: + if is_1stream_1: + if is_1stream_2: + # 1-stream by 1-stream + combined_tau = xr.apply_ufunc( + increment_1scalar_by_1scalar, + ncol, + nlay, + ngpt, + op2["tau"], + op1["tau"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + else: + # 1-stream by 2-stream + combined_tau = xr.apply_ufunc( + increment_1scalar_by_2stream, + ncol, + nlay, + ngpt, + op2["tau"], + op1["tau"], + op1["ssa"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + else: # 2-stream output + if is_1stream_2: + # 2-stream by 1-stream + combined_tau, combined_ssa = xr.apply_ufunc( + increment_2stream_by_1scalar, + ncol, + nlay, + ngpt, + op2["tau"], + op2["ssa"], + op1["tau"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + output_core_dims=[ + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + dask="allowed", + ) + op2["tau"] = combined_tau + op2["ssa"] = combined_ssa + else: + # 2-stream by 2-stream + combined_tau, combined_ssa, combined_g = xr.apply_ufunc( + increment_2stream_by_2stream, + ncol, + nlay, + ngpt, + op2["tau"], + op2["ssa"], + op2["g"], + op1["tau"], + op1["ssa"], + op1["g"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + output_core_dims=[ + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ], + dask="allowed", + ) + op2["tau"] = combined_tau + op2["ssa"] = combined_ssa + op2["g"] = combined_g + return op2 + + else: + # By-band increment (when op2's ngpt equals op1's nband) + if op2.sizes["bnd"] != op1.sizes["bnd"]: + raise ValueError("Incompatible g-point structures for by-band increment") + + if is_1stream_1: + if is_1stream_2: + # 1-stream by 1-stream by band + combined_tau = xr.apply_ufunc( + inc_1scalar_by_1scalar_bybnd, + ncol, + nlay, + ngpt, + op2["tau"], + op1["tau"], + op2.sizes["bnd"], + op2["bnd_limits_gpt"], + 
input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "bnd"], + [], + ["pair", "bnd"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + return op2 + else: + # 1-stream by 2-stream by band + combined_tau = xr.apply_ufunc( + inc_1scalar_by_2stream_bybnd, + ncol, + nlay, + ngpt, + op2["tau"], + op1["tau"], + op1["ssa"], + op2.sizes["bnd"], + op2["bnd_limits_gpt"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "bnd"], + ["site", "layer", "bnd"], + [], + ["pair", "bnd"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + return op2 + else: + if is_1stream_2: + # 2-stream by 1-stream by band + combined_tau = xr.apply_ufunc( + inc_2stream_by_1scalar_bybnd, + ncol, + nlay, + ngpt, + op2["tau"], + op2["ssa"], + op1["tau"], + op2.sizes["bnd"], + op2["bnd_limits_gpt"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "bnd"], + [], + ["pair", "bnd"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + return op2 + else: + # 2-stream by 2-stream by band + combined_tau = xr.apply_ufunc( + inc_2stream_by_2stream_bybnd, + ncol, + nlay, + ngpt, + op2["tau"], + op2["ssa"], + op2["g"], + op1["tau"], + op1["ssa"], + op1["g"], + op2.sizes["bnd"], + op2["bnd_limits_gpt"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "gpt"], + ["site", "layer", "bnd"], + ["site", "layer", "bnd"], + ["site", "layer", "bnd"], + [], + ["pair", "bnd"], + ], + output_core_dims=[["site", "layer", "gpt"]], + dask="allowed", + ) + op2["tau"] = combined_tau + return op2 + + +def delta_scale_optical_props(optical_props, forward_scattering=None): + """Apply delta scaling to 2-stream optical properties. 
+ + Args: + optical_props: xarray Dataset containing tau, ssa, and g variables + forward_scattering: Optional array of forward scattering fraction (g**2 if not provided) + Must have shape (ncol, nlay, ngpt) if provided + + Raises: + ValueError: If forward_scattering array has incorrect dimensions or values outside [0,1] + """ + # Get dimensions + ncol = optical_props.sizes["site"] + nlay = optical_props.sizes["layer"] + gpt_dim = "gpt" if "gpt" in optical_props.sizes else "bnd" + ngpt = optical_props.sizes[gpt_dim] + + # Call kernel with forward scattering + if forward_scattering is not None: + tau, ssa, g = xr.apply_ufunc( + delta_scale_2str_f, + ncol, + nlay, + ngpt, + optical_props["tau"], + optical_props["ssa"], + optical_props["g"], + forward_scattering, + input_core_dims=[ + [], + [], + [], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ], + output_core_dims=[ + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ], + dask="allowed", + ) + else: + tau, ssa, g = xr.apply_ufunc( + delta_scale_2str, + ncol, + nlay, + ngpt, + optical_props["tau"], + optical_props["ssa"], + optical_props["g"], + input_core_dims=[ + [], + [], + [], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ], + output_core_dims=[ + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ["site", "layer", gpt_dim], + ], + dask="allowed", + ) + + # Update the dataset with modified values + optical_props["tau"] = tau + optical_props["ssa"] = ssa + optical_props["g"] = g + + return optical_props diff --git a/pyrte_rrtmgp/config.py b/pyrte_rrtmgp/config.py index 314e9b7..41830f9 100644 --- a/pyrte_rrtmgp/config.py +++ b/pyrte_rrtmgp/config.py @@ -8,25 +8,25 @@ # Mapping of standard gas names to RRTMGP-specific names DEFAULT_GAS_MAPPING: Final[Dict[str, str]] = { - "h2o": "water_vapor", - "co2": "carbon_dioxide_GM", - "o3": "ozone", - "n2o": "nitrous_oxide_GM", - "co": "carbon_monoxide_GM", - "ch4": "methane_GM", - "o2": "oxygen_GM", - "n2": "nitrogen_GM", - "ccl4": "carbon_tetrachloride_GM", - "cfc11": "cfc11_GM", - "cfc12": "cfc12_GM", - "cfc22": "hcfc22_GM", - "hfc143a": "hfc143a_GM", - "hfc125": "hfc125_GM", - "hfc23": "hfc23_GM", - "hfc32": "hfc32_GM", - "hfc134a": "hfc134a_GM", - "cf4": "cf4_GM", - "no2": "no2", + "h2o": ["h2o", "water_vapor"], + "co2": ["co2", "carbon_dioxide_GM"], + "o3": ["o3", "ozone"], + "n2o": ["n2o", "nitrous_oxide_GM"], + "co": ["co", "carbon_monoxide_GM"], + "ch4": ["ch4", "methane_GM"], + "o2": ["o2", "oxygen_GM"], + "n2": ["n2", "nitrogen_GM"], + "ccl4": ["ccl4", "carbon_tetrachloride_GM"], + "cfc11": ["cfc11", "cfc11_GM"], + "cfc12": ["cfc12", "cfc12_GM"], + "cfc22": ["cfc22", "hcfc22_GM"], + "hfc143a": ["hfc143a", "hfc143a_GM"], + "hfc125": ["hfc125", "hfc125_GM"], + "hfc23": ["hfc23", "hfc23_GM"], + "hfc32": ["hfc32", "hfc32_GM"], + "hfc134a": ["hfc134a", "hfc134a_GM"], + "cf4": ["cf4", "cf4_GM"], + "no2": ["no2", "no2"], } # Mapping of standard dimension names to dataset-specific names diff --git a/pyrte_rrtmgp/data_validation.py b/pyrte_rrtmgp/data_validation.py index bc511e2..f425bf9 100644 --- a/pyrte_rrtmgp/data_validation.py +++ b/pyrte_rrtmgp/data_validation.py @@ -5,64 +5,10 @@ from pyrte_rrtmgp.config import ( DEFAULT_DIM_MAPPING, - DEFAULT_GAS_MAPPING, DEFAULT_VAR_MAPPING, ) -@dataclass -class GasMapping: - """Class for managing gas name mappings between standard and dataset-specific names. 
- - Attributes: - _mapping: Dictionary mapping standard gas names to dataset-specific names - _required_gases: Set of required gas names that must be present - """ - - _mapping: Dict[str, str] - _required_gases: Set[str] - - @classmethod - def create( - cls, gas_names: Set[str], custom_mapping: Optional[Dict[str, str]] = None - ) -> "GasMapping": - """Create a new GasMapping instance with default and custom mappings. - - Args: - gas_names: Set of required gas names - custom_mapping: Optional custom mapping to override defaults - - Returns: - New GasMapping instance - """ - mapping = DEFAULT_GAS_MAPPING.copy() - if custom_mapping: - mapping.update(custom_mapping) - - return cls(mapping, gas_names) - - def validate(self) -> Dict[str, str]: - """Validate and return the final gas name mapping. - - Returns: - Dictionary mapping standard gas names to dataset-specific names - - Raises: - ValueError: If a required gas is not found in any mapping - """ - validated_mapping = {} - - for gas in self._required_gases: - if gas not in self._mapping: - if gas not in DEFAULT_GAS_MAPPING: - raise ValueError(f"Gas {gas} not found in any mapping") - validated_mapping[gas] = DEFAULT_GAS_MAPPING[gas] - else: - validated_mapping[gas] = self._mapping[gas] - - return validated_mapping - - @dataclass class DatasetMapping: """Container for dimension and variable name mappings. @@ -111,9 +57,9 @@ def set_mapping(self, mapping: DatasetMapping) -> None: Raises: ValueError: If mapped dimensions don't exist in dataset """ - missing_dims = set(mapping.dim_mapping.values()) - set(self._obj.dims) - if missing_dims: - raise ValueError(f"Dataset missing required dimensions: {missing_dims}") + # missing_dims = set(mapping.dim_mapping.values()) - set(self._obj.dims) + # if missing_dims: + # raise ValueError(f"Dataset missing required dimensions: {missing_dims}") self._obj.attrs["dataset_mapping"] = asdict(mapping) diff --git a/pyrte_rrtmgp/kernels/rrtmgp.py b/pyrte_rrtmgp/kernels/rrtmgp.py index 1dbae78..353aa47 100644 --- a/pyrte_rrtmgp/kernels/rrtmgp.py +++ b/pyrte_rrtmgp/kernels/rrtmgp.py @@ -4,6 +4,7 @@ import numpy.typing as npt from pyrte_rrtmgp.pyrte_rrtmgp import ( + rrtmgp_compute_cld_from_table, rrtmgp_compute_Planck_source, rrtmgp_compute_tau_absorption, rrtmgp_compute_tau_rayleigh, @@ -118,7 +119,6 @@ def interpolation( rrtmgp_interpolation(*args) - tropo = tropo != 0 # Convert to boolean return jtemp, fmajor, fminor, col_mix, tropo, jeta, jpress @@ -466,3 +466,72 @@ def compute_tau_rayleigh( rrtmgp_compute_tau_rayleigh(*args) return tau_rayleigh + + +def compute_cld_from_table( + ncol: int, + nlay: int, + ngpt: int, + mask: npt.NDArray[np.bool_], + lwp: npt.NDArray[np.float64], + re: npt.NDArray[np.float64], + nsteps: int, + step_size: float, + offset: float, + tau_table: npt.NDArray[np.float64], + ssa_table: npt.NDArray[np.float64], + asy_table: npt.NDArray[np.float64], +) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.float64]]: + """Compute cloud optical properties using lookup tables. + + For size dimension, select size bin appropriate for the requested aerosol size. + For rh dimension, linearly interpolate values from a lookup table with "nrh" + unevenly-spaced elements "aero_rh". The last dimension for all tables is band. + Returns zero where no aerosol is present. 
+ + Args: + ncol: Number of atmospheric columns + nlay: Number of atmospheric layers + nbnd: Number of spectral bands + mask: Cloud mask with shape (ncol, nlay) + lwp: Liquid water path with shape (ncol, nlay) + re: Effective radius with shape (ncol, nlay) + nsteps: Number of steps in the lookup tables + step_size: Step size for the lookup tables + offset: Offset for the lookup tables + tau_table: Optical depth lookup table with shape (nsteps, ngpt) + ssa_table: Single scattering albedo lookup table with shape (nsteps, ngpt) + asy_table: Asymmetry parameter lookup table with shape (nsteps, ngpt) + + Returns: + Tuple containing: + - tau: Cloud optical depth with shape (ncol, nlay, ngpt) + - taussa: Product of tau and single scattering albedo with shape (ncol, nlay, ngpt) + - taussag: Product of taussa and asymmetry parameter with shape (ncol, nlay, ngpt) + """ + # Initialize output arrays + tau = np.zeros((ncol, nlay, ngpt), dtype=np.float64, order="F") + taussa = np.zeros((ncol, nlay, ngpt), dtype=np.float64, order="F") + taussag = np.zeros((ncol, nlay, ngpt), dtype=np.float64, order="F") + + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(mask), + np.asfortranarray(lwp), + np.asfortranarray(re), + nsteps, + step_size, + offset, + np.asfortranarray(tau_table), + np.asfortranarray(ssa_table), + np.asfortranarray(asy_table), + tau, + taussa, + taussag, + ] + + rrtmgp_compute_cld_from_table(*args) + + return tau, taussa, taussag diff --git a/pyrte_rrtmgp/kernels/rte.py b/pyrte_rrtmgp/kernels/rte.py index c5848d3..9d97cac 100644 --- a/pyrte_rrtmgp/kernels/rte.py +++ b/pyrte_rrtmgp/kernels/rte.py @@ -4,6 +4,16 @@ import numpy.typing as npt from pyrte_rrtmgp.pyrte_rrtmgp import ( + rte_delta_scale_2str_f_k, + rte_delta_scale_2str_k, + rte_inc_1scalar_by_1scalar_bybnd, + rte_inc_1scalar_by_2stream_bybnd, + rte_inc_2stream_by_1scalar_bybnd, + rte_inc_2stream_by_2stream_bybnd, + rte_increment_1scalar_by_1scalar, + rte_increment_1scalar_by_2stream, + rte_increment_2stream_by_1scalar, + rte_increment_2stream_by_2stream, rte_lw_solver_2stream, rte_lw_solver_noscat, rte_sw_solver_2stream, @@ -304,3 +314,362 @@ def sw_solver_2stream( rte_sw_solver_2stream(*args) return flux_up, flux_dn, flux_dir, broadband_up, broadband_dn, broadband_dir + + +def increment_1scalar_by_1scalar( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Increment one set of optical properties with another set (scalar by scalar). + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(tau_in), + ] + + rte_increment_1scalar_by_1scalar(*args) + + return tau_inout + + +def increment_1scalar_by_2stream( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + ssa_in: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Increment scalar optical properties with 2-stream properties. 
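+
+    Only the absorbing part of the two-stream input contributes: the kernel adds
+    tau_in * (1 - ssa_in) to tau_inout.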
+ + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, ngpt) + ssa_in: Input single scattering albedo array (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(tau_in), + np.asfortranarray(ssa_in), + ] + + rte_increment_1scalar_by_2stream(*args) + + return tau_inout + + +def increment_2stream_by_1scalar( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + ssa_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Increment 2-stream optical properties with scalar properties. + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + ssa_inout: Single scattering albedo array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(ssa_inout), + np.asfortranarray(tau_in), + ] + + rte_increment_2stream_by_1scalar(*args) + + return tau_inout, ssa_inout + + +def increment_2stream_by_2stream( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + ssa_inout: npt.NDArray[np.float64], + g_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + ssa_in: npt.NDArray[np.float64], + g_in: npt.NDArray[np.float64], +) -> npt.NDArray[np.float64]: + """Increment one set of 2-stream optical properties with another. + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + ssa_inout: Single scattering albedo array to be modified (ncol, nlay, ngpt) + g_inout: Asymmetry parameter array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, ngpt) + ssa_in: Input single scattering albedo array (ncol, nlay, ngpt) + g_in: Input asymmetry parameter array (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(ssa_inout), + np.asfortranarray(g_inout), + np.asfortranarray(tau_in), + np.asfortranarray(ssa_in), + np.asfortranarray(g_in), + ] + + rte_increment_2stream_by_2stream(*args) + + return tau_inout, ssa_inout, g_inout + + +def inc_1scalar_by_1scalar_bybnd( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + nbnd: int, + band_lims_gpoint: npt.NDArray[np.int32], +) -> npt.NDArray[np.float64]: + """Increment one set of scalar optical properties with another set by band. 
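+
+    The band-resolved tau_in is broadcast to every g-point within its band
+    (using band_lims_gpoint) before being added to tau_inout.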
+ + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, nbnd) + nbnd: Number of bands + band_lims_gpoint: Band limits for g-points (2, nbnd) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(tau_in), + nbnd, + np.asfortranarray(band_lims_gpoint), + ] + + rte_inc_1scalar_by_1scalar_bybnd(*args) + + return tau_inout + + +def inc_1scalar_by_2stream_bybnd( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + ssa_in: npt.NDArray[np.float64], + nbnd: int, + band_lims_gpoint: npt.NDArray[np.int32], +) -> npt.NDArray[np.float64]: + """Increment scalar optical properties with 2-stream properties by band. + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, nbnd) + ssa_in: Input single scattering albedo array (ncol, nlay, nbnd) + nbnd: Number of bands + band_lims_gpoint: Band limits for g-points (2, nbnd) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(tau_in), + np.asfortranarray(ssa_in), + nbnd, + np.asfortranarray(band_lims_gpoint), + ] + + rte_inc_1scalar_by_2stream_bybnd(*args) + + return tau_inout + + +def inc_2stream_by_1scalar_bybnd( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + ssa_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + nbnd: int, + band_lims_gpoint: npt.NDArray[np.int32], +) -> npt.NDArray[np.float64]: + """Increment 2-stream optical properties with scalar properties by band. + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + ssa_inout: Single scattering albedo array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, nbnd) + nbnd: Number of bands + band_lims_gpoint: Band limits for g-points (2, nbnd) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(ssa_inout), + np.asfortranarray(tau_in), + nbnd, + np.asfortranarray(band_lims_gpoint), + ] + + rte_inc_2stream_by_1scalar_bybnd(*args) + + return tau_inout + + +def inc_2stream_by_2stream_bybnd( + ncol: int, + nlay: int, + ngpt: int, + tau_inout: npt.NDArray[np.float64], + ssa_inout: npt.NDArray[np.float64], + g_inout: npt.NDArray[np.float64], + tau_in: npt.NDArray[np.float64], + ssa_in: npt.NDArray[np.float64], + g_in: npt.NDArray[np.float64], + nbnd: int, + band_lims_gpoint: npt.NDArray[np.int32], +) -> npt.NDArray[np.float64]: + """Increment one set of 2-stream optical properties with another by band. 
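+
+    The band-resolved inputs are broadcast across each band's g-points; as in the
+    g-point version, tau, tau*ssa and tau*ssa*g are accumulated.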
+ + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau_inout: Optical depth array to be modified (ncol, nlay, ngpt) + ssa_inout: Single scattering albedo array to be modified (ncol, nlay, ngpt) + g_inout: Asymmetry parameter array to be modified (ncol, nlay, ngpt) + tau_in: Input optical depth array (ncol, nlay, nbnd) + ssa_in: Input single scattering albedo array (ncol, nlay, nbnd) + g_in: Input asymmetry parameter array (ncol, nlay, nbnd) + nbnd: Number of bands + band_lims_gpoint: Band limits for g-points (2, nbnd) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau_inout), + np.asfortranarray(ssa_inout), + np.asfortranarray(g_inout), + np.asfortranarray(tau_in), + np.asfortranarray(ssa_in), + np.asfortranarray(g_in), + nbnd, + np.asfortranarray(band_lims_gpoint), + ] + + rte_inc_2stream_by_2stream_bybnd(*args) + + return tau_inout + + +def delta_scale_2str( + ncol: int, + nlay: int, + ngpt: int, + tau: npt.NDArray[np.float64], + ssa: npt.NDArray[np.float64], + g: npt.NDArray[np.float64], +) -> None: + """Apply the delta-scaling transformation to two-stream radiative properties. + + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau: Optical depth array to be modified (ncol, nlay, ngpt) + ssa: Single scattering albedo array to be modified (ncol, nlay, ngpt) + g: Asymmetry parameter array to be modified (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau), + np.asfortranarray(ssa), + np.asfortranarray(g), + ] + + rte_delta_scale_2str_k(*args) + + return tau, ssa, g + + +def delta_scale_2str_f( + ncol: int, + nlay: int, + ngpt: int, + tau: npt.NDArray[np.float64], + ssa: npt.NDArray[np.float64], + g: npt.NDArray[np.float64], + f: npt.NDArray[np.float64], +) -> None: + """Apply the delta-scaling transformation to two-stream radiative properties with forward scattering fraction. 
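+
+    With wf = ssa * f, the standard delta rescaling is applied:
+    tau' = (1 - wf) * tau, ssa' = (ssa - wf) / (1 - wf), g' = (g - f) / (1 - f).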
+ + Args: + ncol: Number of columns + nlay: Number of layers + ngpt: Number of g-points + tau: Optical depth array to be modified (ncol, nlay, ngpt) + ssa: Single scattering albedo array to be modified (ncol, nlay, ngpt) + g: Asymmetry parameter array to be modified (ncol, nlay, ngpt) + f: Forward scattering fraction array (ncol, nlay, ngpt) + """ + args = [ + ncol, + nlay, + ngpt, + np.asfortranarray(tau), + np.asfortranarray(ssa), + np.asfortranarray(g), + np.asfortranarray(f), + ] + + rte_delta_scale_2str_f_k(*args) + + return tau, ssa, g diff --git a/pyrte_rrtmgp/rrtmgp_cloud_optics.py b/pyrte_rrtmgp/rrtmgp_cloud_optics.py new file mode 100644 index 0000000..e69de29 diff --git a/pyrte_rrtmgp/rrtmgp_gas_optics.py b/pyrte_rrtmgp/rrtmgp_gas_optics.py index c196892..25dc22d 100644 --- a/pyrte_rrtmgp/rrtmgp_gas_optics.py +++ b/pyrte_rrtmgp/rrtmgp_gas_optics.py @@ -8,6 +8,7 @@ import pandas as pd import xarray as xr +from pyrte_rrtmgp.config import DEFAULT_GAS_MAPPING from pyrte_rrtmgp.constants import ( AVOGAD, HELMERT1, @@ -19,7 +20,6 @@ from pyrte_rrtmgp.data_types import GasOpticsFiles, ProblemTypes from pyrte_rrtmgp.data_validation import ( AtmosphericMapping, - GasMapping, create_default_mapping, ) from pyrte_rrtmgp.kernels.rrtmgp import ( @@ -134,17 +134,12 @@ def _initialize_pressure_levels( Modified atmosphere dataset if inplace=False, otherwise None """ pres_level_var = atmosphere.mapping.get_var("pres_level") - - min_press = self._dataset["press_ref"].min().item() - - min_index = np.argmin(atmosphere[pres_level_var].data) min_press = self._dataset["press_ref"].min().item() + sys.float_info.epsilon - # Replace values smaller than min_press with min_press at min_index - atmosphere[pres_level_var][:, min_index] = xr.where( - atmosphere[pres_level_var][:, min_index] < min_press, + atmosphere[pres_level_var] = xr.where( + atmosphere[pres_level_var] < min_press, min_press, - atmosphere[pres_level_var][:, min_index], + atmosphere[pres_level_var], ) if not inplace: @@ -321,7 +316,7 @@ def interpolate( [site_dim, layer_dim], # jpress ], vectorize=True, - dask="allowed", + dask="parallelized", ) interpolation_results = xr.Dataset( @@ -374,9 +369,19 @@ def tau_absorption( lower_gases_mask = np.isin(minor_gases_lower, self._gas_names) upper_gases_mask = np.isin(minor_gases_upper, self._gas_names) - # TODO: Hardcoded 16, but shouldn't it be nbnd? 
- upper_gases_mask_expanded = np.repeat(upper_gases_mask, 16) - lower_gases_mask_expanded = np.repeat(lower_gases_mask, 16) + lower_gpt_sizes = ( + (self._dataset["minor_limits_gpt_lower"].diff(dim="pair") + 1) + .transpose() + .values[0] + ) + upper_gpt_sizes = ( + (self._dataset["minor_limits_gpt_upper"].diff(dim="pair") + 1) + .transpose() + .values[0] + ) + + upper_gases_mask_expanded = np.repeat(upper_gases_mask, upper_gpt_sizes) + lower_gases_mask_expanded = np.repeat(lower_gases_mask, lower_gpt_sizes) reduced_dataset = self._dataset.isel( contributors_lower=lower_gases_mask_expanded @@ -413,6 +418,13 @@ def tau_absorption( idx_minor_scaling_lower = self.get_idx_minor(scaling_gas_lower) idx_minor_scaling_upper = self.get_idx_minor(scaling_gas_upper) + kminor_start_lower = self._dataset["kminor_start_lower"].isel( + minor_absorber_intervals_lower=slice(nminorlower) + ) + kminor_start_upper = self._dataset["kminor_start_upper"].isel( + minor_absorber_intervals_upper=slice(nminorupper) + ) + pres_layer_var = atmosphere.mapping.get_var("pres_layer") temp_layer_var = atmosphere.mapping.get_var("temp_layer") @@ -447,8 +459,8 @@ def tau_absorption( idx_minor_upper, idx_minor_scaling_lower, idx_minor_scaling_upper, - reduced_dataset["kminor_start_lower"], - reduced_dataset["kminor_start_upper"], + kminor_start_lower, + kminor_start_upper, gas_interpolation_data["tropopause_mask"], gas_interpolation_data["column_mix"], gas_interpolation_data["fmajor"], @@ -698,11 +710,17 @@ def compute( Raises: ValueError: If problem_type is invalid """ - # Create and validate gas mapping - gas_mapping = GasMapping.create(self._gas_names, gas_name_map).validate() - gas_mapping = { - k: v for k, v in gas_mapping.items() if v in list(atmosphere.data_vars) - } + gas_mapping = {} + if gas_name_map is None: + for gas, valid_names in DEFAULT_GAS_MAPPING.items(): + for gas_data_name in list(atmosphere.data_vars): + if gas_data_name in valid_names: + gas_mapping[gas] = gas_data_name + else: + for gas in DEFAULT_GAS_MAPPING: + if gas in list(gas_name_map.keys()): + gas_mapping[gas] = gas_name_map[gas] + self._gas_names = [ k for k, v in gas_mapping.items() if v in list(atmosphere.data_vars) ] @@ -991,11 +1009,20 @@ def compute_sources(self, atmosphere: xr.Dataset, *args, **kwargs) -> xr.DataArr + (sb_index - b_offset) * solar_source_sunspot ) - total_solar_irradiance = atmosphere["total_solar_irradiance"] - - toa_flux = solar_source.broadcast_like(total_solar_irradiance) - def_tsi = toa_flux.sum(dim="gpt") - return (toa_flux * (total_solar_irradiance / def_tsi)).rename("toa_source") + # Check if total_solar_irradiance is available in atmosphere + if "total_solar_irradiance" in atmosphere: + total_solar_irradiance = atmosphere["total_solar_irradiance"] + toa_flux = solar_source.broadcast_like(total_solar_irradiance) + def_tsi = toa_flux.sum(dim="gpt") + return (toa_flux * (total_solar_irradiance / def_tsi)).rename("toa_source") + else: + # Compute normalization factor + norm = 1.0 / solar_source.sum(dim="gpt") + default_tsi = self._dataset["tsi_default"] + # Scale solar source to default TSI + site_dim = atmosphere.mapping.get_dim("site") + toa_source = (solar_source * default_tsi * norm).rename("toa_source") + return toa_source.expand_dims({site_dim: atmosphere[site_dim]}) def compute_boundary_conditions(self, atmosphere: xr.Dataset) -> xr.Dataset: """Compute surface and solar boundary conditions. 
@@ -1007,25 +1034,12 @@ def compute_boundary_conditions(self, atmosphere: xr.Dataset) -> xr.Dataset: Dataset containing solar zenith angles, surface albedos and solar angle mask """ layer_dim = atmosphere.mapping.get_dim("layer") - + site_dim = atmosphere.mapping.get_dim("site") solar_zenith_angle_var = atmosphere.mapping.get_var("solar_zenith_angle") surface_albedo_var = atmosphere.mapping.get_var("surface_albedo") surface_albedo_dir_var = atmosphere.mapping.get_var("surface_albedo_dir") surface_albedo_dif_var = atmosphere.mapping.get_var("surface_albedo_dif") - usecol_values = atmosphere[solar_zenith_angle_var] < ( - 90.0 - 2.0 * np.spacing(90.0) - ) - usecol_values = usecol_values.rename("solar_angle_mask") - mu0 = xr.where( - usecol_values, - np.cos(np.radians(atmosphere[solar_zenith_angle_var])), - 1.0, - ) - solar_zenith_angle = mu0.broadcast_like(atmosphere[layer_dim]).rename( - "solar_zenith_angle" - ) - if surface_albedo_dir_var not in atmosphere.data_vars: surface_albedo_direct = atmosphere[surface_albedo_var] surface_albedo_direct = surface_albedo_direct.rename( @@ -1045,14 +1059,38 @@ def compute_boundary_conditions(self, atmosphere: xr.Dataset) -> xr.Dataset: "surface_albedo_diffuse" ) - return xr.merge( - [ - solar_zenith_angle, - surface_albedo_direct, - surface_albedo_diffuse, + data_vars = [ + surface_albedo_direct, + surface_albedo_diffuse, + ] + + if solar_zenith_angle_var in atmosphere.data_vars: + usecol_values = atmosphere[solar_zenith_angle_var] < ( + 90.0 - 2.0 * np.spacing(90.0) + ) + if layer_dim in usecol_values.dims: + usecol_values = usecol_values.rename("solar_angle_mask").isel( + {layer_dim: 0} + ) + else: + usecol_values = usecol_values.rename("solar_angle_mask") + mu0 = xr.where( usecol_values, - ] - ) + np.cos(np.radians(atmosphere[solar_zenith_angle_var])), + 1.0, + ) + solar_zenith_angle = mu0.broadcast_like(atmosphere[layer_dim]).rename("mu0") + data_vars.append(solar_zenith_angle) + data_vars.append(usecol_values) + elif "mu0" in atmosphere.data_vars: + atmosphere["solar_angle_mask"] = xr.full_like( + atmosphere[site_dim], True + ).rename("solar_angle_mask") + atmosphere["mu0"] = atmosphere["mu0"].broadcast_like(atmosphere[layer_dim]) + data_vars.append(atmosphere["mu0"]) + data_vars.append(atmosphere["solar_angle_mask"]) + + return xr.merge(data_vars) def tau_rayleigh(self, gas_interpolation_data: xr.Dataset) -> xr.Dataset: """Compute Rayleigh scattering optical depth. 
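Note on the gas-mapping change above: DEFAULT_GAS_MAPPING now maps each standard gas name to a list of accepted dataset variable names, and compute() picks whichever accepted name is present in the atmosphere dataset. A minimal sketch of that resolution logic (the helper name resolve_gas_names is hypothetical; compute() inlines an equivalent loop):

from pyrte_rrtmgp.config import DEFAULT_GAS_MAPPING

def resolve_gas_names(atmosphere_vars):
    """Map standard gas names to whichever accepted variable name the dataset provides."""
    mapping = {}
    for gas, accepted_names in DEFAULT_GAS_MAPPING.items():
        for name in accepted_names:
            if name in atmosphere_vars:
                mapping[gas] = name
    return mapping

# e.g. a dataset holding "h2o", "o3" and "carbon_dioxide_GM" resolves to
# {"h2o": "h2o", "o3": "o3", "co2": "carbon_dioxide_GM"}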
diff --git a/pyrte_rrtmgp/rte_solver.py b/pyrte_rrtmgp/rte_solver.py index daec04f..85580f5 100644 --- a/pyrte_rrtmgp/rte_solver.py +++ b/pyrte_rrtmgp/rte_solver.py @@ -229,7 +229,7 @@ def _compute_sw_fluxes( problem_ds["tau"], problem_ds["ssa"], problem_ds["g"], - problem_ds[problem_ds.mapping.get_var("solar_zenith_angle")], + problem_ds["mu0"], problem_ds["surface_albedo_direct"], problem_ds["surface_albedo_diffuse"], problem_ds["toa_source"], diff --git a/pyrte_rrtmgp/tests/test_exported_functions.py b/pyrte_rrtmgp/tests/test_exported_functions.py index 154f817..0ca7d8a 100644 --- a/pyrte_rrtmgp/tests/test_exported_functions.py +++ b/pyrte_rrtmgp/tests/test_exported_functions.py @@ -216,7 +216,7 @@ def test_rte_increment_1scalar_by_nstream(): ] * (1.0 - tau_in[icol - 1, ilay - 1, igpt - 1]) py.rte_increment_1scalar_by_nstream(*parameters) - assert np.array_equal(tau_inout, res) + assert np.allclose(tau_inout, res, rtol=1e-7) ########################################### @@ -394,11 +394,9 @@ def test_rte_increment_2stream_by_2stream(): res_tau_inout[icol - 1, ilay - 1, igpt - 1] = tau12 py.rte_increment_2stream_by_2stream(*parameters) - assert ( - np.array_equal(res_tau_inout, tau_inout) - and np.array_equal(res_ssa_inout, ssa_inout) - and np.array_equal(res_g_inout, g_inout) - ) + assert np.allclose(res_tau_inout, tau_inout, rtol=1e-7) + assert np.allclose(res_ssa_inout, ssa_inout, rtol=1e-7) + assert np.allclose(res_g_inout, g_inout, rtol=1e-7) ########################################### diff --git a/pyrte_rrtmgp/tests/test_python_frontend/test_cld_lw_solver.py b/pyrte_rrtmgp/tests/test_python_frontend/test_cld_lw_solver.py new file mode 100644 index 0000000..b6678f8 --- /dev/null +++ b/pyrte_rrtmgp/tests/test_python_frontend/test_cld_lw_solver.py @@ -0,0 +1,87 @@ +import os + +import numpy as np +import xarray as xr + +from pyrte_rrtmgp import rrtmgp_gas_optics +from pyrte_rrtmgp.rrtmgp_data import download_rrtmgp_data +from pyrte_rrtmgp.rrtmgp_gas_optics import GasOpticsFiles, load_gas_optics +from pyrte_rrtmgp.rte_solver import RTESolver +from pyrte_rrtmgp.all_skys_funcs import ( + compute_clouds, + compute_cloud_optics, + combine_optical_props, +) +from pyrte_rrtmgp.utils import compute_profiles, create_gas_dataset + +ERROR_TOLERANCE = 1e-7 + +rte_rrtmgp_dir = download_rrtmgp_data() +rfmip_dir = os.path.join(rte_rrtmgp_dir, "examples", "all-sky") +ref_dir = os.path.join(rfmip_dir, "reference") +lw_clouds = os.path.join(rte_rrtmgp_dir, "rrtmgp-clouds-lw-bnd.nc") + + +def test_lw_solver_with_clouds(): + # Set up dimensions + ncol = 24 + nlay = 72 + + # Create atmospheric profiles and gas concentrations + profiles = compute_profiles(300, ncol, nlay) + gas_values = { + "co2": 348e-6, + "ch4": 1650e-9, + "n2o": 306e-9, + "n2": 0.7808, + "o2": 0.2095, + "co": 0.0, + } + gases = create_gas_dataset(gas_values, dims={"site": ncol, "layer": nlay}) + + # Set up atmosphere dataset + atmosphere = xr.merge([profiles, gases]) + top_at_1 = ( + atmosphere["pres_layer"].values[0, 0] < atmosphere["pres_layer"].values[0, -1] + ) + t_sfc = profiles["temp_level"][:, nlay if top_at_1 else 0] + atmosphere["surface_temperature"] = xr.DataArray(t_sfc, dims=["site"]) + + # Calculate gas optical properties + gas_optics_lw = load_gas_optics(gas_optics_file=GasOpticsFiles.LW_G256) + clear_sky_optical_props = gas_optics_lw.gas_optics.compute( + atmosphere, problem_type="absorption", add_to_input=False + ) + clear_sky_optical_props["surface_emissivity"] = 0.98 + + # Calculate cloud properties and optical properties 
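+    # For the longwave case compute_cloud_optics returns absorption optical depth
+    # only (tau minus its scattering part, summed over liquid and ice).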
+ cloud_optics = xr.load_dataset(lw_clouds) + cloud_properties = compute_clouds( + cloud_optics, ncol, nlay, profiles["pres_layer"], profiles["temp_layer"] + ) + clouds_optical_props = compute_cloud_optics(cloud_properties, cloud_optics) + + # Combine optical properties and solve RTE + combined_optical_props = combine_optical_props( + clouds_optical_props, clear_sky_optical_props + ) + solver = RTESolver() + fluxes = solver.solve(combined_optical_props, add_to_input=False) + + # Load reference data and verify results + ref_data = xr.load_dataset( + os.path.join(ref_dir, "rrtmgp-allsky-lw-no-aerosols.nc"), + decode_cf=False, + ) + + # Compare results with reference data + assert np.isclose( + fluxes["lw_flux_up"].values, + ref_data["lw_flux_up"].values.T, + atol=ERROR_TOLERANCE, + ).all() + assert np.isclose( + fluxes["lw_flux_down"].values, + ref_data["lw_flux_dn"].values.T, + atol=ERROR_TOLERANCE, + ).all() diff --git a/pyrte_rrtmgp/tests/test_python_frontend/test_cld_sw_solver.py b/pyrte_rrtmgp/tests/test_python_frontend/test_cld_sw_solver.py new file mode 100644 index 0000000..48f0e8b --- /dev/null +++ b/pyrte_rrtmgp/tests/test_python_frontend/test_cld_sw_solver.py @@ -0,0 +1,108 @@ +import os + +import numpy as np +import xarray as xr + +from pyrte_rrtmgp import rrtmgp_gas_optics +from pyrte_rrtmgp.rrtmgp_data import download_rrtmgp_data +from pyrte_rrtmgp.rrtmgp_gas_optics import GasOpticsFiles, load_gas_optics +from pyrte_rrtmgp.rte_solver import RTESolver +from pyrte_rrtmgp.all_skys_funcs import ( + compute_clouds, + compute_cloud_optics, + combine_optical_props, + delta_scale_optical_props, +) +from pyrte_rrtmgp.utils import compute_profiles, create_gas_dataset + +ERROR_TOLERANCE = 1e-7 + +rte_rrtmgp_dir = download_rrtmgp_data() +rfmip_dir = os.path.join(rte_rrtmgp_dir, "examples", "all-sky") +ref_dir = os.path.join(rfmip_dir, "reference") +sw_clouds = os.path.join(rte_rrtmgp_dir, "rrtmgp-clouds-sw-bnd.nc") + + +def test_sw_solver_with_clouds(): + # Set up dimensions + ncol = 24 + nlay = 72 + + # Create atmospheric profiles and gas concentrations + profiles = compute_profiles(300, ncol, nlay) + gas_values = { + "co2": 348e-6, + "ch4": 1650e-9, + "n2o": 306e-9, + "n2": 0.7808, + "o2": 0.2095, + "co": 0.0, + } + gases = create_gas_dataset(gas_values, dims={"site": ncol, "layer": nlay}) + + # Set up atmosphere dataset + atmosphere = xr.merge([profiles, gases]) + top_at_1 = ( + atmosphere["pres_layer"].values[0, 0] < atmosphere["pres_layer"].values[0, -1] + ) + + # Load SW gas optics and add SW-specific properties + gas_optics_sw = load_gas_optics(gas_optics_file=GasOpticsFiles.SW_G224) + ngpt = gas_optics_sw.sizes["gpt"] + + # Add surface albedo and solar angle properties + atmosphere["surface_albedo_dir"] = xr.DataArray( + 0.06, + dims=["site", "gpt"], + coords={"site": np.arange(ncol), "gpt": np.arange(ngpt)}, + ) + atmosphere["surface_albedo_dif"] = xr.DataArray( + 0.06, + dims=["site", "gpt"], + coords={"site": np.arange(ncol), "gpt": np.arange(ngpt)}, + ) + atmosphere["mu0"] = xr.DataArray( + 0.86, + dims=["site", "layer"], + coords={"site": np.arange(ncol), "layer": np.arange(nlay)}, + ) + + # Calculate gas optical properties + clear_sky_optical_props = gas_optics_sw.gas_optics.compute( + atmosphere, problem_type="two-stream", add_to_input=False + ) + + # Calculate cloud properties and optical properties + cloud_optics = xr.load_dataset(sw_clouds) + cloud_properties = compute_clouds( + cloud_optics, ncol, nlay, profiles["pres_layer"], profiles["temp_layer"] + ) + 
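+    # Shortwave cloud optics keep tau, ssa and g, and are delta-scaled below to
+    # remove the forward-scattering peak before being combined with the gas optics.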
clouds_optical_props = compute_cloud_optics( + cloud_properties, cloud_optics, lw=False + ) + clouds_optical_props = delta_scale_optical_props(clouds_optical_props) + + # Combine optical properties and solve RTE + combined_optical_props = combine_optical_props( + clouds_optical_props, clear_sky_optical_props + ) + solver = RTESolver() + fluxes = solver.solve(combined_optical_props, add_to_input=False) + + # Load reference data and verify results + ref_data = xr.load_dataset( + os.path.join(ref_dir, "rrtmgp-allsky-sw-no-aerosols.nc"), + decode_cf=False, + ) + + # Compare results with reference data + assert np.isclose( + fluxes["sw_flux_up"].values, + ref_data["sw_flux_up"].values.T, + atol=ERROR_TOLERANCE, + ).all() + assert np.isclose( + fluxes["sw_flux_down"].values, + ref_data["sw_flux_dn"].values.T, + atol=ERROR_TOLERANCE, + ).all() diff --git a/pyrte_rrtmgp/utils.py b/pyrte_rrtmgp/utils.py new file mode 100644 index 0000000..1fe75dc --- /dev/null +++ b/pyrte_rrtmgp/utils.py @@ -0,0 +1,182 @@ +import numpy as np +import xarray as xr + + +def create_gas_dataset(gas_values, dims): + """Create a dataset with gas values and dimensions. + + Args: + gas_values: Dictionary of gas values + dims: Dictionary of dimensions + + Returns: + xr.Dataset: Dataset with gas values and dimensions + """ + ds = xr.Dataset() + + dim_names = list(dims.keys()) + coords = {k: np.arange(v) for k, v in dims.items()} + + # Convert each gas value to 2D array and add as variable + for gas_name, value in gas_values.items(): + ds[gas_name] = xr.DataArray(value, dims=dim_names, coords=coords) + + return ds + + +def compute_profiles(sst: float, ncol: int, nlay: int) -> xr.Dataset: + """Construct atmospheric profiles following the RCEMIP protocol. + + Computes vertical profiles of pressure, temperature, humidity, and ozone + for radiative transfer calculations. Based on the RCEMIP (Radiative-Convective + Equilibrium Model Intercomparison Project) protocol. 
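+
+    Temperature follows a constant lapse rate up to a 15 km tropopause and is held
+    constant above it; pressure is hydrostatic in the virtual temperature, specific
+    humidity decays exponentially with height (with a stratospheric floor), and
+    ozone follows an analytic pressure-dependent profile.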
+ + Args: + sst: Surface temperature [K] + ncol: Number of columns + nlay: Number of vertical layers + + Returns: + xr.Dataset: Dataset containing the following variables: + - pres_layer: Pressure at layer centers [Pa] + - temp_layer: Temperature at layer centers [K] + - pres_level: Pressure at layer interfaces [Pa] + - temp_level: Temperature at layer interfaces [K] + - water_vapor: Water vapor mass mixing ratio [kg/kg] + - ozone: Ozone mass mixing ratio [kg/kg] + + Raises: + ValueError: If input parameters are invalid + """ + # Input validation + if not isinstance(nlay, int) or nlay < 2: + raise ValueError("nlay must be an integer >= 2") + if not isinstance(ncol, int) or ncol < 1: + raise ValueError("ncol must be a positive integer") + if not isinstance(sst, (int, float)) or sst < 0: + raise ValueError("sst must be a positive number") + + # Physical constants + PHYS_CONSTANTS = { + "z_trop": 15000.0, # Tropopause height [m] + "z_top": 70.0e3, # Model top height [m] + "g": 9.79764, # Gravitational acceleration [m/s^2] + "Rd": 287.04, # Gas constant for dry air [J/kg/K] + "p0": 101480.0, # Surface pressure [Pa] + "gamma": 6.7e-3, # Lapse rate [K/m] + } + + # Humidity parameters + HUMIDITY_PARAMS = { + "z_q1": 4.0e3, # Scale height for water vapor [m] + "z_q2": 7.5e3, # Secondary scale height for water vapor [m] + "q_t": 1.0e-8, # Minimum specific humidity [kg/kg] + "q_0": 0.01864, # Surface specific humidity for 300K SST [kg/kg] + } + + # Ozone parameters + O3_PARAMS = { + "g1": 3.6478, + "g2": 0.83209, + "g3": 11.3515, + "o3_min": 1e-13, # Minimum ozone mixing ratio [kg/kg] + } + + # Constants + z_trop = PHYS_CONSTANTS["z_trop"] + z_top = PHYS_CONSTANTS["z_top"] + g1 = O3_PARAMS["g1"] + g2 = O3_PARAMS["g2"] + g3 = O3_PARAMS["g3"] + o3_min = O3_PARAMS["o3_min"] + g = PHYS_CONSTANTS["g"] + Rd = PHYS_CONSTANTS["Rd"] + p0 = PHYS_CONSTANTS["p0"] + z_q1 = HUMIDITY_PARAMS["z_q1"] + z_q2 = HUMIDITY_PARAMS["z_q2"] + q_t = HUMIDITY_PARAMS["q_t"] + gamma = PHYS_CONSTANTS["gamma"] + q_0 = HUMIDITY_PARAMS["q_0"] + + # Initial calculations + Tv0 = (1.0 + 0.608 * q_0) * sst + + # Split resolution above and below RCE tropopause (15 km or about 125 hPa) + z_lev = np.zeros(nlay + 1) + z_lev[0] = 0.0 + z_lev[1 : nlay // 2 + 1] = 2.0 * z_trop / nlay * np.arange(1, nlay // 2 + 1) + z_lev[nlay // 2 + 1 :] = z_trop + 2.0 * (z_top - z_trop) / nlay * np.arange( + 1, nlay // 2 + 1 + ) + z_lay = 0.5 * (z_lev[:-1] + z_lev[1:]) + + # Layer calculations with broadcasting + z_lay_bc = z_lay[np.newaxis, :] + z_lev_bc = z_lev[np.newaxis, :] + + q_lay = np.where( + z_lay_bc > z_trop, + q_t, + q_0 * np.exp(-z_lay_bc / z_q1) * np.exp(-((z_lay_bc / z_q2) ** 2)), + ) + t_lay = np.where( + z_lay_bc > z_trop, + sst - gamma * z_trop / (1.0 + 0.608 * q_0), + sst - gamma * z_lay_bc / (1.0 + 0.608 * q_lay), + ) + Tv_lay = (1.0 + 0.608 * q_lay) * t_lay + p_lay = np.where( + z_lay_bc > z_trop, + p0 + * (Tv_lay / Tv0) ** (g / (Rd * gamma)) + * np.exp(-((g * (z_lay_bc - z_trop)) / (Rd * Tv_lay))), + p0 * (Tv_lay / Tv0) ** (g / (Rd * gamma)), + ) + + p_hpa = p_lay / 100.0 + o3 = np.maximum(o3_min, g1 * p_hpa**g2 * np.exp(-p_hpa / g3) * 1.0e-6) + + # Level calculations with broadcasting + q_lev = np.where( + z_lev_bc > z_trop, + q_t, + q_0 * np.exp(-z_lev_bc / z_q1) * np.exp(-((z_lev_bc / z_q2) ** 2)), + ) + t_lev = np.where( + z_lev_bc > z_trop, + sst - gamma * z_trop / (1.0 + 0.608 * q_0), + sst - gamma * z_lev_bc / (1.0 + 0.608 * q_lev), + ) + Tv_lev = (1.0 + 0.608 * q_lev) * t_lev + p_lev = np.where( + z_lev_bc > z_trop, + p0 + * 
(Tv_lev / Tv0) ** (g / (Rd * gamma)) + * np.exp(-((g * (z_lev_bc - z_trop)) / (Rd * Tv_lev))), + p0 * (Tv_lev / Tv0) ** (g / (Rd * gamma)), + ) + + # Repeat profiles for each column + p_lay = np.repeat(p_lay, ncol, axis=0) + t_lay = np.repeat(t_lay, ncol, axis=0) + q_lay = np.repeat(q_lay, ncol, axis=0) + o3 = np.repeat(o3, ncol, axis=0) + p_lev = np.repeat(p_lev, ncol, axis=0) + t_lev = np.repeat(t_lev, ncol, axis=0) + + return xr.Dataset( + data_vars={ + "pres_layer": (["site", "layer"], p_lay), + "temp_layer": (["site", "layer"], t_lay), + "pres_level": (["site", "level"], p_lev), + "temp_level": (["site", "level"], t_lev), + "h2o": (["site", "layer"], q_lay), + "o3": (["site", "layer"], o3), + }, + attrs={ + "description": "Atmospheric profiles following RCEMIP protocol", + "sst": sst, + "ncol": ncol, + "nlay": nlay, + }, + )
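A quick, illustrative sanity check of the compute_profiles helper added in pyrte_rrtmgp/utils.py (a sketch based on the code above, not part of the patch):

from pyrte_rrtmgp.utils import compute_profiles

profiles = compute_profiles(300.0, ncol=4, nlay=60)
# Layer quantities are dimensioned (site, layer); levels carry one extra point.
assert profiles["pres_layer"].dims == ("site", "layer")
assert profiles["pres_level"].sizes["level"] == 61
# Layer index 0 is the surface, so pressure decreases with increasing layer index.
assert (profiles["pres_layer"].diff("layer") < 0).all()
# Specific humidity is floored at the prescribed stratospheric minimum of 1e-8 kg/kg.
assert float(profiles["h2o"].min()) >= 1.0e-8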