Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add new adjusted weight scheme to work with DCSF and CCF jobs #689

Merged
merged 15 commits into from
Mar 6, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion MDANSE/Src/MDANSE/Framework/Jobs/CoordinationNumber.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def finalize(self):
idj = self.selectedElements.index(at2)

if idi == idj:
nij = ni * (ni - 1) / 2.0
nij = ni**2 / 2.0
else:
nij = ni * nj
self.hIntra[idi, idj] += self.hIntra[idj, idi]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from math import sqrt
from typing import Optional
import collections
import itertools
Expand Down Expand Up @@ -375,8 +376,8 @@ def finalize(self):
at1, at2 = pair
ni = nAtomsPerElement[at1]
nj = nAtomsPerElement[at2]
self._outputData[f"j(q,t)_long_{pair_str}"][:] /= ni * nj
self._outputData[f"j(q,t)_trans_{pair_str}"][:] /= ni * nj
self._outputData[f"j(q,t)_long_{pair_str}"][:] /= sqrt(ni * nj)
self._outputData[f"j(q,t)_trans_{pair_str}"][:] /= sqrt(ni * nj)
self._outputData[f"J(q,f)_long_{pair_str}"][:] = get_spectrum(
self._outputData[f"j(q,t)_long_{pair_str}"],
self.configuration["instrument_resolution"]["time_window"],
Expand All @@ -393,7 +394,7 @@ def finalize(self):
)

weights = self.configuration["weights"].get_weights()
weight_dict = get_weights(weights, nAtomsPerElement, 2)
weight_dict = get_weights(weights, nAtomsPerElement, 2, conc_exp=0.5)
assign_weights(self._outputData, weight_dict, "j(q,t)_long_%s%s")
assign_weights(self._outputData, weight_dict, "j(q,t)_trans_%s%s")
assign_weights(self._outputData, weight_dict, "J(q,f)_long_%s%s")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#

from math import sqrt
import collections
import itertools

Expand Down Expand Up @@ -249,26 +249,22 @@ def finalize(self):
"""
Finalizes the calculations (e.g. averaging the total term, output files creations ...)
"""

nAtomsPerElement = self.configuration["atom_selection"].get_natoms()

weights = self.configuration["weights"].get_weights()
weight_dict = get_weights(weights, nAtomsPerElement, 2)
weight_dict = get_weights(weights, nAtomsPerElement, 2, conc_exp=0.5)
assign_weights(self._outputData, weight_dict, "f(q,t)_%s%s")
assign_weights(self._outputData, weight_dict, "s(q,f)_%s%s")
for pair in self._elementsPairs:
pair_str = "".join(map(str, pair))
ni = nAtomsPerElement[pair[0]]
nj = nAtomsPerElement[pair[1]]
extra_scaling = 1.0 / np.sqrt(ni * nj)
self._outputData[f"f(q,t)_{pair_str}"].scaling_factor *= extra_scaling
self._outputData[f"f(q,t)_{pair_str}"] /= sqrt(ni * nj)
self._outputData[f"s(q,f)_{pair_str}"][:] = get_spectrum(
self._outputData[f"f(q,t)_{pair_str}"],
self.configuration["instrument_resolution"]["time_window"],
self.configuration["instrument_resolution"]["time_step"],
axis=1,
)
self._outputData[f"s(q,f)_{pair_str}"].scaling_factor *= extra_scaling

self._outputData["f(q,t)_total"][:] = weighted_sum(
self._outputData,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -259,14 +259,13 @@ def finalize(self):
assign_weights(self._outputData, weight_dict, "s(q,f)_%s")
for element, number in list(nAtomsPerElement.items()):
extra_scaling = 1.0 / number
self._outputData[f"f(q,t)_{element}"].scaling_factor *= extra_scaling
self._outputData[f"f(q,t)_{element}"] *= extra_scaling
self._outputData[f"s(q,f)_{element}"][:] = get_spectrum(
self._outputData[f"f(q,t)_{element}"],
self.configuration["instrument_resolution"]["time_window"],
self.configuration["instrument_resolution"]["time_step"],
axis=1,
)
self._outputData[f"s(q,f)_{element}"].scaling_factor *= extra_scaling

self._outputData["f(q,t)_total"][:] = weighted_sum(
self._outputData,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from math import sqrt
import collections
import itertools
from typing import List

import numpy as np

Expand Down Expand Up @@ -400,34 +400,45 @@ def finalize(self):
bj = self.configuration["trajectory"]["instance"].get_atom_property(
pair[1], "b_coherent"
)
sqrt_cij = sqrt(
nAtomsPerElement[pair[0]] * nAtomsPerElement[pair[1]] * norm_natoms**2
)
pre_fac = 1 if pair[0] == pair[1] else 2
self._outputData[f"f(q,t)_coh_{pair_str}"].scaling_factor *= (
pre_fac * bi * bj * sqrt_cij
)
self._outputData[f"s(q,f)_coh_{pair_str}"].scaling_factor *= (
pre_fac * bi * bj * sqrt_cij
)

if pair[0] == pair[1]:  # A factor 2 is added in the else branch when the two elements differ
self._outputData[f"f(q,t)_coh_{pair_str}"] *= bi * bj * norm_natoms
self._outputData[f"s(q,f)_coh_{pair_str}"] *= bi * bj * norm_natoms
else:
self._outputData[f"f(q,t)_coh_{pair_str}"] *= 2 * bi * bj * norm_natoms
self._outputData[f"s(q,f)_coh_{pair_str}"] *= 2 * bi * bj * norm_natoms

self._outputData["f(q,t)_coh_total"][:] += self._outputData[
f"f(q,t)_coh_{pair_str}"
][:]
self._outputData["s(q,f)_coh_total"][:] += self._outputData[
f"s(q,f)_coh_{pair_str}"
][:]
self._outputData["f(q,t)_coh_total"][:] += (
self._outputData[f"f(q,t)_coh_{pair_str}"][:]
* self._outputData[f"f(q,t)_coh_{pair_str}"].scaling_factor
)
self._outputData["s(q,f)_coh_total"][:] += (
self._outputData[f"s(q,f)_coh_{pair_str}"][:]
* self._outputData[f"s(q,f)_coh_{pair_str}"].scaling_factor
)

# Compute incoherent functions and structure factor
for element in nAtomsPerElement:
for element, number in nAtomsPerElement.items():
bi = self.configuration["trajectory"]["instance"].get_atom_property(
element, "b_incoherent2"
)
self._outputData[f"f(q,t)_inc_{element}"][:] *= bi * norm_natoms
self._outputData[f"s(q,f)_inc_{element}"][:] *= bi * norm_natoms
self._outputData["f(q,t)_inc_total"][:] += self._outputData[
f"f(q,t)_inc_{element}"
][:]
self._outputData["s(q,f)_inc_total"][:] += self._outputData[
f"s(q,f)_inc_{element}"
][:]
self._outputData[f"f(q,t)_inc_{element}"].scaling_factor *= (
bi * number * norm_natoms
)
self._outputData[f"s(q,f)_inc_{element}"].scaling_factor *= (
bi * number * norm_natoms
)
self._outputData["f(q,t)_inc_total"][:] += (
self._outputData[f"f(q,t)_inc_{element}"][:]
* self._outputData[f"f(q,t)_inc_{element}"].scaling_factor
)
self._outputData["s(q,f)_inc_total"][:] += (
self._outputData[f"s(q,f)_inc_{element}"][:]
* self._outputData[f"s(q,f)_inc_{element}"].scaling_factor
)

# Compute total F(Q,t) = inc + coh
self._outputData["f(q,t)_total"][:] = (
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def finalize(self):
idj = self.selectedElements.index(pair[1])

if idi == idj:
nij = ni * (ni - 1) / 2.0
nij = ni**2 / 2.0
else:
nij = ni * nj
self.hIntra[idi, idj] += self.hIntra[idj, idi]
Expand Down
5 changes: 3 additions & 2 deletions MDANSE/Src/MDANSE/Framework/Jobs/StaticStructureFactor.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def finalize(self):
idj = self.selectedElements.index(pair[1])

if pair[0] == pair[1]:
nij = ni * (ni - 1) / 2.0
nij = ni**2 / 2.0
else:
nij = ni * nj
self.hIntra[idi, idj] += self.hIntra[idj, idi]
Expand Down Expand Up @@ -212,11 +212,12 @@ def finalize(self):
weight_dict = get_weights(weights, nAtomsPerElement, 2)
assign_weights(self._outputData, weight_dict, "ssf_intra_%s%s")
assign_weights(self._outputData, weight_dict, "ssf_inter_%s%s")
assign_weights(self._outputData, weight_dict, "ssf_total_%s%s")

ssfIntra = weighted_sum(self._outputData, weight_dict, "ssf_intra_%s%s")
self._outputData["ssf_intra"][:] = ssfIntra

ssfInter = weighted_sum(self._outputData, weight_dict, "ssf_inter_%s%s")

self._outputData["ssf_inter"][:] = ssfInter

self._outputData["ssf_total"][:] = ssfIntra + ssfInter
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -469,7 +469,7 @@ def finalize(self):
idj = self.selectedElements.index(pair[1])

if idi == idj:
nij = ni * (ni - 1) / 2.0
nij = ni**2 / 2.0
else:
nij = ni * nj
self.h_intra[idi, idj] += self.h_intra[idj, idi]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def finalize(self):
idj = self.selectedElements.index(pair[1])

if pair[0] == pair[1]:
nij = ni * (ni - 1) / 2.0
nij = ni**2 / 2.0
else:
nij = ni * nj
self.hIntra[idi, idj] += self.hIntra[idj, idi]
Expand Down Expand Up @@ -211,6 +211,7 @@ def finalize(self):
weight_dict = get_weights(asf, nAtomsPerElement, 2)
assign_weights(self._outputData, weight_dict, "xssf_intra_%s%s")
assign_weights(self._outputData, weight_dict, "xssf_inter_%s%s")
assign_weights(self._outputData, weight_dict, "xssf_total_%s%s")
xssfIntra = weighted_sum(self._outputData, weight_dict, "xssf_intra_%s%s")
self._outputData["xssf_intra"][:] = xssfIntra

Expand Down
18 changes: 12 additions & 6 deletions MDANSE/Src/MDANSE/Mathematics/Arithmetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,9 @@
import numpy as np


def get_weights(props: Dict[str, float], contents: Dict[str, int], dim: int):
def get_weights(
props: Dict[str, float], contents: Dict[str, int], dim: int, conc_exp: float = 1.0
):
"""Calculate the scaling factors to be applied to output datasets.

Returns a dictionary of scaling factors, where the
Expand All @@ -33,6 +35,9 @@ def get_weights(props: Dict[str, float], contents: Dict[str, int], dim: int):
Dictionary of numbers of atoms in an object
dim : int
number of atom types in the label of the output datasets (e.g. 1 for "O", 2 for "CuCu")
conc_exp : float
The exponent to which the product of the concentrations is raised
(e.g. (c_i * c_j)**0.5, which is used for DCSF jobs).

Returns
-------
Expand All @@ -43,17 +48,18 @@ def get_weights(props: Dict[str, float], contents: Dict[str, int], dim: int):

weights = {}

n_atms = sum(contents[el] for el in props)
cartesianProduct = itertools.product(props, repeat=dim)
for elements in cartesianProduct:
atom_number_product = np.prod([contents[el] for el in elements])
atom_conc_product = np.prod([contents[el] / n_atms for el in elements])
property_product = np.prod(np.array([props[el] for el in elements]), axis=0)

factor = atom_number_product * property_product
# E.g. for property b_coh, 5 Cu atoms and dim=2
# factor = 5*5 * b_coh(Cu)*b_coh(Cu)
factor = atom_conc_product**conc_exp * property_product
# E.g. for property b_coh, 5 Cu atoms, 100 total atoms, and dim=2
# factor = (5*5/(100*100))**conc_exp * b_coh(Cu)*b_coh(Cu)

weights[elements] = np.float64(np.copy(factor))
normFactor += factor
normFactor += atom_conc_product * property_product

normalise = True
try:
Expand Down
34 changes: 23 additions & 11 deletions MDANSE/Tests/UnitTests/Analysis/test_scattering.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,8 @@ def dcsf():
parameters["instrument_resolution"] = ("Ideal", {})
parameters["output_files"] = (temp_name, ("MDAFormat",), "INFO")
parameters["q_vectors"] = (
"SphericalLatticeQVectors",
{"seed": 0, "shells": (5.0, 36, 10.0), "n_vectors": 10, "width": 9.0},
"GridQVectors",
{"hrange": [0, 3, 1], "krange": [0, 3, 1], "lrange": [0, 3, 1], "qstep": 1},
)
parameters["running_mode"] = ("single-core",)
parameters["trajectory"] = short_traj
Expand All @@ -74,8 +74,8 @@ def disf():
parameters["instrument_resolution"] = ("Ideal", {})
parameters["output_files"] = (temp_name, ("MDAFormat",), "INFO")
parameters["q_vectors"] = (
"SphericalLatticeQVectors",
{"seed": 0, "shells": (5.0, 36, 10.0), "n_vectors": 10, "width": 9.0},
"GridQVectors",
{"hrange": [0, 3, 1], "krange": [0, 3, 1], "lrange": [0, 3, 1], "qstep": 1},
)
parameters["running_mode"] = ("single-core",)
parameters["trajectory"] = short_traj
Expand Down Expand Up @@ -110,13 +110,13 @@ def test_dcsf(traj_info, qvector_grid):
result_file = os.path.join(result_dir, f"dcsf_{traj_info[0]}.mda")
with h5py.File(temp_name + ".mda") as actual, h5py.File(result_file) as desired:
keys = [
key for key in desired.keys()
key for key in desired.keys()
if any(key.startswith(j) for j in ["f(q,t)", "s(q,f)"])
]
for key in keys:
np.testing.assert_array_almost_equal(
actual[f"/{key}"] * actual[f"/{key}"].attrs["scaling_factor"],
desired[f"/{key}"],
desired[f"/{key}"] * desired[f"/{key}"].attrs["scaling_factor"],
)

os.remove(temp_name + ".mda")
Expand Down Expand Up @@ -144,8 +144,8 @@ def test_ccf(traj_info, qvector_grid):
parameters["running_mode"] = ("single-core",)
parameters["trajectory"] = traj_info[1]
parameters["weights"] = "equal"
dcsf = IJob.create("CurrentCorrelationFunction")
dcsf.run(parameters, status=True)
ccf = IJob.create("CurrentCorrelationFunction")
ccf.run(parameters, status=True)
assert path.exists(temp_name + ".mda")
assert path.isfile(temp_name + ".mda")

Expand All @@ -155,9 +155,9 @@ def test_ccf(traj_info, qvector_grid):
i for i in desired.keys() if any([j in i for j in ["J(q,f)", "j(q,t)"]])
]
for key in keys:
# apply the stored scaling factors to both sides before comparing
np.testing.assert_array_almost_equal(
actual[f"/{key}"], desired[f"/{key}"],
actual[f"/{key}"] * actual[f"/{key}"].attrs["scaling_factor"],
desired[f"/{key}"] * desired[f"/{key}"].attrs["scaling_factor"],
)

os.remove(temp_name + ".mda")
Expand Down Expand Up @@ -217,7 +217,7 @@ def test_disf(traj_info, qvector_grid):
for key in keys:
np.testing.assert_array_almost_equal(
actual[f"/{key}"] * actual[f"/{key}"].attrs["scaling_factor"],
desired[f"/{key}"],
desired[f"/{key}"] * desired[f"/{key}"].attrs["scaling_factor"],
)

os.remove(temp_name + ".mda")
Expand Down Expand Up @@ -322,6 +322,18 @@ def test_ndtsf(disf, dcsf, qvector_grid):
ndtsf.run(parameters, status=True)
assert path.exists(temp_name + ".mda")
assert path.isfile(temp_name + ".mda")

result_file = os.path.join(result_dir, "ndtsf.mda")
with h5py.File(temp_name + ".mda") as actual, h5py.File(result_file) as desired:
keys = [
i for i in desired.keys() if any([j in i for j in ["f(q,t)", "s(q,f)"]])
]
for key in keys:
np.testing.assert_array_almost_equal(
actual[f"/{key}"] * actual[f"/{key}"].attrs["scaling_factor"],
desired[f"/{key}"] * desired[f"/{key}"].attrs["scaling_factor"],
)

os.remove(temp_name + ".mda")
assert path.exists(temp_name + "_text.tar")
assert path.isfile(temp_name + "_text.tar")
Expand Down
Binary file modified MDANSE/Tests/UnitTests/Results/ccf_com_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/ccf_mdmc_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/ccf_short_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/dcsf_com_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/dcsf_mdmc_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/dcsf_short_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/disf_com_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/disf_mdmc_traj.mda
Binary file not shown.
Binary file modified MDANSE/Tests/UnitTests/Results/disf_short_traj.mda
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added MDANSE/Tests/UnitTests/Results/ndtsf.mda
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
6 changes: 3 additions & 3 deletions MDANSE_GUI/Src/MDANSE_GUI/Tabs/Models/PlottingContext.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ def standard_items(self, key: int) -> List["QStandardItem"]:
"Colour",
"Line style",
"Marker",
"Scaling",
"Apply weights?",
]
plotting_column_index = {
label: number for number, label in enumerate(plotting_column_labels)
Expand Down Expand Up @@ -396,7 +396,7 @@ def datasets(self) -> Dict:
)
set_scaling = (
self.itemFromIndex(
self.index(row, plotting_column_index["Scaling"])
self.index(row, plotting_column_index["Apply weights?"])
).checkState()
== Qt.CheckState.Checked
)
Expand Down Expand Up @@ -453,7 +453,7 @@ def add_dataset(self, new_dataset: SingleDataset):
temp = items[plotting_column_index["Use it?"]]
temp.setCheckable(True)
temp.setCheckState(Qt.CheckState.Checked)
temp = items[plotting_column_index["Scaling"]]
temp = items[plotting_column_index["Apply weights?"]]
temp.setEditable(False)
temp.setCheckable(True)
temp.setCheckState(Qt.CheckState.Checked)
Expand Down