Skip to content

Commit

Permalink
Working on the refactoring of apex
Browse files Browse the repository at this point in the history
  • Loading branch information
atomprobe-tc committed Aug 29, 2024
1 parent 3944006 commit 60b0c6f
Show file tree
Hide file tree
Showing 16 changed files with 222 additions and 277 deletions.
7 changes: 4 additions & 3 deletions .vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,8 @@
//"../apex/InGaN_nanowires_linescan.edaxh5",
//"../apex/InGaN_nanowires_map.edaxh5",
//"../apex/InGaN_nanowires_spectra.edaxh5",
//"../apex/2023-08-16_Ni_NFDI.edaxh5",
"tests/data/173_0057.h5oina",
"tests/data/2023-08-16_Ni_NFDI.edaxh5",
//"tests/data/173_0057.h5oina",
"--reader",
"em",
"--nxdl",
Expand All @@ -57,7 +57,8 @@
//"--output=pynxtools_em/dbg/zeiss.nxs",
//"--output=pynxtools_em/dbg/tfs.nxs",
//"--output=dbg/apex.nxs"
"--output=dbg/hfive_oxford.nxs"],
//"--output=dbg/hfive_oxford.nxs",
"--output=dbg/hfive_apex.nxs"],
}
]
}
2 changes: 1 addition & 1 deletion src/pynxtools_em/parsers/conventions.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ def parse(self, template) -> dict:
]
)
if not is_cartesian_cs_well_defined(handedness, directions):
raise ValueError(f"{csys_name}_reference_frame is not well defined!")
print(f"{csys_name}_reference_frame is not well defined!")

# could add tests for gnomonic and pattern_centre as well
return template
381 changes: 174 additions & 207 deletions src/pynxtools_em/parsers/hfive_apex.py

Large diffs are not rendered by default.

57 changes: 20 additions & 37 deletions src/pynxtools_em/parsers/hfive_oxford.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,7 @@ def check_if_supported(self):
return

with h5py.File(self.file_path, "r") as h5r:
req_fields = ["Manufacturer", "Software Version", "Format Version"]
for req_field in req_fields:
for req_field in ["Manufacturer", "Software Version", "Format Version"]:
if f"/{req_field}" not in h5r:
return

Expand Down Expand Up @@ -130,6 +129,7 @@ def parse(self, template: dict) -> dict:
ebsd_roi_overview(self.ebsd, self.id_mgn, template)
ebsd_roi_phase_ipf(self.ebsd, self.id_mgn, template)
self.id_mgn["roi_id"] += 1
self.ebsd = EbsdPointCloud()

# TODO:Vitesh example
return template
Expand All @@ -145,7 +145,7 @@ def parse_and_normalize_slice_ebsd_header(self, fp):
self.ebsd.dimensionality = 2
# the next two lines encode the typical assumption that is not reported in tech partner file!

dims = ["X", "Y"]
dims = ["X", "Y"] # TODO::1d example
for dim in dims:
for req_field in [f"{dim} Cells", f"{dim} Step"]:
if f"{grp_name}/{req_field}" not in fp:
Expand Down Expand Up @@ -188,14 +188,13 @@ def parse_and_normalize_slice_ebsd_phases(self, fp):
self.ebsd.phases[phase_idx] = {}
sub_grp_name = f"{grp_name}/{phase_id}"

req_fields = [
for req_field in [
"Phase Name",
"Reference",
"Lattice Angles",
"Lattice Dimensions",
"Space Group",
]
for req_field in req_fields:
]:
if f"{sub_grp_name}/{req_field}" not in fp:
print(f"Unable to parse {sub_grp_name}/{req_field} !")
self.ebsd = EbsdPointCloud()
Expand Down Expand Up @@ -236,6 +235,14 @@ def parse_and_normalize_slice_ebsd_phases(self, fp):
)
self.ebsd = EbsdPointCloud()
return
latt = Lattice(
abc[0].magnitude,
abc[1].magnitude,
abc[2].magnitude,
angles[0].magnitude,
angles[1].magnitude,
angles[2].magnitude,
)

# Space Group, no, H5T_NATIVE_INT32, (1, 1), Space group index.
# The attribute Symbol contains the string representation, for example P m -3 m.
Expand All @@ -246,45 +253,21 @@ def parse_and_normalize_slice_ebsd_phases(self, fp):
else:
self.ebsd.space_group = [space_group]

strct = Structure(title=phase_name, atoms=None, lattice=latt)
if len(self.ebsd.phase) > 0:
self.ebsd.phase.append(
Structure(
title=phase_name,
atoms=None,
lattice=Lattice(
abc[0],
abc[1],
abc[2],
angles[0],
angles[1],
angles[2],
),
)
)
self.ebsd.phase.append(strct)
else:
self.ebsd.phase = [
Structure(
title=phase_name,
atoms=None,
lattice=Lattice(
abc[0],
abc[1],
abc[2],
angles[0],
angles[1],
angles[2],
),
)
]
self.ebsd.phase = [strct]

def parse_and_normalize_slice_ebsd_data(self, fp):
# https://github.com/oinanoanalysis/h5oina/blob/master/H5OINAFile.md
grp_name = f"{self.prfx}/EBSD/Data"
if f"{grp_name}" not in fp:
raise ValueError(f"Unable to parse {grp_name} !")
print(f"Unable to parse {grp_name} !")
self.ebsd = EbsdPointCloud()
return

req_fields = ["Euler", "Phase", "X", "Y", "Band Contrast"]
for req_field in req_fields:
for req_field in ["Euler", "Phase", "X", "Y", "Band Contrast"]:
if f"{grp_name}/{req_field}" not in fp:
print(f"Unable to parse {grp_name}/{req_field} !")
self.ebsd = EbsdPointCloud()
Expand Down
6 changes: 4 additions & 2 deletions src/pynxtools_em/parsers/image_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,8 @@ def __init__(self, file_path: str = "", verbose=False):
if file_path is not None and file_path != "":
self.file_path = file_path
else:
raise ValueError(f"{__name__} needs proper instantiation !")
print(f"{__name__} needs proper instantiation !")
return
self.tmp: Dict = {}
self.verbose = verbose
self.file_path_sha256 = None
Expand All @@ -43,4 +44,5 @@ def init_named_cache(self, ckey: str):
self.tmp[ckey] = {}
return ckey
else:
raise ValueError(f"Existent named cache {ckey} must not be overwritten !")
print(f"Existent named cache {ckey} must not be overwritten !")
return
2 changes: 1 addition & 1 deletion src/pynxtools_em/parsers/image_png_protochips.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ def get_xml_metadata(self, file, fp):
string_to_number(v)
)
else:
raise KeyError(
print(
"Trying to register a duplicated key {key}"
)
if k.endswith(".Value"):
Expand Down
2 changes: 1 addition & 1 deletion src/pynxtools_em/parsers/image_tiff_jeol.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def check_if_tiff_jeol(self):
else:
self.flat_dict_meta[tmp[0]] = ureg.Quantity(tmp[1])
else:
raise KeyError(f"Found duplicated key {tmp[0]} !")
print(f"Found duplicated key {tmp[0]} !")
else:
print(f"WARNING::{line} is currently ignored !")

Expand Down
2 changes: 1 addition & 1 deletion src/pynxtools_em/parsers/image_tiff_point_electronic.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def xmpmeta_to_flat_dict(self, meta: fd.FlatDict):
if key not in self.flat_metadata:
self.flat_metadata[key] = string_to_number(obj)
else:
raise KeyError(f"Duplicated key {key} !")
print(f"Duplicated key {key} !")

def check_if_tiff_point_electronic(self):
"""Check if resource behind self.file_path is a TaggedImageFormat file.
Expand Down
4 changes: 2 additions & 2 deletions src/pynxtools_em/parsers/image_tiff_tfs.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def get_metadata(self):
# TODO::better use official convention to not read beyond the end of file
idx += 1
if pos_s is None or pos_e is None:
raise ValueError(
print(
f"Definition of byte boundaries for reading childs of [{parent}] was unsuccessful !"
)
# print(f"Search for [{parent}] in between byte offsets {pos_s} and {pos_e}")
Expand All @@ -150,7 +150,7 @@ def get_metadata(self):
else:
self.flat_dict_meta[f"{parent}/{term}"] = value
else:
raise ValueError(
print(
f"Detected an unexpected case {parent}/{term}, type: {type(value)} !"
)
else:
Expand Down
4 changes: 1 addition & 3 deletions src/pynxtools_em/parsers/rsciio_velox.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,9 +157,7 @@ def process_event_data_em_metadata(self, obj: dict, template: dict) -> dict:
flat_orig_meta = fd.FlatDict(obj["original_metadata"], "/")

if (len(identifier) != 3) or (not all(isinstance(x, int) for x in identifier)):
raise ValueError(
f"Argument identifier {identifier} needs three int values!"
)
print(f"Argument identifier {identifier} needs three int values!")
trg = (
f"/ENTRY[entry{identifier[0]}]/measurement/event_data_em_set/EVENT_DATA_EM"
f"[event_data_em{identifier[1]}]/em_lab/ebeam_column"
Expand Down
4 changes: 2 additions & 2 deletions src/pynxtools_em/reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@

from pynxtools_em.concepts.nxs_concepts import NxEmAppDef
from pynxtools_em.parsers.conventions import NxEmConventionParser
from pynxtools_em.parsers.hfive_apex import HdfFiveEdaxApexParser

# from pynxtools_em.parsers.hfive_apex import HdfFiveEdaxApexParser
# from pynxtools_em.parsers.hfive_bruker import HdfFiveBrukerEspritParser
# from pynxtools_em.parsers.hfive_dreamthreed import HdfFiveDreamThreedParser
# from pynxtools_em.parsers.hfive_ebsd import HdfFiveEbsdCommunityParser
Expand Down Expand Up @@ -113,7 +113,7 @@ def read(
print("Parse and map pieces of information within files from tech partners...")
if len(case.dat) == 1: # no sidecar file
parsers: List[type] = [
# HdfFiveEdaxApexParser,
HdfFiveEdaxApexParser,
# HdfFiveBrukerEspritParser,
# HdfFiveDreamThreedParser,
# HdfFiveEbsdCommunityParser,
Expand Down
8 changes: 3 additions & 5 deletions src/pynxtools_em/utils/gatan_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,11 +41,9 @@ def gatan_image_spectrum_or_generic_nxdata(list_of_dict) -> str:
else:
token.append(obj["units"])
else:
raise ValueError(
f"{obj.keys()} are not exactly the expected keywords!"
)
print(f"{obj.keys()} are not exactly the expected keywords!")
else:
raise ValueError(f"{obj} is not a dict!")
print(f"{obj} is not a dict!")
if len(token) >= 1:
print("_".join(token))
unit_categories = []
Expand All @@ -61,7 +59,7 @@ def gatan_image_spectrum_or_generic_nxdata(list_of_dict) -> str:
elif base_unit == "kilogram * meter ** 2 / second ** 2":
unit_categories.append("eV")
else:
raise ValueError(
print(
f"Hitting an undefined case for base_unit {base_unit} !"
)
except UndefinedUnitError:
Expand Down
6 changes: 2 additions & 4 deletions src/pynxtools_em/utils/nion_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,11 +50,9 @@ def nion_image_spectrum_or_generic_nxdata(list_of_dict) -> str:
else:
token.append(obj["units"])
else:
raise ValueError(
f"{obj.keys()} are not exactly the expected keywords!"
)
print(f"{obj.keys()} are not exactly the expected keywords!")
else:
raise ValueError(f"{obj} is not a dict!")
print(f"{obj} is not a dict!")
if len(token) >= 1:
return "_".join(token)
return ""
6 changes: 3 additions & 3 deletions src/pynxtools_em/utils/rsciio_hspy_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,12 @@ def get_named_axis(axes_metadata, dim_name):
if isinstance(axis, dict):
if "name" in axis:
if axis["name"] == dim_name:
reqs = ["offset", "scale", "size", "units"]
# "index_in_array" and "navigate" are currently not required
# and ignored but might become important
for req in reqs:
for req in ["offset", "scale", "size", "units"]:
if req not in axis:
raise ValueError(f"{req} not in {axis}!")
print(f"{req} not in {axis}!")
return None
retval = (
np.asarray(
axis["offset"]
Expand Down
2 changes: 1 addition & 1 deletion src/pynxtools_em/utils/string_conversions.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def string_to_number(arg: str):
raise TypeError(f"Input argument arg needs to be a string!")


def rchop(s, suffix):
def rchop(s: str, suffix: str) -> str:
    """Return *s* with *suffix* stripped from its right end, if present.

    An empty suffix or a non-matching suffix leaves *s* unchanged.
    """
    if not suffix:
        return s
    return s[: -len(suffix)] if s.endswith(suffix) else s
6 changes: 2 additions & 4 deletions src/pynxtools_em/utils/velox_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,9 @@ def velox_image_spectrum_or_generic_nxdata(list_of_dict) -> str:
else:
token.append(obj["units"])
else:
raise ValueError(
f"{obj.keys()} are not exactly the expected keywords!"
)
print(f"{obj.keys()} are not exactly the expected keywords!")
else:
raise ValueError(f"{obj} is not a dict!")
print(f"{obj} is not a dict!")
if len(token) >= 1:
print("_".join(token))
unit_categories = []
Expand Down

0 comments on commit 60b0c6f

Please sign in to comment.