diff --git a/README.md b/README.md index 04180bf..f2a68b4 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ it if found. It is also possible to use older versions of the standard via a command line option, provided they are still available for download (at the time of -writing, standards are available from 2014a to 2018b). +writing, standards are available since 2014a). dcm_dump_info ------------- diff --git a/dcm_spec_tools/spec_reader/condition.py b/dcm_spec_tools/spec_reader/condition.py index 8486066..501186b 100644 --- a/dcm_spec_tools/spec_reader/condition.py +++ b/dcm_spec_tools/spec_reader/condition.py @@ -131,4 +131,3 @@ def to_string(self, dict_info): elif self.operator == '>': result += ' is greater than ' + self.values[0] return result - diff --git a/dcm_spec_tools/spec_reader/condition_parser.py b/dcm_spec_tools/spec_reader/condition_parser.py index 7553b6d..2237aeb 100644 --- a/dcm_spec_tools/spec_reader/condition_parser.py +++ b/dcm_spec_tools/spec_reader/condition_parser.py @@ -129,7 +129,8 @@ def _tag_id(tag_id_string): def _parse_tag(self, tag_string): match = self.tag_expression.match(tag_string.strip()) if match: - value_index = 0 if match.group('index') is None else int(match.group('index')) - 1 + value_index = (0 if match.group('index') is None + else int(match.group('index')) - 1) if match.group('id') is not None: return match.group('id'), value_index tag_name = match.group('name').strip() @@ -177,8 +178,10 @@ def extract_value_string(self, value_string): continue if end_index > 0: if value_string.find(' or ') in [end_index, end_index + 1]: - # differentiate between several values and several conditions - check if the rest is a condition - or_cond = self._parse_tag_expressions(value_string[end_index + 3:]) + # differentiate between several values and several + # conditions - check if the rest is a condition + or_cond = self._parse_tag_expressions( + value_string[end_index + 3:]) if or_cond.type == 'U': start_index = end_index + 4 continue @@ -199,7 +202,8 @@ def _parse_tag_expressions(self, condition, nested=False): condition = rest[len(operator) + 1:] break if logical_op is not None: - next_result = self._parse_tag_expressions(condition, nested=True) + next_result = self._parse_tag_expressions( + condition, nested=True) if next_result.type != 'U': next_result.type = None new_result = Condition(ctype=result.type) @@ -250,13 +254,15 @@ def _parse_multiple_tags(self, condition, operator, values, logical_op): result = Condition() cond_list = self._condition_list(logical_op, result) for tag_string in condition.split(', '): - tag_result = self._result_from_tag_string(tag_string, operator, values) + tag_result = self._result_from_tag_string( + tag_string, operator, values) if tag_result: cond_list.append(tag_result) if len(cond_list) > 1: return result - def _condition_list(self, logical_op, result): + @staticmethod + def _condition_list(logical_op, result): cond_list = (result.and_conditions if logical_op == 'and' else result.or_conditions) return cond_list diff --git a/dcm_spec_tools/spec_reader/edition_reader.py b/dcm_spec_tools/spec_reader/edition_reader.py index a8ab122..935f71d 100644 --- a/dcm_spec_tools/spec_reader/edition_reader.py +++ b/dcm_spec_tools/spec_reader/edition_reader.py @@ -4,6 +4,7 @@ import os import re import sys +from abc import ABC from dcm_spec_tools import __version__ from dcm_spec_tools.spec_reader.part3_reader import Part3Reader @@ -11,18 +12,12 @@ from dcm_spec_tools.spec_reader.part6_reader import Part6Reader from 
dcm_spec_tools.spec_reader.serializer import DefinitionEncoder -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve +from urllib.request import urlretrieve -try: - import HTMLParser as html_parser -except ImportError: - import html.parser as html_parser +import html.parser as html_parser -class EditionParser(html_parser.HTMLParser): +class EditionParser(html_parser.HTMLParser, ABC): edition_re = re.compile(r'\d\d\d\d[a-h]') def __init__(self): @@ -125,7 +120,8 @@ def get_edition(self, revision): def is_current(self, revision): """Get the edition matching the revision or None. - The revision can be the edition name, the year of the edition, or 'current'. + The revision can be the edition name, the year of the edition, + or 'current'. """ if revision is None: return True diff --git a/dcm_spec_tools/spec_reader/part3_reader.py b/dcm_spec_tools/spec_reader/part3_reader.py index 138ed70..b9da0f5 100644 --- a/dcm_spec_tools/spec_reader/part3_reader.py +++ b/dcm_spec_tools/spec_reader/part3_reader.py @@ -1,5 +1,6 @@ """ -Chapter3Reader collects DICOM Information Object Definition information for specific Storage SOP Classes. +Chapter3Reader collects DICOM Information Object Definition information +for specific Storage SOP Classes. The information is taken from PS3.3 in docbook format as provided by ACR NEMA. """ import logging @@ -8,7 +9,9 @@ import sys from dcm_spec_tools.spec_reader.condition_parser import ConditionParser -from dcm_spec_tools.spec_reader.spec_reader import SpecReader, SpecReaderParseError, SpecReaderLookupError +from dcm_spec_tools.spec_reader.spec_reader import ( + SpecReader, SpecReaderParseError, SpecReaderLookupError +) class Part3Reader(SpecReader): @@ -33,11 +36,13 @@ def iod_description(self, chapter): """Return the IOD information for the given chapter. The return value is a dict with the entries: - 'title': The display name of the IOD - 'modules': A dictionary of the contained IOD modules with the module name as key. - A module dict value has the following entries: - 'ref': The section in PS3.3 describing the module (e.g. 'C.7.4.2') - 'use': Usage information (e.g. 'M' for mandatory) + 'title': The display name of the IOD + 'modules': A dictionary of the contained IOD modules with the + module name as key. + A module dict value has the following entries: + 'ref': The section in PS3.3 describing the module + (e.g. 'C.7.4.2') + 'use': Usage information (e.g. 'M' for mandatory) Raises SpecReaderLookupError if the chapter is not found. """ if chapter not in self._iod_descriptions: @@ -48,29 +53,33 @@ def iod_description(self, chapter): try: return self._iod_descriptions[chapter] except KeyError: - raise SpecReaderLookupError('No definition found for chapter {}'.format(chapter)) + raise SpecReaderLookupError( + 'No definition found for chapter {}'.format(chapter)) def iod_descriptions(self): """Return the IOD information dict per chapter. - The dict has the chapter (e.g. 'A.3') as key and the IOD descriptions as value. + The dict has the chapter (e.g. 'A.3') as key and the IOD descriptions + as value. See iod_description() for the format of the IOD descriptions. Retired IODs (which have no module list) are omitted. """ - return {chapter: self.iod_description(chapter) for chapter in self._get_iod_nodes() + return {chapter: self.iod_description(chapter) for chapter in + self._get_iod_nodes() if self.iod_description(chapter)['modules']} def module_description(self, section): """Return the module information in the given section. 
The return value is a dict with the entries: - 'title': The name of the module - 'attributes': A dictionary of the contained module attributes with the tag as key. - An attribute dict value has the following entries: - 'name': the tag name - 'type': the type (1, 1C, 2, 2C, 3) - 'items': only for sequence tags - contains a dictionary - of the module attributes contained in the sequence + 'title': The name of the module + 'attributes': A dictionary of the contained module attributes + with the tag as key. + An attribute dict value has the following entries: + 'name': the tag name + 'type': the type (1, 1C, 2, 2C, 3) + 'items': only for sequence tags - contains a dictionary + of the module attributes contained in the sequence Raises SpecReaderLookupError if the section is not found. """ if section not in self._module_descriptions: @@ -81,12 +90,14 @@ def module_description(self, section): try: return self._module_descriptions[section] except KeyError: - raise SpecReaderLookupError('No definition found for section {}'.format(section)) + raise SpecReaderLookupError( + 'No definition found for section {}'.format(section)) def module_descriptions(self): """Return the module attribute information for all IODs. - The return value is a dict with the section name as key and a description dict as value. + The return value is a dict with the section name as key and + a description dict as value. See module_description() for the content of the value dict. """ # ensure that all module attributes are read @@ -95,7 +106,8 @@ def module_descriptions(self): def _get_iod_nodes(self): if not self._iod_nodes: - chapter_a = self._find(self._get_doc_root(), ['chapter[@label="A"]']) + chapter_a = self._find(self._get_doc_root(), + ['chapter[@label="A"]']) if chapter_a is None: raise SpecReaderParseError('Chapter A in Part 3 not found') # ignore A.1 @@ -109,13 +121,16 @@ def _get_iod_nodes(self): iod_sub_nodes = [] nodes_with_subnodes = [] for iod_node in all_iod_nodes: - sub_nodes = self._find_sections_with_title_endings(iod_node, iod_def_endings) + sub_nodes = self._find_sections_with_title_endings( + iod_node, iod_def_endings) if sub_nodes: nodes_with_subnodes.append(iod_node) iod_sub_nodes.extend(sub_nodes) - all_iod_nodes = [node for node in all_iod_nodes if node not in nodes_with_subnodes] + all_iod_nodes = [node for node in all_iod_nodes if + node not in nodes_with_subnodes] all_iod_nodes.extend(iod_sub_nodes) - self._iod_nodes = {node.attrib['label']: node for node in all_iod_nodes} + self._iod_nodes = {node.attrib['label']: node for node in + all_iod_nodes} return self._iod_nodes def _get_section_node(self, section): @@ -171,14 +186,16 @@ def _handle_included_attributes(self, columns, current_descriptions): if label not in self._module_descriptions: ref_node = self._get_ref_node(element, label) if ref_node is None: - raise SpecReaderLookupError('Failed to lookup include reference ' + include_ref) + raise SpecReaderLookupError( + 'Failed to lookup include reference ' + include_ref) # it is allowed to have no attributes (example: Raw Data) ref_description = self._parse_module_description(ref_node) or {} self._module_descriptions[label] = ref_description current_descriptions[-1].setdefault('include', []).append(label) self._current_refs.pop() - def _handle_regular_attribute(self, columns, current_descriptions, last_tag_id, tag_name): + def _handle_regular_attribute(self, columns, current_descriptions, + last_tag_id, tag_name): tag_id = self._find_text(columns[1]) tag_type = self._find_text(columns[2]) if 
tag_id: @@ -191,14 +208,16 @@ def _handle_regular_attribute(self, columns, current_descriptions, last_tag_id, # index = cond.find('Required if ') # if index >= 0: # current_descriptions[-1][tag_id]['desc'] = cond[index:] - current_descriptions[-1][tag_id]['cond'] = self._condition_parser.parse( + current_descriptions[-1][tag_id][ + 'cond'] = self._condition_parser.parse( self._find_all_text(columns[3])) last_tag_id = tag_id return last_tag_id def _get_ref_node(self, element, label): - return self._get_doc_tree().find('.//{}{}[@label="{}"]'.format(self.docbook_ns, element, label)) + return self._get_doc_tree().find( + './/{}{}[@label="{}"]'.format(self.docbook_ns, element, label)) @staticmethod def _get_ref_element_and_label(ref): @@ -207,7 +226,8 @@ def _get_ref_element_and_label(ref): element = 'section' return element, label - def _get_tag_name_and_level(self, column, current_descriptions, current_level, last_tag_id): + def _get_tag_name_and_level(self, column, current_descriptions, + current_level, last_tag_id): tag_name = self._find_text(column) if not tag_name: return '', 0 @@ -217,7 +237,8 @@ def _get_tag_name_and_level(self, column, current_descriptions, current_level, l if level > current_level: sequence_description = {} try: - current_descriptions[-1][last_tag_id]['items'] = sequence_description + current_descriptions[-1][last_tag_id][ + 'items'] = sequence_description current_descriptions.append(sequence_description) except KeyError: # silently ignore error in older specs @@ -227,12 +248,15 @@ def _get_tag_name_and_level(self, column, current_descriptions, current_level, l return tag_name, level def _get_iod_modules(self, iod_node): - module_table_sections = self._find_sections_with_title_endings(iod_node, (' Module Table', ' IOD Modules')) + module_table_sections = self._find_sections_with_title_endings( + iod_node, (' Module Table', ' IOD Modules')) if not module_table_sections: - module_table_sections = self._find_sections_with_title_endings(iod_node, ('IOD Entity-Relationship Model',)) + module_table_sections = self._find_sections_with_title_endings( + iod_node, ('IOD Entity-Relationship Model',)) modules = {} if len(module_table_sections) == 1: - module_rows = self._findall(module_table_sections[0], ['table', 'tbody', 'tr']) + module_rows = self._findall(module_table_sections[0], + ['table', 'tbody', 'tr']) row_span = 0 for row in module_rows: columns = self._findall(row, ['td']) @@ -245,19 +269,26 @@ def _get_iod_modules(self, iod_node): name = self._find_text(columns[name_index]) modules[name] = {} try: - ref_section = self._find(columns[name_index + 1], ['para', 'xref']).attrib['linkend'].split('_')[1] + ref_section = self._find(columns[name_index + 1], + ['para', 'xref']).attrib[ + 'linkend'].split('_')[1] except AttributeError: try: - ref_section = self._find(columns[name_index + 1], ['xref']).attrib['linkend'].split('_')[1] + ref_section = self._find( + columns[name_index + 1], ['xref']).attrib[ + 'linkend'].split('_')[1] except AttributeError: - self.logger.warning('Failed to read module table for %s', name) + self.logger.warning( + 'Failed to read module table for %s', name) continue modules[name]['ref'] = ref_section # make sure the module description is loaded self.module_description(ref_section) modules[name]['use'] = self._find_text(columns[name_index + 2]) - if self._condition_parser is not None and modules[name]['use'].startswith('C - '): - modules[name]['cond'] = self._condition_parser.parse(modules[name]['use']) + if (self._condition_parser is not None and + 
modules[name]['use'].startswith('C - ')): + modules[name]['cond'] = self._condition_parser.parse( + modules[name]['use']) else: modules[name]['use'] = modules[name]['use'][0] row_span -= 1 @@ -270,6 +301,7 @@ def _find_sections_with_title_endings(self, node, title_endings): title_node = self._find(sections_node, ['title']) if title_node is not None: title = title_node.text - if any([title.endswith(title_ending) for title_ending in title_endings]): + if any([title.endswith(title_ending) for title_ending in + title_endings]): found_nodes.append(sections_node) return found_nodes diff --git a/dcm_spec_tools/spec_reader/part4_reader.py b/dcm_spec_tools/spec_reader/part4_reader.py index f2b41e3..fda78bc 100644 --- a/dcm_spec_tools/spec_reader/part4_reader.py +++ b/dcm_spec_tools/spec_reader/part4_reader.py @@ -1,8 +1,11 @@ """ -Chapter4Reader collects SOP Class Information information for specific Storage SOP Classes. +Chapter4Reader collects SOP Class information for specific +Storage SOP Classes. The information is taken from PS3.4 in docbook format as provided by ACR NEMA. """ -from dcm_spec_tools.spec_reader.spec_reader import SpecReader, SpecReaderLookupError, SpecReaderParseError +from dcm_spec_tools.spec_reader.spec_reader import ( + SpecReader, SpecReaderLookupError, SpecReaderParseError +) class Part4Reader(SpecReader): @@ -21,23 +24,30 @@ def iod_chapter(self, sop_class_uid): try: return self._sop_class_uids[sop_class_uid] except KeyError: - raise SpecReaderLookupError('SOP Class {} not found'.format(sop_class_uid)) + raise SpecReaderLookupError( + 'SOP Class {} not found'.format(sop_class_uid)) def iod_chapters(self): - """Return a dict of the chapter in part 3 for each SOP Class listed in table B.5.""" + """Return a dict of the chapter in part 3 for each SOP Class + listed in table B.5. + """ if not self._chapters: self._read_sop_table('B.5') return self._chapters def _read_sop_table(self, chapter): table = self._find(self._get_doc_root(), - ['chapter[@label="B"]', 'section[@label="{}"]'.format(chapter), 'table', 'tbody']) + ['chapter[@label="B"]', + 'section[@label="{}"]'.format(chapter), 'table', + 'tbody']) if table is None: raise SpecReaderParseError('SOP Class table in Part 4 not found') row_nodes = self._findall(table, ['tr']) for row_node in row_nodes: column_nodes = self._findall(row_node, ['td']) - if len(column_nodes) == 3: + if len(column_nodes) in (3, 4): + # columns are SOP Class Name, SOP Class UID, IOD Specification + # and Specialization (only since 2020c) uid = self.cleaned_value(self._find_text(column_nodes[1])) target_node = self._find(column_nodes[2], ['para', 'olink']) if target_node is not None: diff --git a/dcm_spec_tools/spec_reader/part6_reader.py b/dcm_spec_tools/spec_reader/part6_reader.py index b5e989a..88172b5 100644 --- a/dcm_spec_tools/spec_reader/part6_reader.py +++ b/dcm_spec_tools/spec_reader/part6_reader.py @@ -1,8 +1,11 @@ """ Chapter6Reader collects DICOM Data Element information. -The information is taken from DICOM dictionary (PS3.6) in docbook format as provided by ACR NEMA. +The information is taken from DICOM dictionary (PS3.6) in docbook format +as provided by ACR NEMA. """ -from dcm_spec_tools.spec_reader.spec_reader import SpecReader, SpecReaderParseError +from dcm_spec_tools.spec_reader.spec_reader import ( + SpecReader, SpecReaderParseError +) class Part6Reader(SpecReader): @@ -17,7 +20,8 @@ def __init__(self, spec_dir): def data_elements(self): """Return the information about registered DICOM data elements.
- The return value is a dict with the the tag ID (group/element tuple) as key. + The return value is a dict with the tag ID (group/element tuple) + as key. See data_element() for the contained value. """ if self._data_elements is None: @@ -29,7 +33,8 @@ def data_element(self, tag_id): Arguments: tag_id: The tag ID as string in format (####,####) - The return value is a dict with the the tag ID (group/element tuple) as key. + The return value is a dict with the tag ID (group/element tuple) + as key. The values of the retruned dict are dicts with the following entries: 'name': The human readable tag name 'vr': The tag value representation (e.g. 'PN') @@ -43,7 +48,8 @@ def _read_element_table(self): table = self._find(self._get_doc_root(), ['chapter[@label="6"]', 'table', 'tbody']) if table is None: - raise SpecReaderParseError('Registry of DICOM Data Elements not found in PS3.6') + raise SpecReaderParseError( + 'Registry of DICOM Data Elements not found in PS3.6') row_nodes = self._findall(table, ['tr']) attrib_indexes = [1, 3, 4, 5] for row_node in row_nodes: @@ -51,7 +57,8 @@ if len(column_nodes) == 6: tag_id = self._find_text(column_nodes[0]) if tag_id: - tag_attributes = [self._find_text(column_nodes[i]) for i in attrib_indexes] + tag_attributes = [self._find_text(column_nodes[i]) + for i in attrib_indexes] if tag_attributes is not None: self._data_elements[tag_id] = { 'name': tag_attributes[0], @@ -61,11 +68,14 @@ def uids(self, uid_type): - """Return a dict of UID values (keys) and names for the given UID type.""" + """Return a dict of UID values (keys) and names for the given UID type. + """ return self._get_uids().get(uid_type, {}) def all_uids(self): - """Return a dict of UID types with UID value/name dicts for the given UID type as value.""" + """Return a dict of UID types with UID value/name dicts for the + given UID type as value.
+ """ return self._get_uids() def sop_class_uids(self): @@ -88,17 +98,24 @@ def _get_uids(self): table = self._find(self._get_doc_root(), ['chapter[@label="A"]', 'table', 'tbody']) if table is None: - raise SpecReaderParseError('Registry of DICOM Unique Identifiers not found in PS3.6') + raise SpecReaderParseError( + 'Registry of DICOM Unique Identifiers not found in PS3.6') row_nodes = self._findall(table, ['tr']) for row_node in row_nodes: column_nodes = self._findall(row_node, ['td']) - if len(column_nodes) == 4: - uid_attributes = [self._find_text(column_nodes[i]) for i in range(3)] + nr_columns = len(column_nodes) + if nr_columns in (4, 5): + # columns are UID Value, UID Name, UID Keyword (only + # since 2020d), UID Type and Part + uid_attributes = [self._find_text(column_nodes[i]) + for i in range(nr_columns - 1)] if uid_attributes is not None: - uid_type = uid_attributes[2] - # in PS3.6 xml there are multiple zero width (U+200B) spaces inside the UIDs + uid_type = uid_attributes[nr_columns - 2] + # in PS3.6 xml there are multiple zero width (U+200B) + # spaces inside the UIDs # we remove them hoping this is the only such problem uid_value = self.cleaned_value(uid_attributes[0]) - self._uids.setdefault(uid_type, {})[uid_value] = self.cleaned_value(uid_attributes[1]) + self._uids.setdefault(uid_type, {})[ + uid_value] = self.cleaned_value(uid_attributes[1]) return self._uids diff --git a/dcm_spec_tools/spec_reader/spec_reader.py b/dcm_spec_tools/spec_reader/spec_reader.py index 15fa899..2bac8ab 100644 --- a/dcm_spec_tools/spec_reader/spec_reader.py +++ b/dcm_spec_tools/spec_reader/spec_reader.py @@ -1,12 +1,10 @@ """ -SpecReader raeds information from DICOM standard files in docbook format as provided by ACR NEMA. +SpecReader reads information from DICOM standard files in docbook format as +provided by ACR NEMA. 
""" import os -try: - import xml.etree.cElementTree as ElementTree -except ImportError: - import xml.etree.ElementTree as ElementTree +import xml.etree.ElementTree as ElementTree class SpecReaderError(Exception): @@ -33,7 +31,8 @@ def __init__(self, spec_dir): self.part_nr = 0 document_files = os.listdir(self.spec_dir) if not document_files: - raise SpecReaderFileError(u'Missing docbook files in {}'.format(self.spec_dir)) + raise SpecReaderFileError( + u'Missing docbook files in {}'.format(self.spec_dir)) self._doc_trees = {} def _get_doc_tree(self): @@ -41,11 +40,16 @@ def _get_doc_tree(self): doc_name = 'part{:02}.xml'.format(self.part_nr) document_files = os.listdir(self.spec_dir) if doc_name not in document_files: - raise SpecReaderFileError(u'Missing docbook file {} in {}'.format(doc_name, self.spec_dir)) + raise SpecReaderFileError( + u'Missing docbook file {} in {}'.format( + doc_name, self.spec_dir)) try: - self._doc_trees[self.part_nr] = ElementTree.parse(os.path.join(self.spec_dir, doc_name)) + self._doc_trees[self.part_nr] = ElementTree.parse( + os.path.join(self.spec_dir, doc_name)) except ElementTree.ParseError: - raise SpecReaderFileError(u'Parse error in docbook file {} in {}'.format(doc_name, self.spec_dir)) + raise SpecReaderFileError( + u'Parse error in docbook file {} in {}'.format( + doc_name, self.spec_dir)) return self._doc_trees.get(self.part_nr) def _get_doc_root(self): @@ -54,18 +58,21 @@ def _get_doc_root(self): return doc_tree.getroot() def _find(self, node, elements): - search_string = '/'.join([self.docbook_ns + element for element in elements]) + search_string = '/'.join( + [self.docbook_ns + element for element in elements]) if node is not None: return node.find(search_string) def _findall(self, node, elements): - search_string = '/'.join([self.docbook_ns + element for element in elements]) + search_string = '/'.join( + [self.docbook_ns + element for element in elements]) return node.findall(search_string) def _find_text(self, node): try: para_node = self._find(node, ['para']) - text_parts = [text.strip() for text in para_node.itertext() if text.strip()] + text_parts = [text.strip() for text in para_node.itertext() if + text.strip()] return ' '.join(text_parts) if text_parts else '' except AttributeError: return '' diff --git a/dcm_spec_tools/spec_reader/tests/test_part6_reader.py b/dcm_spec_tools/spec_reader/tests/test_part6_reader.py index f421a32..6c9e40f 100644 --- a/dcm_spec_tools/spec_reader/tests/test_part6_reader.py +++ b/dcm_spec_tools/spec_reader/tests/test_part6_reader.py @@ -63,5 +63,6 @@ def test_sop_class_name(self): def test_sop_class_uid(self): self.assertEqual('1.2.840.10008.5.1.4.1.1.2', self.reader.sop_class_uid('CT Image Storage')) + if __name__ == '__main__': unittest.main() diff --git a/dcm_spec_tools/validator/tests/test_dicom_file_validator.py b/dcm_spec_tools/validator/tests/test_dicom_file_validator.py index 794d4e8..e3ab50c 100644 --- a/dcm_spec_tools/validator/tests/test_dicom_file_validator.py +++ b/dcm_spec_tools/validator/tests/test_dicom_file_validator.py @@ -17,9 +17,11 @@ class DicomFileValidatorTest(pyfakefs.fake_filesystem_unittest.TestCase): @classmethod def setUpClass(cls): - with open(os.path.join(json_fixture_path(), EditionReader.iod_info_json)) as info_file: + with open(os.path.join(json_fixture_path(), + EditionReader.iod_info_json)) as info_file: cls.iod_info = json.load(info_file) - with open(os.path.join(json_fixture_path(), EditionReader.module_info_json)) as info_file: + with 
open(os.path.join(json_fixture_path(), + EditionReader.module_info_json)) as info_file: cls.module_info = json.load(info_file) def setUp(self): @@ -54,16 +56,20 @@ def test_invalid_file(self): def test_missing_sop_class(self): filename = 'test.dcm' - file_dataset = FileDataset(filename, Dataset(), file_meta=self.create_metadata()) + file_dataset = FileDataset(filename, Dataset(), + file_meta=self.create_metadata()) write_file(filename, file_dataset, write_like_original=False) self.assert_fatal_error(filename, 'Missing SOPClassUID') def test_unknown_sop_class(self): dataset = Dataset() dataset.SOPClassUID = 'Unknown' - file_dataset = FileDataset('test', dataset, file_meta=self.create_metadata()) + file_dataset = FileDataset('test', dataset, + file_meta=self.create_metadata()) write_file('test', file_dataset, write_like_original=False) - self.assert_fatal_error('test', 'Unknown SOPClassUID (probably retired): Unknown') + self.assert_fatal_error( + 'test', + 'Unknown SOPClassUID (probably retired): Unknown') def test_validate_dir(self): self.fs.create_dir(os.path.join('foo', 'bar', 'baz')) @@ -80,7 +86,8 @@ def test_validate_dir(self): def test_non_fatal_errors(self): dataset = Dataset() dataset.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage - file_dataset = FileDataset('test', dataset, file_meta=self.create_metadata()) + file_dataset = FileDataset('test', dataset, + file_meta=self.create_metadata()) write_file('test', file_dataset, write_like_original=False) error_dict = self.validator.validate('test') self.assertEqual(1, len(error_dict)) diff --git a/dcm_spec_tools/validator/tests/test_iod_validator.py b/dcm_spec_tools/validator/tests/test_iod_validator.py index 9a95ef4..3887319 100644 --- a/dcm_spec_tools/validator/tests/test_iod_validator.py +++ b/dcm_spec_tools/validator/tests/test_iod_validator.py @@ -5,10 +5,7 @@ from dcm_spec_tools.spec_reader.edition_reader import EditionReader -try: - from pydicom.dataset import Dataset -except ImportError: - from dicom.dataset import Dataset +from pydicom.dataset import Dataset from dcm_spec_tools.tests.test_utils import json_fixture_path from dcm_spec_tools.validator.iod_validator import IODValidator @@ -23,9 +20,11 @@ class IODValidatorTest(unittest.TestCase): @classmethod def setUpClass(cls): - with open(os.path.join(json_fixture_path(), EditionReader.iod_info_json)) as info_file: + with open(os.path.join(json_fixture_path(), + EditionReader.iod_info_json)) as info_file: cls.iod_specs = json.load(info_file) - with open(os.path.join(json_fixture_path(), EditionReader.module_info_json)) as info_file: + with open(os.path.join(json_fixture_path(), + EditionReader.module_info_json)) as info_file: cls.module_specs = json.load(info_file) def setUp(self): @@ -33,7 +32,8 @@ def setUp(self): logging.disable(logging.CRITICAL) def validator(self, data_set): - return IODValidator(data_set, self.iod_specs, self.module_specs, None, logging.ERROR) + return IODValidator(data_set, self.iod_specs, self.module_specs, None, + logging.ERROR) @staticmethod def new_data_set(tags): @@ -66,7 +66,8 @@ def has_tag_error(messages, module_name, tag_id_string, error_kind): if not module_name in messages: return False for message in messages[module_name]: - if message.startswith('Tag {} is {}'.format(tag_id_string, error_kind)): + if message.startswith( + 'Tag {} is {}'.format(tag_id_string, error_kind)): return True return False @@ -83,13 +84,17 @@ def test_missing_tags(self): self.assertIn('CT Image', result) # PatientName is set - 
self.assertFalse(self.has_tag_error(result, 'Patient', '(0010,0010)', 'missing')) + self.assertFalse( + self.has_tag_error(result, 'Patient', '(0010,0010)', 'missing')) # PatientSex - type 2, missing - self.assertTrue(self.has_tag_error(result, 'Patient', '(0010,0040)', 'missing')) + self.assertTrue( + self.has_tag_error(result, 'Patient', '(0010,0040)', 'missing')) # Clinical Trial Sponsor Name -> type 1, but module usage U - self.assertFalse(self.has_tag_error(result, 'Patient', '(0012,0010)', 'missing')) + self.assertFalse( + self.has_tag_error(result, 'Patient', '(0012,0010)', 'missing')) # Patient Breed Description -> type 2C, but no parsable condition - self.assertFalse(self.has_tag_error(result, 'Patient', '(0010,2292)', 'missing')) + self.assertFalse( + self.has_tag_error(result, 'Patient', '(0010,2292)', 'missing')) def test_empty_tags(self): data_set = self.new_data_set({ @@ -103,13 +108,16 @@ def test_empty_tags(self): self.assertNotIn('fatal', result) self.assertIn('CT Image', result) # Modality - type 1, present but empty - self.assertTrue(self.has_tag_error(result, 'Patient', '(0010,0040)', 'missing')) + self.assertTrue( + self.has_tag_error(result, 'Patient', '(0010,0040)', 'missing')) # PatientName - type 2, empty tag is allowed - self.assertFalse(self.has_tag_error(result, 'Patient', '(0010,0010)', 'missing')) + self.assertFalse( + self.has_tag_error(result, 'Patient', '(0010,0010)', 'missing')) def test_fulfilled_condition_existing_tag(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'CArmPositionerTabletopRelationship': 'YES', 'SynchronizationTrigger': 'SET', 'FrameOfReferenceUID': '1.2.3.4.5.6.7.8', @@ -120,14 +128,16 @@ def test_fulfilled_condition_existing_tag(self): result = validator.validate() # Frame Of Reference UID Is and Synchronization Trigger set - self.assertFalse(self.has_tag_error(result, 'Enhanced X-Ray Angiographic Image', - '(0020,0052)', 'missing')) + self.assertFalse( + self.has_tag_error(result, 'Enhanced X-Ray Angiographic Image', + '(0020,0052)', 'missing')) self.assertFalse(self.has_tag_error(result, 'Synchronization', '(0018,106A)', 'missing')) def test_fulfilled_condition_missing_tag(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'CArmPositionerTabletopRelationship': 'YES', 'PatientName': 'XXX', 'PatientID': 'ZZZ' @@ -142,7 +152,8 @@ def test_fulfilled_condition_missing_tag(self): def test_condition_not_met_no_tag(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ' }) @@ -156,7 +167,8 @@ def test_condition_not_met_no_tag(self): def test_condition_not_met_existing_tag(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'FrameOfReferenceUID': '1.2.3.4.5.6.7.8', 'SynchronizationTrigger': 'SET', 'PatientName': 'XXX', @@ -175,7 +187,8 @@ def test_condition_not_met_existing_tag(self): def test_and_condition_not_met(self): data_set = self.new_data_set({ - 
'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'SECONDARY', @@ -193,7 +206,8 @@ def test_and_condition_not_met(self): def test_only_one_and_condition_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'PRIMARY', @@ -211,7 +225,8 @@ def test_only_one_and_condition_met(self): def test_and_condition_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'MIXED', @@ -229,7 +244,8 @@ def test_and_condition_met(self): def test_presence_condition_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'PixelPaddingRangeLimit': '10', @@ -239,11 +255,13 @@ def test_presence_condition_met(self): result = validator.validate() self.assertTrue(self.has_tag_error(result, 'General Equipment', - '(0028,0120)', 'missing')) # Pixel Padding Value + '(0028,0120)', + 'missing')) # Pixel Padding Value def test_presence_condition_not_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'PixelPaddingRangeLimit': '10', @@ -252,11 +270,13 @@ def test_presence_condition_not_met(self): result = validator.validate() self.assertFalse(self.has_tag_error(result, 'General Equipment', - '(0028,0120)', 'missing')) # Pixel Padding Value + '(0028,0120)', + 'missing')) # Pixel Padding Value def test_greater_condition_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'SamplesPerPixel': 3 @@ -265,11 +285,13 @@ def test_greater_condition_met(self): result = validator.validate() self.assertTrue(self.has_tag_error(result, 'Image Pixel', - '(0028,0006)', 'missing')) # Planar configuration + '(0028,0006)', + 'missing')) # Planar configuration def test_greater_condition_not_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'SamplesPerPixel': 1 @@ -278,11 +300,13 @@ def test_greater_condition_not_met(self): result = validator.validate() self.assertFalse(self.has_tag_error(result, 'Image Pixel', - '(0028,0006)', 'missing')) # Planar configuration + '(0028,0006)', + 'missing')) # Planar configuration def test_points_to_condition_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # 
Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'FrameIncrementPointer': 0x00181065 @@ -291,11 +315,13 @@ def test_points_to_condition_met(self): result = validator.validate() self.assertTrue(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,1086)', 'missing')) # Skip beats + '(0018,1086)', + 'missing')) # Skip beats def test_points_to_condition_not_met(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'FrameIncrementPointer': 0x00181055 @@ -304,11 +330,13 @@ def test_points_to_condition_not_met(self): result = validator.validate() self.assertFalse(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,1086)', 'missing')) # Skip beats + '(0018,1086)', + 'missing')) # Skip beats def test_condition_for_not_required_tag_cond1_fulfilled(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'ORIGINAL', @@ -318,11 +346,13 @@ def test_condition_for_not_required_tag_cond1_fulfilled(self): result = validator.validate() self.assertTrue(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,9085)', 'missing')) # Cardiac signal source + '(0018,9085)', + 'missing')) # Cardiac signal source def test_condition_for_not_required_tag_no_cond_fulfilled(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'ORIGINAL', @@ -333,11 +363,13 @@ def test_condition_for_not_required_tag_no_cond_fulfilled(self): result = validator.validate() self.assertTrue(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,9085)', 'not allowed')) # Cardiac signal source + '(0018,9085)', + 'not allowed')) # Cardiac signal source def test_condition_for_not_required_tag_cond2_fulfilled_present(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'DERIVED', @@ -348,11 +380,13 @@ def test_condition_for_not_required_tag_cond2_fulfilled_present(self): result = validator.validate() self.assertFalse(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,9085)', 'not allowed')) # Cardiac signal source + '(0018,9085)', + 'not allowed')) # Cardiac signal source def test_condition_for_not_required_tag_cond2_fulfilled_not_present(self): data_set = self.new_data_set({ - 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', # Enhanced X-Ray Angiographic Image + 'SOPClassUID': '1.2.840.10008.5.1.4.1.1.12.1.1', + # Enhanced X-Ray Angiographic Image 'PatientName': 'XXX', 'PatientID': 'ZZZ', 'ImageType': 'DERIVED', @@ -362,7 +396,8 @@ def test_condition_for_not_required_tag_cond2_fulfilled_not_present(self): result = validator.validate() self.assertFalse(self.has_tag_error(result, 'Cardiac Synchronization', - '(0018,9085)', 'missing')) # Cardiac signal source + '(0018,9085)', + 'missing')) # Cardiac signal source if __name__ == '__main__':