diff --git a/.github/workflows/nightly-v4-integtest.yml b/.github/workflows/nightly-v4-integtest.yml
index c202269a..833225c4 100644
--- a/.github/workflows/nightly-v4-integtest.yml
+++ b/.github/workflows/nightly-v4-integtest.yml
@@ -75,16 +75,19 @@ jobs:
     if: always()
     needs: [make_nightly_tag, integration_tests]
     steps:
-      - name: Surface failing tests
-        #uses: pmeier/pytest-results-action@8104ed7b3d3ba4bb0d550e406fc26aa756630fcc
-        uses: andrewmogan/pytest-results-action@8b7955ab36427dc46b0f00ea7d4e66a75b7cc943
-        env:
-          NIGHTLY_TAG: ${{needs.make_nightly_tag.outputs.tag}}
+      - name: Checkout daq-release
+        uses: actions/checkout@v4
         with:
-          path: ${{ github.workspace }}/integration_tests_${{ env.NIGHTLY_TAG }}/*_test_results.xml
-          summary: true
-          display-options: fEX
-          fail-on-empty: true
+          repository: DUNE-DAQ/daq-release
+          path: daq-release-integtest
+          ref: amogan/integtest_markdown_parser
+      - name: Generate summary tables
+        env:
+          NIGHTLY_TAG: ${{ needs.make_nightly_tag.outputs.tag}}
+        run: |
+          cd daq-release-integtest/scripts/github-ci/
+          python integtest_xml_parser.py --input-directory ${{ github.workspace }}/integration_tests_${{ env.NIGHTLY_TAG }}
+          cat pytest_summary_table.md >> $GITHUB_STEP_SUMMARY
 
   cleanup_xml_files:
     runs-on: daq
diff --git a/.github/workflows/nightly-v5-integtest.yml b/.github/workflows/nightly-v5-integtest.yml
index b433d311..8ce9c7ad 100644
--- a/.github/workflows/nightly-v5-integtest.yml
+++ b/.github/workflows/nightly-v5-integtest.yml
@@ -118,33 +118,38 @@ jobs:
           pytest -v -s --junit-xml=${{ matrix.test_name }}_results.xml \
             $TEST_PATH/${{ matrix.test_name }}.py
-            #$LISTREV_SHARE/integtest/listrev_test.py
 
   parse_results:
     runs-on: daq
     if: always()
     needs: [make_release_tag, integration_tests]
     steps:
-      - name: Surface failing tests
-        #uses: pmeier/pytest-results-action@8104ed7b3d3ba4bb0d550e406fc26aa756630fcc
-        uses: andrewmogan/pytest-results-action@8b7955ab36427dc46b0f00ea7d4e66a75b7cc943
-        env:
-          RELEASE_TAG: ${{ needs.make_release_tag.outputs.tag}}
+      - name: Checkout daq-release
+        uses: actions/checkout@v4
         with:
-          path: ${{ github.workspace }}/integration_tests_${{ env.RELEASE_TAG }}/*_results.xml
-          summary: true
-          display-options: fEX
-          fail-on-empty: true
+          repository: DUNE-DAQ/daq-release
+          path: daq-release-integtest
+          ref: amogan/integtest_markdown_parser
+      - name: Generate summary tables
+        env:
+          RELEASE_TAG: ${{needs.make_release_tag.outputs.tag}}
+        run: |
+          cd daq-release-integtest/scripts/github-ci/
+          python integtest_xml_parser.py --input-directory ${{ github.workspace }}/integration_tests_$RELEASE_TAG
+          cat pytest_summary_table.md >> $GITHUB_STEP_SUMMARY
 
-  cleanup_xml_files:
+  cleanup_files:
     runs-on: daq
     if: always()
     needs: [make_release_tag, parse_results]
     steps:
       - name: Remove xml files
         env:
-          RELEASE_TAG: ${{ needs.make_release_tag.outputs.tag }}
+          RELEASE_TAG: ${{needs.make_release_tag.outputs.tag}}
+        run: |
+          rm -rf ${{ github.workspace }}/integration_tests_$RELEASE_TAG
+      - name: Remove daq-release
         run: |
-          rm -rf ${{ github.workspace }}/integration_tests_${{ env.RELEASE_TAG }}
+          rm -rf ${{ github.workspace }}/daq-release-integtest/
 
   # Integration tests can sometimes collide with stale processes, leading to timeout
   cleanup_stale_gunicorn_processes:
diff --git a/scripts/github-ci/integtest_xml_parser.py b/scripts/github-ci/integtest_xml_parser.py
new file mode 100644
index 00000000..bdf6019f
--- /dev/null
+++ b/scripts/github-ci/integtest_xml_parser.py
@@ -0,0 +1,146 @@
+import os
+import argparse
+from pathlib import Path
+import xml.etree.ElementTree as ET
+
+def get_xml_files(directory, pattern):
+    path = Path(directory)
+    # Return a sorted list (not a generator) so callers can check for emptiness.
+    return sorted(path.rglob(pattern))
+
+def get_test_name(file_path):
+    file_name = os.path.basename(file_path)
+    # Results file names should look like "minimal_system_quick_test_results.xml"
+    return file_name.replace('_results.xml', '')
+
+def parse_junit_xml(file_path):
+    tree = ET.parse(file_path)
+    root = tree.getroot()
+
+    test_suite_name = get_test_name(file_path)
+
+    results = []
+    for testcase in root.findall(".//testcase"):
+        # Strip the pytest parametrization suffix, e.g. "test_x[conf]" -> "test_x"
+        test_name = testcase.get("name").split("[")[0]
+        result = "passed"
+
+        if testcase.find("failure") is not None:
+            result = "failed"
+        elif testcase.find("error") is not None:
+            result = "error"
+        elif testcase.find("skipped") is not None:
+            result = "skipped"
+
+        results.append({
+            "test_suite_name": test_suite_name,
+            "test_name": test_name,
+            "result": result
+        })
+    return results
+
+def which_emoji(test_status):
+    emoji_map = {
+        'passed': ':white_check_mark:',
+        'failed': ':x:',
+        'skipped': ':grey_question:'
+    }
+    return emoji_map.get(test_status, ':shrug:')
+
+def format_markdown_row(test):
+    emoji = which_emoji(test['result'])
+    return f"| {test['test_name']} | {emoji} {test['result']} |\n"
+
+def generate_markdown_table(results, output_filename):
+    with open(output_filename, 'w') as f:
+        for result in results:
+            # An XML file with no test cases yields an empty list; skip it.
+            if not result:
+                continue
+            f.write(f"# {result[0]['test_suite_name']} Results\n")
+            f.write("| Test Case | Status |\n")
+            f.write("|-----------|--------|\n")
+            f.writelines(format_markdown_row(test) for test in result)
+            # Blank line so consecutive suite tables render as separate blocks.
+            f.write("\n")
+
+    if not os.path.exists(output_filename):
+        raise FileNotFoundError(f"There was a problem writing the output markdown file: {output_filename}")
+
+    print(f"Markdown summary generated at {output_filename}")
+
+def prepend_test_summary(markdown_file):
+    num_passed = 0
+    num_failed = 0
+    num_skipped = 0
+    total_tests = 0
+    with open(markdown_file, 'r') as ifile:
+        original_lines = ifile.readlines()
+
+    # Tally the status keywords written into the markdown table rows.
+    for line in original_lines:
+        if 'passed' in line:
+            num_passed += 1
+            total_tests += 1
+        elif 'failed' in line:
+            num_failed += 1
+            total_tests += 1
+        elif 'skipped' in line:
+            num_skipped += 1
+            total_tests += 1
+
+    print('Passed:', num_passed)
+    print('Failed:', num_failed)
+    print('Skipped:', num_skipped)
+
+    summary = f"{num_passed} passed, {num_failed} failed, and {num_skipped} skipped of {total_tests} total tests.\n"
+    new_lines = [summary] + original_lines
+    with open(markdown_file, 'w') as ofile:
+        ofile.writelines(new_lines)
+
+def main():
+    parser = argparse.ArgumentParser(description="Parse JUnit XML files and summarize the test case results in markdown tables.")
+    parser.add_argument("--input-directory", "-d",
+                        help="Path to a directory containing JUnit XML files.")
+    parser.add_argument("--input-file", "-i",
+                        help="Path to a single JUnit XML file. Cannot be used in conjunction with --input-directory.")
+    parser.add_argument("--output-markdown-file", "-o",
+                        default="pytest_summary_table.md",
+                        help="Name of the output file containing the markdown summary table. Default: ./pytest_summary_table.md")
+
+    args = parser.parse_args()
+
+    if args.input_directory and args.input_file:
+        print("Error: You must specify either an input directory or a single file, not both.")
+        exit(1)
+
+    test_results = []
+
+    if args.input_directory:
+        if not os.path.isdir(args.input_directory):
+            print(f"Error: {args.input_directory} is not a valid directory.")
+            exit(2)
+
+        xml_files = get_xml_files(args.input_directory, "*.xml")
+        if not xml_files:
+            print(f"Error: No xml files found in {args.input_directory}.")
+            exit(3)
+
+        for file in xml_files:
+            test_results.append(parse_junit_xml(file))
+
+    elif args.input_file:
+        if not os.path.isfile(args.input_file):
+            print(f"Error: Input file {args.input_file} is invalid.")
+            exit(4)
+
+        test_results.append(parse_junit_xml(args.input_file))
+    else:
+        print("Error: No input file or directory specified. Exiting...")
+        exit(5)
+
+    generate_markdown_table(test_results, args.output_markdown_file)
+    prepend_test_summary(args.output_markdown_file)
+
+if __name__ == "__main__":
+    main()
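
For reviewers who want to exercise the new parser outside CI, a minimal local smoke test is sketched below; it is not part of this patch. It fabricates one JUnit XML results file of the kind pytest --junit-xml writes (the integration_tests_dev directory name and the test case names are invented for the example) and runs the same parse/format/summarize sequence that main() performs. Run it from scripts/github-ci/ so the import resolves.

# Local smoke test for integtest_xml_parser.py (illustrative only, not part of
# the patch). Run from scripts/github-ci/ so the import below resolves.
from pathlib import Path

from integtest_xml_parser import (generate_markdown_table, parse_junit_xml,
                                  prepend_test_summary)

# Fabricate one JUnit XML results file of the kind "pytest --junit-xml" writes;
# the directory name and test names here are invented for the example.
sample_dir = Path("integration_tests_dev")
sample_dir.mkdir(exist_ok=True)
(sample_dir / "minimal_system_quick_test_results.xml").write_text(
    '<testsuites><testsuite name="pytest" tests="2">'
    '<testcase classname="integtest" name="test_nanorc_success"/>'
    '<testcase classname="integtest" name="test_log_files">'
    '<failure message="assertion failed"/></testcase>'
    '</testsuite></testsuites>'
)

# Same sequence main() runs: parse each file, emit the tables, prepend totals.
results = [parse_junit_xml(path) for path in sorted(sample_dir.rglob("*.xml"))]
generate_markdown_table(results, "pytest_summary_table.md")
prepend_test_summary("pytest_summary_table.md")
print(Path("pytest_summary_table.md").read_text())

This should print a summary line along the lines of "1 passed, 1 failed, and 0 skipped of 2 total tests." followed by the per-suite markdown table, which is the same content the workflow appends to $GITHUB_STEP_SUMMARY.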