diff --git a/.github/actions/generate-sitrep/action.yml b/.github/actions/generate-sitrep/action.yml
new file mode 100644
index 000000000..c41af48c6
--- /dev/null
+++ b/.github/actions/generate-sitrep/action.yml
@@ -0,0 +1,97 @@
+name: 'Generate Sitrep'
+
+description: 'Generates sitrep and badge JSON files using a Python script'
+
+inputs:
+  badge_label:
+    description: 'Label for the badge'
+    required: true
+  badge_filename:
+    description: 'Output badge filename'
+    required: true
+  sitrep_filename:
+    description: 'Output sitrep filename'
+    required: false
+    default: 'sitrep.json'
+  exit_status_patterns:
+    description: 'Glob pattern(s) for exit status files'
+    required: false
+  metrics_logs:
+    description: 'Metrics log file(s)'
+    required: false
+  metrics_json_patterns:
+    description: 'Glob pattern(s) for per-job metrics JSON files'
+    required: false
+  exit_status_summary_file:
+    description: 'Output exit status summary markdown file'
+    required: false
+  metrics_summary_file:
+    description: 'Output metrics summary markdown file'
+    required: false
+  badge_message:
+    description: 'Badge message (overrides default)'
+    required: false
+  badge_color:
+    description: 'Badge color (overrides default)'
+    required: false
+  tags:
+    description: 'Tags from the build'
+    required: false
+  digest:
+    description: 'Digest from the build'
+    required: false
+  outcome:
+    description: 'Outcome of the build'
+    required: false
+  github_run_id:
+    description: 'ID of the GH run'
+    required: false
+  github_output_file:
+    description: 'GitHub output file for actions'
+    required: false
+  github_step_summary:
+    description: 'GitHub step summary file'
+    required: false
+  total_tests:
+    description: 'Total number of tests'
+    required: false
+  errors:
+    description: 'Total number of errors'
+    required: false
+  failed_tests:
+    description: 'Number of failed tests'
+    required: false
+  passed_tests:
+    description: 'Number of passed tests'
+    required: false
+outputs:
+  STATUS:
+    description: 'The status of the tests (success or failure)'
+    value: ${{ steps.python-sitrep.outputs.STATUS }}
+
+runs:
+  using: 'composite'
+  steps:
+    - name: Run generate_sitrep.py
+      id: python-sitrep
+      shell: bash
+      run: |
+        python3 "${{ github.action_path }}/generate_sitrep.py" \
+          --badge_label "${{ inputs.badge_label }}" \
+          --badge_filename "${{ inputs.badge_filename }}" \
+          --sitrep_filename "${{ inputs.sitrep_filename }}" \
+          ${{ inputs.exit_status_patterns && format('--exit_status_patterns {0}', inputs.exit_status_patterns) || '' }} \
+          ${{ inputs.exit_status_summary_file && format('--exit_status_summary_file "{0}"', inputs.exit_status_summary_file) || '' }} \
+          ${{ inputs.metrics_logs && format('--metrics_logs "{0}"', inputs.metrics_logs) || '' }} \
+          ${{ inputs.metrics_json_patterns && format('--metrics_json_patterns {0}', inputs.metrics_json_patterns) || '' }} \
+          ${{ inputs.metrics_summary_file && format('--metrics_summary_file "{0}"', inputs.metrics_summary_file) || '' }} \
+          ${{ inputs.badge_message && format('--badge_message "{0}"', inputs.badge_message) || '' }} \
+          ${{ inputs.badge_color && format('--badge_color "{0}"', inputs.badge_color) || '' }} \
+          ${{ inputs.tags && format('--tags "{0}"', inputs.tags) || '' }} \
+          ${{ inputs.digest && format('--digest "{0}"', inputs.digest) || '' }} \
+          ${{ inputs.outcome && format('--outcome "{0}"', inputs.outcome) || '' }} \
+          ${{ inputs.github_run_id && format('--github_run_id "{0}"', inputs.github_run_id) || '' }} \
+          ${{ inputs.github_output_file && format('--github_output_file "{0}"', inputs.github_output_file) || '' }} \
+          ${{ inputs.github_step_summary && format('--github_step_summary "{0}"', inputs.github_step_summary) || '' }} \
+          ${{ inputs.total_tests && format('--total_tests "{0}"', inputs.total_tests) || '' }} \
+          ${{ inputs.errors && format('--errors "{0}"', inputs.errors) || '' }} \
+          ${{ inputs.failed_tests && format('--failed_tests "{0}"', inputs.failed_tests) || '' }} \
+          ${{ inputs.passed_tests && format('--passed_tests "{0}"', inputs.passed_tests) || '' }}
+      env:
+        GITHUB_RUN_ID: ${{ github.run_id }}
diff --git a/.github/actions/generate-sitrep/generate_sitrep.py b/.github/actions/generate-sitrep/generate_sitrep.py
new file mode 100755
index 000000000..de148e2f8
--- /dev/null
+++ b/.github/actions/generate-sitrep/generate_sitrep.py
@@ -0,0 +1,315 @@
+import argparse
+import glob
+import json
+import os
+
+
+def count_tests_from_exit_status_files(exit_status_files) -> tuple:
+    """
+    Counts the number of passed and failed tests from exit status files.
+
+    Args:
+        exit_status_files (list): List of exit status JSON files.
+
+    Returns:
+        tuple: (passed_tests, failed_tests, total_tests)
+    """
+    passed_tests = 0
+    failed_tests = 0
+    total_tests = len(exit_status_files)
+    for status_file in exit_status_files:
+        with open(status_file) as f:
+            status_data = json.load(f)
+        state = status_data.get('state')
+        exitcode = status_data.get('exitcode')
+        # The status files store the exit code as a string, so compare accordingly
+        if state == 'COMPLETED' and str(exitcode) == '0':
+            passed_tests += 1
+        else:
+            failed_tests += 1
+    return passed_tests, failed_tests, total_tests
+
+
+def count_tests_from_metrics_logs(metrics_logs) -> tuple:
+    """
+    Counts the number of passed and failed tests from metrics logs.
+
+    Args:
+        metrics_logs (list): List of metrics log files.
+
+    Returns:
+        tuple: (pytest_passed_tests, pytest_failed_tests, pytest_total_tests)
+    """
+    pytest_passed_tests = 0
+    pytest_failed_tests = 0
+    pytest_total_tests = 0
+    for metrics_log in metrics_logs:
+        with open(metrics_log) as f:
+            for line in f:
+                data = json.loads(line)
+                if data.get('$report_type') == 'TestReport' and data.get('when') == 'call':
+                    outcome = data.get('outcome')
+                    if outcome == 'passed':
+                        pytest_passed_tests += 1
+                    elif outcome == 'failed':
+                        pytest_failed_tests += 1
+                    pytest_total_tests += 1
+    return pytest_passed_tests, pytest_failed_tests, pytest_total_tests
+
+
+def determine_badge_color(passed_tests: int, failed_tests: int, total_tests: int):
+    """
+    Determines the badge color based on test results.
+
+    Args:
+        passed_tests (int): Number of passed tests.
+        failed_tests (int): Number of failed tests.
+        total_tests (int): Total number of tests.
+
+    Returns:
+        badge_color, badge_message
+    """
+    if failed_tests > 0 or total_tests == 0:
+        badge_message = 'error'
+        badge_color = 'red'
+    else:
+        badge_message = f"{passed_tests}/{total_tests} passed"
+        if failed_tests == 0:
+            badge_color = 'brightgreen'
+        else:
+            badge_color = 'yellow'
+
+    return badge_color, badge_message
+
+
+def write_exit_status_summary(exit_status_files, summary_filename, fw_name, github_step_summary=None):
+    """
+    Generates a markdown summary of the exit status files. This function works for multi-node sitreps.
+
+    Args:
+        exit_status_files (list): List of exit status json files.
+        summary_filename (str): The filename to write the summary to. TODO: should we keep this constant?
+        fw_name (str): Framework name to include in the summary.
+        github_step_summary (str): Path to GITHUB_STEP_SUMMARY file (if any).
+ """ + with open(summary_filename, 'w') as f: + f.write(f"\n\n## {fw_name} MGMN+SPMD Test Status\n") + f.write("| Test Case | State | Exit Code |\n") + f.write("| --- | --- | --- |\n") + + for status_file in exit_status_files: + # Files are named --/-status.json + test_case = os.path.basename(status_file).replace('-status.json', '') + with open(status_file, 'r') as sf: + data = json.load(sf) + state = data.get('state') + exitcode = data.get('exitcode') + f.write(f"| {test_case} | {state} | {exitcode} |\n") + + # TODO append to GITHUB_STEP_SUMMARY + if github_step_summary and os.path.exists(github_step_summary): + with open(github_step_summary, 'a') as f_out: + with open(summary_filename, 'r') as f_in: + f_out.write(f_in.read()) + + +def write_metrics_summary(metrics_files: list, + summary_md_filename: str, + summary_json_filename: str, + fw_name:str, + github_step_summary=None): + """ + Generates a markdown and json summary of metrics files. + + Args: + metrics_files (list): List of metrics json files. + # TODO should we keep these two constant? + summary_md_filename (str): The filename to write the markdown summary to. This is "metrics_summary.md" + summary_json_filename (str): The filename to write the json summary to. This is "metrics_summary.json" + fw_name (str): Framework name to include in the summary. + github_step_summary (str): Path to GITHUB_STEP_SUMMARY file (if any). + """ + all_metrics = [] + header = None + # TODO improve readability of this ufnction + with open(summary_md_filename, 'w') as f_md: + f_md.write(f"## {fw_name} MGMN Test Metrics\n") + print_row = lambda lst: f_md.write('| ' + ' | '.join(str(el) for el in lst) + ' |\n') + + for path in metrics_files: + with open(path) as f: + obj = json.load(f) + all_metrics.append(obj) + if not header: + header = list(obj.keys()) + print_row(["Job Name"] + header) + print_row(["---"] * (1 + len(header))) + job_name = os.path.basename(path)[:-len('_metrics.json')] + print_row([job_name] + [obj[h] for h in header]) + + f_md.write('NOTE: Average step time includes compilation time and thus may be an underestimate of true performance\n') + + # Write the json summary + with open(summary_json_filename, 'w') as f_json: + json.dump(all_metrics, f_json, indent=4) + + # Optionally append to GITHUB_STEP_SUMMARY + if github_step_summary and os.path.exists(github_step_summary): + with open(github_step_summary, 'a') as f_out: + with open(summary_md_filename, 'r') as f_in: + f_out.write(f_in.read()) + + +def main() -> None: + """ + Main entry point + """ + parser = argparse.ArgumentParser(description='Generate sitrep and badge JSON files.') + parser.add_argument('--badge_label', required=True, help='Label for the badge') + parser.add_argument('--badge_filename', required=True, help='Output badge filename') + parser.add_argument('--sitrep_filename', default='sitrep.json', help='Output sitrep filename') + parser.add_argument('--exit_status_patterns', nargs='*', default=['**/*-status.json'], help='Tests with error output') + parser.add_argument('--metrics_logs', nargs='*', default=['metrics-*/*.log'], help='Metrics log file(s)') + parser.add_argument('--badge_message', help='Badge message (overrides default)') + parser.add_argument('--badge_color', help='Badge color (overrides default)') + parser.add_argument('--exit_status_summary_file', default="exit_status_summary.json", help='Output exit status summary markdown file') + parser.add_argument('--metrics_summary_file', help='Output metrics summary markdown file') + parser.add_argument('--tags', 
+    parser.add_argument('--digest', help='Digest from the build')
+    parser.add_argument('--outcome', help='Outcome of the build')
+    # mgmn parameters
+    parser.add_argument('--github_run_id', default=os.environ.get('GITHUB_RUN_ID'), help='GitHub Run ID')
+    parser.add_argument('--github_output_file', default=os.environ.get('GITHUB_OUTPUT'), help='GitHub output file for actions')
+    parser.add_argument('--github_step_summary', default=os.environ.get('GITHUB_STEP_SUMMARY'), help='GitHub step summary file')
+    # optional parameters
+    parser.add_argument('--total_tests', type=int, default=None, help='Total number of tests')
+    parser.add_argument('--errors', type=int, default=None, help='Number of errors')
+    parser.add_argument('--failed_tests', type=int, default=None, help='Number of failed tests')
+    parser.add_argument('--passed_tests', type=int, default=None, help='Number of passed tests')
+
+    args = parser.parse_args()
+
+    # Set default patterns if not provided
+    if not args.exit_status_patterns:
+        args.exit_status_patterns = [f"{args.badge_label}*-{args.github_run_id}-*/*-status.json"]
+    if not args.metrics_logs:
+        args.metrics_logs = [f"{args.badge_label}-metrics-test-log/*_metrics.json"]
+
+    # if we have outcome, then we can produce the badge immediately
+    if args.outcome:
+        sitrep_data = {
+            'summary': f"{args.badge_label}: pass" if args.outcome == "success" else f"{args.badge_label}: fail",
+            'badge_label': args.badge_label,
+            'tags': args.tags,
+            'digest': args.digest,
+            'outcome': args.outcome,
+        }
+        with open(args.sitrep_filename, 'w') as f:
+            json.dump(sitrep_data, f, indent=2)
+
+        badge_data = {
+            'schemaVersion': 1,
+            'label': args.badge_label,
+            'message': "pass" if args.outcome == "success" else "fail",
+            'color': "brightgreen" if args.outcome == "success" else "red"
+        }
+        with open(args.badge_filename, 'w') as f:
+            json.dump(badge_data, f, indent=2)
+        return
+
+    # Collect exit status files
+    exit_status_files = []
+    for pattern in args.exit_status_patterns:
+        exit_status_files.extend(glob.glob(pattern, recursive=True))
+
+    # Collect metrics logs
+    metrics_logs = []
+    for pattern in args.metrics_logs:
+        metrics_logs.extend(glob.glob(pattern, recursive=True))
+
+    # Collect metrics JSON files
+    metrics_files = []
+    for pattern in args.metrics_json_patterns:
+        metrics_files.extend(glob.glob(pattern, recursive=True))
+
+    # Write exit status summary
+    if args.exit_status_summary_file:
+        write_exit_status_summary(exit_status_files, args.exit_status_summary_file, args.badge_label, args.github_step_summary)
+
+    # Write metrics summary
+    if args.metrics_summary_file:
+        write_metrics_summary(metrics_files, args.metrics_summary_file, 'metrics_summary.json', args.badge_label, args.github_step_summary)
+
+    # Use explicitly provided test counts when given; otherwise derive them from the exit status files
+    if args.passed_tests is None and args.failed_tests is None and args.total_tests is None:
+        passed_tests, failed_tests, total_tests = count_tests_from_exit_status_files(exit_status_files)
+    else:
+        passed_tests = args.passed_tests or 0
+        failed_tests = args.failed_tests or 0
+        total_tests = args.total_tests or 0
+    badge_color, badge_message = determine_badge_color(
+        passed_tests, failed_tests, total_tests
+    )
+
+    summary = f"{args.badge_label}: {badge_message}"
+
+    full_result_markdown = ''
+    if args.exit_status_summary_file and os.path.exists(args.exit_status_summary_file):
+        with open(args.exit_status_summary_file, 'r') as f:
+            full_result_markdown += f.read()
+    if args.metrics_summary_file and os.path.exists(args.metrics_summary_file):
+        with open(args.metrics_summary_file, 'r') as f:
+            full_result_markdown += f.read()
+
+    sitrep_data = {
+        'summary': summary,
+        'total_tests': total_tests,
+        'passed_tests': passed_tests,
+        'failed_tests': failed_tests,
+        'badge_label': args.badge_label,
+        'badge_color': badge_color,
+        'badge_message': badge_message,
+        'full_result_markdown': full_result_markdown,
+        'tags': args.tags,
+        'digest': args.digest,
+        'outcome': args.outcome,
+    }
+    if args.errors:
+        sitrep_data['errors'] = args.errors
+
+    with open(args.sitrep_filename, 'w') as f:
+        json.dump(sitrep_data, f, indent=2)
+
+    badge_data = {
+        'schemaVersion': 1,
+        'label': args.badge_label,
+        'message': badge_message,
+        'color': args.badge_color or badge_color
+    }
+
+    with open(args.badge_filename, 'w') as f:
+        json.dump(badge_data, f, indent=2)
+
+    # Expose the overall status so callers can read it via the action's STATUS output
+    status = 'success' if total_tests > 0 and failed_tests == 0 else 'failure'
+    if args.github_output_file:
+        with open(args.github_output_file, 'a') as fh:
+            print(f'STATUS={status}', file=fh)
+
+    # Check and display metrics summary
+    if os.path.exists('metrics_summary.json'):
+        print("metrics_summary.json exists:")
+        with open('metrics_summary.json', 'r') as f:
+            print(f.read())
+    else:
+        print("metrics_summary.json does not exist.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.github/workflows/_build.yaml b/.github/workflows/_build.yaml
index 076382f2b..ea354d2c6 100644
--- a/.github/workflows/_build.yaml
+++ b/.github/workflows/_build.yaml
@@ -167,38 +167,16 @@ jobs:
             ${{ inputs.EXTRA_BUILD_ARGS }}
 
       - name: Generate sitrep
+        id: sitrep
         if: "!cancelled()"
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          source .github/workflows/scripts/to_json.sh
-
-          badge_label='${{ inputs.CONTAINER_NAME }} ${{ inputs.ARCHITECTURE }} build'
-          tags="${{ steps.final-metadata.outputs.tags }}"
-          digest="${{ steps.final-build.outputs.digest }}"
-          outcome="${{ steps.final-build.outcome }}"
-
-          if [[ ${outcome} == "success" ]]; then
-            badge_message="pass"
-            badge_color=brightgreen
-            summary="${{ inputs.CONTAINER_NAME }} build on ${{ inputs.ARCHITECTURE }}: $badge_message"
-          else
-            badge_message="fail"
-            badge_color=red
-            summary="${{ inputs.CONTAINER_NAME }} build on ${{ inputs.ARCHITECTURE }}: $badge_message"
-          fi
-
-          to_json \
-            summary \
-            badge_label tags digest outcome \
-          > sitrep.json
-
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > ${{ env.BADGE_FILENAME_FULL }}
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: '${{ inputs.CONTAINER_NAME }} ${{ inputs.ARCHITECTURE }} build'
+          tags: '${{ steps.final-metadata.outputs.tags }}'
+          digest: '${{ steps.final-build.outputs.digest }}'
+          outcome: '${{ steps.final-build.outcome }}'
+          badge_filename: ${{ env.BADGE_FILENAME_FULL }}
+          sitrep_filename: 'sitrep.json'
 
       - name: Upload sitrep and badge
         if: "!cancelled()"
diff --git a/.github/workflows/_build_base.yaml b/.github/workflows/_build_base.yaml
index 4a1ad84d6..bcec463bd 100644
--- a/.github/workflows/_build_base.yaml
+++ b/.github/workflows/_build_base.yaml
@@ -138,38 +138,16 @@ jobs:
             ${{ inputs.BASE_IMAGE != 'latest' && format('BASE_IMAGE={0}', inputs.BASE_IMAGE) || '' }}
 
       - name: Generate sitrep
+        id: sitrep
         if: "!cancelled()"
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          source .github/workflows/scripts/to_json.sh
-
-          badge_label='Base image ${{ inputs.ARCHITECTURE }} build'
-          tags="${{ steps.meta.outputs.tags }}"
}}" - digest="${{ steps.build.outputs.digest }}" - outcome="${{ steps.build.outcome }}" - - if [[ ${outcome} == "success" ]]; then - badge_message="pass" - badge_color=brightgreen - summary="Base image build on ${{ inputs.ARCHITECTURE }}: $badge_message" - else - badge_message="fail" - badge_color=red - summary="Base image build on ${{ inputs.ARCHITECTURE }}: $badge_message" - fi - - to_json \ - summary \ - badge_label tags digest outcome \ - > sitrep.json - - schemaVersion=1 \ - label="${badge_label}" \ - message="${badge_message}" \ - color="${badge_color}" \ - to_json schemaVersion label message color \ - > ${{ env.BADGE_FILENAME_FULL }} + uses: ./.github/actions/generate-sitrep + with: + badge_label: 'Base image ${{ inputs.ARCHITECTURE }} build' + tags: '${{ steps.meta.outputs.tags }}' + digest: '${{ steps.build.outputs.digest }}' + outcome: '${{ steps.build.outcome }}' + badge_filename: ${{ env.BADGE_FILENAME_FULL }} + sitrep_filename: 'sitrep.json' - name: Upload sitrep and badge if: "!cancelled()" diff --git a/.github/workflows/_build_rosetta.yaml b/.github/workflows/_build_rosetta.yaml index faa6c9d1a..90059bcfe 100644 --- a/.github/workflows/_build_rosetta.yaml +++ b/.github/workflows/_build_rosetta.yaml @@ -142,38 +142,16 @@ jobs: BASE_IMAGE=${{ steps.defaults.outputs.BASE_IMAGE }} - name: Generate sitrep + id: sitrep if: "!cancelled()" - shell: bash -x -e {0} - run: | - # bring in utility functions - source .github/workflows/scripts/to_json.sh - - badge_label='Rosetta ${{ inputs.BASE_LIBRARY }} ${{ inputs.ARCHITECTURE }} build' - tags="${{ steps.final-metadata.outputs.tags }}" - digest="${{ steps.final-build.outputs.digest }}" - outcome="${{ steps.final-build.outcome }}" - - if [[ ${outcome} == "success" ]]; then - badge_message="pass" - badge_color=brightgreen - summary="Rosetta ${{ inputs.BASE_LIBRARY }} build on ${{ inputs.ARCHITECTURE }}: $badge_message" - else - badge_message="fail" - badge_color=red - summary="Rosetta ${{ inputs.BASE_LIBRARY }} build on ${{ inputs.ARCHITECTURE }}: $badge_message" - fi - - to_json \ - summary \ - badge_label tags digest outcome \ - > sitrep.json - - schemaVersion=1 \ - label="${badge_label}" \ - message="${badge_message}" \ - color="${badge_color}" \ - to_json schemaVersion label message color \ - > ${{ env.BADGE_FILENAME_FULL }} + uses: ./.github/actions/generate-sitrep + with: + badge_label: 'Rosetta ${{ inputs.BASE_LIBRARY }} {{ inputs.ARCHITECTURE }} build' + tags: '${{ steps.final-metadata.outputs.tags }}' + digest: '${{ steps.final-build.outputs.digest }}' + outcome: '${{ steps.final-build.outcome }}' + badge_filename: ${{ env.BADGE_FILENAME_FULL }} + sitrep_filename: 'sitrep.json' - name: Upload sitrep and badge if: "!cancelled()" diff --git a/.github/workflows/_sitrep_mgmn.yaml b/.github/workflows/_sitrep_mgmn.yaml index c017a6fd3..fd23c8863 100644 --- a/.github/workflows/_sitrep_mgmn.yaml +++ b/.github/workflows/_sitrep_mgmn.yaml @@ -32,121 +32,33 @@ jobs: - name: Download all artifacts from the previous jobs uses: actions/download-artifact@v4 - - name: Write exit status summary - id: exit-status - shell: bash -x -e {0} - run: | - # Glob after inputs.FW_NAME to capture things like rosetta-t5x-vit - EXIT_STATUSES="${{ inputs.FW_NAME }}*-${GITHUB_RUN_ID}-*/*-status.json" - EXIT_STATUS_SUMMARY_FILE="exit_status_summary.json" - echo -e "\n\n## ${{ inputs.FW_NAME }} MGMN+SPMD Test Status" >> $EXIT_STATUS_SUMMARY_FILE - cat <>$EXIT_STATUS_SUMMARY_FILE - | Test Case | State | Exit Code | - | --- | --- | --- | - EOF - - for i in 
-            # Files are named --/-status.json
-            echo "| $(basename $i -status.json) | $(jq -r .state $i) | $(jq -r .exitcode $i)"
-          done | tee -a $EXIT_STATUS_SUMMARY_FILE
-
-          echo "Test statuses:"
-          jq -rc 'input_filename,.' $EXIT_STATUSES
-
-          cat $EXIT_STATUS_SUMMARY_FILE >> $GITHUB_STEP_SUMMARY
-          echo "EXIT_STATUS_SUMMARY_FILE=$EXIT_STATUS_SUMMARY_FILE" >> ${GITHUB_OUTPUT}
-
-      - name: Write metrics summary
-        id: metrics
-        shell: bash -x -e {0}
-        run: |
-          METRICS_SUMMARY_MD="metrics_summary.md"
-          METRICS_SUMMARY_JSON="metrics_summary.json"
-
-          echo '## ${{ inputs.FW_NAME }} MGMN Test Metrics' | tee -a $METRICS_SUMMARY_MD
-          python <> $GITHUB_STEP_SUMMARY
-          echo "METRICS_SUMMARY_FILE=$METRICS_SUMMARY_MD" >> ${GITHUB_OUTPUT}
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.12'
 
       - name: Generate sitrep
-        id: gen-sitrep
-        shell: bash -x -e {0}
+        id: generate-sitrep
+        env:
+          GITHUB_OUTPUT: ${{ runner.temp }}/github_output.txt
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: "${{ inputs.FW_NAME }} Tests"
+          badge_filename: "${{ inputs.BADGE_FILENAME }}"
+          exit_status_patterns: "${{ inputs.FW_NAME }}*-${{ github.run_id }}-*/*-status.json"
+          metrics_logs: "${{ inputs.FW_NAME }}-metrics-test-log/report.jsonl"
+          metrics_json_patterns: "${{ inputs.FW_NAME }}-metrics-test-log/*_metrics.json"
+          exit_status_summary_file: "exit_status_summary.md"
+          metrics_summary_file: "metrics_summary.md"
+          github_run_id: "${{ github.run_id }}"
+          github_output_file: "${{ runner.temp }}/github_output.txt"
+
+      - name: Read STATUS Output
+        id: read-status
         run: |
-          source .github/workflows/scripts/to_json.sh
-
-          # Glob after inputs.FW_NAME to capture things like rosetta-t5x-vit
-          EXIT_STATUSES="${{ inputs.FW_NAME }}*-${GITHUB_RUN_ID}-*/*-status.json"
-
-          passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l)
-          failed_tests=$(jq -r '. | select ((.state != "COMPLETED") or (.exitcode != "0")) | .state' $EXIT_STATUSES | wc -l)
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          METRICS_LOG=${{ inputs.FW_NAME }}-metrics-test-log/report.jsonl
-          all_outcomes() {
-            cat $METRICS_LOG | jq -r '. | select((.["$report_type"] == "TestReport") and (.when == "call")) | .outcome'
-          }
-          cnt_type() {
-            cat $METRICS_LOG | jq '. | select((.["$report_type"] == "TestReport") and (.when == "call") and (.outcome | contains("'${1}'"))) | .outcome' | wc -l
| select((.["$report_type"] == "TestReport") and (.when == "call") and (.outcome | contains("'${1}'"))) | .outcome' | wc -l - } - pytest_failed_tests=$(cnt_type failed) - pytest_passed_tests=$(cnt_type passed) - pytest_total_tests=$(all_outcomes | wc -l) - - if ([[ $failed_tests -eq 0 ]] && [[ $total_tests -gt 0 ]] && \ - [[ $pytest_failed_tests -eq 0 ]] && [[ $pytest_total_tests -gt 0 ]]); then - status=success - badge_color=brightgreen - elif [[ $passed_tests -eq 0 ]] || [[ $pytest_passed_tests -eq 0 ]]; then - status=failure - badge_color=red - else - status=failure - badge_color=yellow - fi - badge_message="${passed_tests}/${total_tests} jobs | ${pytest_passed_tests}/${pytest_total_tests} metrics" - - badge_label='${{ inputs.FW_NAME }} Tests' - summary="# ${{ inputs.FW_NAME }} MGMN Test: $badge_message" - full_result_markdown=$(cat ${{ steps.exit-status.outputs.EXIT_STATUS_SUMMARY_FILE }}) - full_result_markdown+=$(cat ${{ steps.metrics.outputs.METRICS_SUMMARY_FILE }}) - - to_json \ - summary \ - total_tests passed_tests failed_tests \ - badge_label badge_color badge_message \ - full_result_markdown \ - > sitrep.json - - schemaVersion=1 \ - label="${badge_label}" \ - message="${badge_message}" \ - color="${badge_color}" \ - to_json schemaVersion label message color \ - > ${{ inputs.BADGE_FILENAME }} - - echo "STATUS=${status}" >> ${GITHUB_OUTPUT} + echo "STATUS=$(grep '^STATUS=' '${{ runner.temp }}/github_output.txt' | cut -d'=' -f2-)" >> $GITHUB_OUTPUT - name: Check and display metrics summary run: | diff --git a/.github/workflows/_test_pax_rosetta.yaml b/.github/workflows/_test_pax_rosetta.yaml index c0712574f..76be7ed3c 100644 --- a/.github/workflows/_test_pax_rosetta.yaml +++ b/.github/workflows/_test_pax_rosetta.yaml @@ -166,51 +166,18 @@ jobs: - name: Generate sitrep if: success() || failure() - shell: bash -x -e {0} - run: | - # bring in utility functions - cd $GITHUB_WORKSPACE - source .github/workflows/scripts/to_json.sh - - EXIT_STATUSES="output/*-status.json" - badge_label='ROSETTA PAX SINGLE PROCESS MULTI DEVICE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}' - passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l) - failed_tests=$(jq -r '. 
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          if [[ ${failed_tests} > 0 ]] || [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary="ROSETTA PAX SINGLE PROCESS MULTI DEVICE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="ROSETTA PAX SINGLE PROCESS MULTI DEVICE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          fi
-
-          to_json \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > output/sitrep.json
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: "ROSETTA PAX SINGLE PROCESS MULTI DEVICE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}"
+          exit_status_patterns: "output/*-status.json"
+          badge_filename: "${{ inputs.BADGE_FILENAME }}"
+          sitrep_filename: "output/sitrep.json"
 
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > output/${{ env.BADGE_FILENAME_PREFIX }}-${{ steps.meta.outputs.TEST_CASE_NAME }}.json
-
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
         with:
           name: ${{ steps.meta.outputs.JOB_NAME }}
           path: output/*
-
+
   rosetta-pax-multi-node-te:
     strategy:
       max-parallel: 1
@@ -392,45 +359,12 @@ jobs:
 
       - name: Generate sitrep
         if: success() || failure()
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          cd $GITHUB_WORKSPACE
-          source .github/workflows/scripts/to_json.sh
-
-          EXIT_STATUSES="output/*-status.json"
-          badge_label='ROSETTA PAX MULTI NODE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
-          passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l)
-          failed_tests=$(jq -r '. | select ((.state != "COMPLETED") or (.exitcode != "0")) | .state' $EXIT_STATUSES | wc -l)
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          if [[ ${failed_tests} > 0 ]] || [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary="ROSETTA PAX MULTI NODE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="ROSETTA PAX MULTI NODE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          fi
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: "ROSETTA PAX MULTI NODE TE ${{ steps.meta.outputs.TEST_CASE_NAME }}"
+          exit_status_patterns: "output/*-status.json"
+          badge_filename: "${{ inputs.BADGE_FILENAME }}"
+          sitrep_filename: "output/sitrep.json"
 
-          to_json \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > output/sitrep.json
-
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > output/${{ env.BADGE_FILENAME_PREFIX }}-${{ steps.meta.outputs.TEST_CASE_NAME }}.json
-
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
         with:
diff --git a/.github/workflows/_test_t5x_rosetta.yaml b/.github/workflows/_test_t5x_rosetta.yaml
index 8d13fe39e..535c79036 100644
--- a/.github/workflows/_test_t5x_rosetta.yaml
+++ b/.github/workflows/_test_t5x_rosetta.yaml
@@ -170,47 +170,15 @@ jobs:
           dump = {'state': "${{ steps.submit.outputs.SLURM_STATE }}", 'exitcode': "${{ steps.submit.outputs.SLURM_EXITCODE }}"}
           json.dump(dump, f)
           EOF
-
+
       - name: Generate sitrep
         if: success() || failure()
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          cd $GITHUB_WORKSPACE
-          source .github/workflows/scripts/to_json.sh
-
-          EXIT_STATUSES="output/*-status.json"
-          badge_label='ROSETTA T5X SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
-          passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l)
-          failed_tests=$(jq -r '. | select ((.state != "COMPLETED") or (.exitcode != "0")) | .state' $EXIT_STATUSES | wc -l)
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          if [[ ${failed_tests} > 0 ]] || [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary="ROSETTA T5X SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="ROSETTA T5X SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          fi
-
-          to_json \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > output/sitrep.json
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: 'ROSETTA T5X SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
+          exit_status_patterns: "output/*-status.json"
+          badge_filename: "${{ env.BADGE_FILENAME_FULL }}"
+          sitrep_filename: "output/sitrep.json"
 
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > output/${{ env.BADGE_FILENAME_PREFIX }}-${{ steps.meta.outputs.TEST_CASE_NAME }}.json
 
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
@@ -369,45 +337,12 @@ jobs:
 
       - name: Generate sitrep
         if: success() || failure()
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          cd $GITHUB_WORKSPACE
-          source .github/workflows/scripts/to_json.sh
-
-          EXIT_STATUSES="output/*-status.json"
-          badge_label='ROSETTA T5X MULTI GPU MULTI NODE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
-          passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l)
-          failed_tests=$(jq -r '. | select ((.state != "COMPLETED") or (.exitcode != "0")) | .state' $EXIT_STATUSES | wc -l)
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          if [[ ${failed_tests} > 0 ]] || [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary="ROSETTA T5X MULTI GPU MULTI NODE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="ROSETTA T5X MULTI GPU MULTI NODE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          fi
-
-          to_json \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > output/sitrep.json
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: 'ROSETTA T5X MULTI GPU MULTI NODE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
+          exit_status_patterns: "output/*-status.json"
+          badge_filename: "${{ env.BADGE_FILENAME_FULL }}"
+          sitrep_filename: "output/sitrep.json"
 
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > output/${{ env.BADGE_FILENAME_PREFIX }}-${{ steps.meta.outputs.TEST_CASE_NAME }}.json
-
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
         with:
@@ -538,44 +473,11 @@ jobs:
 
       - name: Generate sitrep
         if: success() || failure()
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          cd $GITHUB_WORKSPACE
-          source .github/workflows/scripts/to_json.sh
-
-          EXIT_STATUSES="output/*-status.json"
-          badge_label='ROSETTA T5X VIT SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
-          passed_tests=$(jq -r '. | select ((.state == "COMPLETED") and (.exitcode == "0")) | .state' $EXIT_STATUSES | wc -l)
-          failed_tests=$(jq -r '. | select ((.state != "COMPLETED") or (.exitcode != "0")) | .state' $EXIT_STATUSES | wc -l)
-          total_tests=$(ls $EXIT_STATUSES | wc -l)
-
-          if [[ ${failed_tests} > 0 ]] || [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary="ROSETTA T5X VIT SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="ROSETTA T5X VIT SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}: $badge_message"
-          fi
-
-          to_json \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > output/sitrep.json
-
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > output/${{ env.BADGE_FILENAME_PREFIX }}-${{ steps.meta.outputs.TEST_CASE_NAME }}.json
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: 'ROSETTA T5X VIT SINGLE PROCESS MULTI DEVICE ${{ steps.meta.outputs.TEST_CASE_NAME }}'
+          exit_status_patterns: "output/*-status.json"
+          badge_filename: "${{ env.BADGE_FILENAME_FULL }}"
+          sitrep_filename: "output/sitrep.json"
 
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
diff --git a/.github/workflows/_test_te.yaml b/.github/workflows/_test_te.yaml
index be66102b8..b09567928 100644
--- a/.github/workflows/_test_te.yaml
+++ b/.github/workflows/_test_te.yaml
@@ -68,49 +68,15 @@ jobs:
         with:
           pattern: |
             ${{ inputs.ARTIFACT_PREFIX }}-*
-          merge-multiple: true
+          merge-multiple: true
 
       - name: Generate sitrep
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          source .github/workflows/scripts/to_json.sh
-
-          test_outcome_files=$(find -name pytest-report.jsonl)
-
-          badge_label='TE Multi GPU tests'
-          passed_tests=$(cat ${test_outcome_files} | jq -r 'select(."$report_type" == "CollectReport" and .outcome == "passed") | .outcome' | wc -l)
-          failed_tests=$(cat ${test_outcome_files} | jq -r 'select(."$report_type" == "CollectReport" and .outcome == "failed") | .outcome' | wc -l)
-          total_tests=$((failed_tests + passed_tests))
-
-          if [[ ${total_tests} == 0 ]]; then
-            badge_message='error'
-            badge_color=red
-            summary='TE multi GPU tests did not complete due to errors.'
-          else
-            badge_message="${passed_tests}/${total_tests} passed"
-            if [[ ${failed_tests} == 0 ]]; then
-              badge_color=brightgreen
-            else
-              badge_color=yellow
-            fi
-            summary="TE multi GPU tests : $badge_message"
-          fi
-
-          run_id=${{ github.run_id }} \
-          to_json \
-            run_id \
-            summary \
-            total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > sitrep.json
-
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${badge_message}" \
-          color="${badge_color}" \
-          to_json schemaVersion label message color \
-          > ${{ env.BADGE_FILENAME_FULL }}
+        if: success() || failure()
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: "TE Multi GPU tests"
+          badge_filename: "${{ env.BADGE_FILENAME_FULL }}"
+          sitrep_filename: "sitrep.json"
 
       - name: Upload training logs as artifacts
         uses: actions/upload-artifact@v4
diff --git a/.github/workflows/_test_unit.yaml b/.github/workflows/_test_unit.yaml
index d820eb348..9073ec77a 100644
--- a/.github/workflows/_test_unit.yaml
+++ b/.github/workflows/_test_unit.yaml
@@ -106,35 +106,22 @@ jobs:
             echo "summary=${{ inputs.TEST_NAME }} unit test on ${{ matrix.GPU_ARCH }}: ${total_tests} total tests, ${errors} errors, ${passed_tests} passed, ${failed_tests} failed." >> $GITHUB_OUTPUT
           fi
 
+      - name: Write summary to file
+        run: |
+          echo "${{ steps.summary.outputs.summary }}" > summary.md
+
+      # TODO: we will have to fix the statistics script so this is all handled in Python
       - name: Generate sitrep
         id: sitrep
         if: "!cancelled()"
-        shell: bash -x -e {0}
-        run: |
-          # bring in utility functions
-          source .github/workflows/scripts/to_json.sh
-
-          badge_label='${{ inputs.TEST_NAME }} ${{ matrix.GPU_ARCH }} Unit'
-
-          total_tests=${{ steps.test-stats.outputs.TOTAL_TESTS }} \
-          errors=${{ steps.test-stats.outputs.ERRORS }} \
-          failed_tests=${{ steps.test-stats.outputs.FAILED_TESTS }} \
-          passed_tests=${{ steps.test-stats.outputs.PASSED_TESTS }} \
-          summary="${{ steps.summary.outputs.summary }}" \
-          badge_message="${{ steps.summary.outputs.badge_message }}" \
-          badge_color="${{ steps.summary.outputs.badge_color }}" \
-          to_json \
-            summary \
-            errors total_tests passed_tests failed_tests \
-            badge_label badge_color badge_message \
-          > sitrep.json
-
-          schemaVersion=1 \
-          label="${badge_label}" \
-          message="${{ steps.summary.outputs.badge_message }}" \
-          color="${{ steps.summary.outputs.badge_color }}" \
-          to_json schemaVersion label message color \
-          > ${{ env.BADGE_FILENAME_FULL }}
+        uses: ./.github/actions/generate-sitrep
+        with:
+          badge_label: '${{ inputs.TEST_NAME }} ${{ matrix.GPU_ARCH }} Unit'
+          badge_filename: ${{ env.BADGE_FILENAME_FULL }}
+          sitrep_filename: 'sitrep.json'
+          exit_status_summary_file: "summary.md"
+          total_tests: "${{ steps.test-stats.outputs.TOTAL_TESTS }}"
+          errors: "${{ steps.test-stats.outputs.ERRORS }}"
+          failed_tests: "${{ steps.test-stats.outputs.FAILED_TESTS }}"
+          passed_tests: "${{ steps.test-stats.outputs.PASSED_TESTS }}"
+          badge_message: "${{ steps.summary.outputs.badge_message }}"
+          badge_color: "${{ steps.summary.outputs.badge_color }}"
 
       - name: Upload artifacts
         if: "!cancelled()"