#0: Switch to google benchmark for pgm dispatch tests
jbaumanTT committed Dec 19, 2024
1 parent 4bfb135 commit 9437e7a
Showing 7 changed files with 2,494 additions and 52 deletions.
1 change: 1 addition & 0 deletions .clang-format-ignore
@@ -108,6 +108,7 @@ tests/tt_metal/test_utils/env_vars.hpp
 tests/tt_metal/tt_metal/api/allocator/test_free_list_opt_allocator.cpp
 tests/tt_metal/tt_metal/api/test_global_semaphores.cpp
 tests/tt_metal/tt_metal/dispatch/sub_device_test_utils.hpp
+tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/pgm_dispatch_golden.json
 tests/tt_metal/tt_metal/perf_microbenchmark/10_dram_read_remote_cb_sync/test_dram_read_remote_cb.cpp
 tests/tt_metal/tt_metal/perf_microbenchmark/11_remote_cb_sync_matmul_single_core/test_remote_cb_sync_matmul.cpp
 tests/tt_metal/tt_metal/perf_microbenchmark/routing/kernels/traffic_gen_rx.cpp
8 changes: 7 additions & 1 deletion .github/workflows/fast-dispatch-frequent-tests-impl.yaml
@@ -21,7 +21,7 @@ jobs:
name: "WH N300 pgm dispatch nightly",
arch: wormhole_b0,
runs-on: ["cloud-virtual-machine", "N300", "in-service"],
cmd: ./tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/compare_pgm_dispatch_perf_ci.sh,
cmd: ./tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/compare_pgm_dispatch_perf_ci.py,
timeout: 10
},
]
@@ -57,3 +57,9 @@ jobs:
           path: |
             generated/test_reports/
           prefix: "test_reports_"
+      - uses: ./.github/actions/upload-artifact-with-job-uuid
+        if: ${{ !cancelled() }}
+        with:
+          path: |
+            bench.json
+          prefix: "bench_json_"
3 changes: 3 additions & 0 deletions dependencies/CMakeLists.txt
@@ -111,3 +111,6 @@ CPMAddPackage(
     OPTIONS
         "XTENSOR_ENABLE_TESTS OFF"
 )
+
+
+CPMAddPackage(NAME benchmark GITHUB_REPOSITORY google/benchmark GIT_TAG v1.9.1)
2 changes: 2 additions & 0 deletions tests/tt_metal/tt_metal/perf_microbenchmark/CMakeLists.txt
@@ -77,4 +77,6 @@ foreach(TEST_SRC ${PERF_MICROBENCH_TESTS_SRCS})
     list(APPEND PERF_MICROBENCH_TEST_TARGETS ${TEST_TARGET})
 endforeach()
 
+target_link_libraries(test_pgm_dispatch PUBLIC benchmark::benchmark)
+
 add_custom_target(metal_perf_microbenchmark_tests DEPENDS ${PERF_MICROBENCH_TEST_TARGETS})
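
The converted test_pgm_dispatch sources are among the changed files not shown in this view. As a minimal, hypothetical sketch (the function name BM_pgm_dispatch and its body are assumptions, not the actual test code), a Google Benchmark registration that produces the fields the CI comparison script below consumes, a benchmark "name" plus an "IterationTime" counter in the JSON output, might look like:

// Hypothetical sketch only; the real test_pgm_dispatch code is not shown here.
#include <benchmark/benchmark.h>

static void BM_pgm_dispatch(benchmark::State& state) {
    for (auto _ : state) {
        // Dispatch the program under test once per iteration.
    }
    // Report seconds per iteration: with kIsIterationInvariantRate | kInvert,
    // Counter(1, ...) resolves to elapsed_time / iterations. The CI script
    // reads this field as benchmark["IterationTime"] from bench.json.
    state.counters["IterationTime"] = benchmark::Counter(
        1, benchmark::Counter::kIsIterationInvariantRate | benchmark::Counter::kInvert);
}
BENCHMARK(BM_pgm_dispatch);

BENCHMARK_MAIN();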
61 changes: 61 additions & 0 deletions tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/compare_pgm_dispatch_perf_ci.py
@@ -0,0 +1,61 @@
#!/usr/bin/python3

# Compare Google Benchmark results for test_pgm_dispatch against golden values
# and fail CI if any benchmark regresses by more than THRESHOLD percent.

import json
import os
import sys

os.chdir(os.getenv("TT_METAL_HOME"))
golden = json.load(open("tests/tt_metal/tt_metal/perf_microbenchmark/dispatch/pgm_dispatch_golden.json", "r"))

# Allowed slowdown (or unexpected speedup), in percent.
THRESHOLD = 4

result = os.system(
    "build/test/tt_metal/perf_microbenchmark/dispatch/test_pgm_dispatch --benchmark_out_format=json --benchmark_out=bench.json"
)
if result != 0:
    print(f"Test failed with error code {result}")
    sys.exit(result)

result = json.load(open("bench.json", "r"))

# Index both benchmark lists by name.
golden_benchmarks = {}
for benchmark in golden["benchmarks"]:
    golden_benchmarks[benchmark["name"]] = benchmark

result_benchmarks = {}
for benchmark in result["benchmarks"]:
    result_benchmarks[benchmark["name"]] = benchmark

exit_code = 0

for name, benchmark in golden_benchmarks.items():
    if name not in result_benchmarks:
        print(f"Golden benchmark {name} missing from results")
        exit_code = 1
        continue
    result = result_benchmarks[benchmark["name"]]

    # If the golden run errored, only note when the error has been fixed.
    if "error_occurred" in benchmark:
        if "error_occurred" not in result:
            print(f"Error in {name} was fixed in result. Consider adjusting baselines.")
        continue

    if "error_occurred" in result:
        if "error_occurred" not in benchmark:
            print(f"Benchmark {name} gave error {result['error_message']}")
            exit_code = 1
        continue

    golden_time = benchmark["IterationTime"]
    result_time = result["IterationTime"]
    # Fail on regressions beyond the threshold; flag unexpected speedups too.
    if result_time / golden_time > (1 + THRESHOLD / 100):
        print(f"Test {name} expected value {golden_time} but got {result_time}")
        exit_code = 1
    if golden_time / result_time > (1 + THRESHOLD / 100):
        print(f"Test {name} got value {result_time} but expected {golden_time}. Consider adjusting baselines")

for name in result_benchmarks:
    if name not in golden_benchmarks:
        print(f"Result benchmark {name} missing from goldens")
        exit_code = 1

if exit_code == 0:
    print("Test successful")
sys.exit(exit_code)