# Trying again: Revert Fix Minimal All Reduce for Llama shapes (#18731)
### Description

This is a duplicate of PR #18217, which was reverted due to a symlink issue. The issue is now fixed and post-commit pipelines are passing.

### Checklist

- [x] [All post-commit](https://github.com/tenstorrent/tt-metal/actions/runs/13702739699) CI passes
- [x] [TG Nightly](https://github.com/tenstorrent/tt-metal/actions/runs/13688165179) CI passes

---------

Co-authored-by: avoraTT <avora@tenstorrent.com>
Co-authored-by: yugaoTT <yugao@tenstorrent.com>
1 parent: 727cfa4 · commit: f3d8fac
Showing 24 changed files with 2,212 additions and 279 deletions.
models/demos/llama3/tests/test_ccl_async_perf_TG_llama.py (120 additions, 0 deletions)
@@ -0,0 +1,120 @@
# SPDX-FileCopyrightText: © 2025 Tenstorrent AI ULC

# SPDX-License-Identifier: Apache-2.0

import torch
import pytest
from loguru import logger
import ttnn

from models.perf.benchmarking_utils import BenchmarkData, BenchmarkProfiler
from models.perf.device_perf_utils import run_device_perf_detailed


@pytest.mark.parametrize(
    "ag_type, warmup_iters, perf_target_us",
    [
        ("sdpa", 10, 11),
        ("binary_mult", 10, 12),
        ("layernorm", 10, 8),
    ],
)
@pytest.mark.models_device_performance_bare_metal
def test_ag_tg_llama_perf(
    ag_type,
    warmup_iters,
    perf_target_us,
):
    profiler = BenchmarkProfiler()
    benchmark_data = BenchmarkData()
    step_name = f"all_gather_{ag_type}"

    subdir = "llama_ccl_perf"
    command = (
        f"pytest tests/ttnn/unit_tests/operations/ccl/test_ccl_async_TG_llama.py::test_all_gather_tg_llama -k {ag_type}"
    )
    cols = ["DEVICE KERNEL"]
    op_name = "AllGatherAsync"
    warmup_iters = warmup_iters * 32  # scale warmup iterations by the 32 devices in the TG mesh

    profiler.start("run")
    profiler.start(step_name)
    results = run_device_perf_detailed(command, subdir, cols, op_name, has_signposts=True, warmup_iters=warmup_iters)
    profiler.end(step_name)
    profiler.end("run")

    # Get the measured performance (raw values are divided by 1000 to convert to microseconds)
    measured_min_us = results[cols[0]]["MIN"] / 1000
    measured_max_us = results[cols[0]]["MAX"] / 1000
    measured_avg_us = results[cols[0]]["AVG"] / 1000
    measured_std_us = results[cols[0]]["STD"] / 1000

    logger.info(f"Measured performance: {measured_avg_us:.3f} us vs. target: {perf_target_us} us")

    # Save the measurement
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_gather-{ag_type}-min-us", measured_min_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_gather-{ag_type}-max-us", measured_max_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_gather-{ag_type}-avg-us", measured_avg_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_gather-{ag_type}-std-us", measured_std_us)
    benchmark_data.save_partial_run_json(
        profiler,
        run_type="all_gather",
        ml_model_name="llama70b-tg-ccl",
    )

    assert measured_avg_us < perf_target_us, f"Performance target not met: {measured_avg_us} us > {perf_target_us} us"


@pytest.mark.parametrize(
    "ar_type, warmup_iters, perf_target_us",
    [
        ("ff2", 10, 29),
        ("qkv", 10, 25),
        ("ff1", 10, 30),
        ("lm_head", 10, 70),
    ],
)
@pytest.mark.models_device_performance_bare_metal
def test_ar_tg_llama_perf(
    ar_type,
    warmup_iters,
    perf_target_us,
):
    profiler = BenchmarkProfiler()
    benchmark_data = BenchmarkData()
    step_name = f"all_reduce_{ar_type}"

    subdir = "llama_ccl_perf"
    command = (
        f"pytest tests/ttnn/unit_tests/operations/ccl/test_ccl_async_TG_llama.py::test_all_reduce_tg_llama -k {ar_type}"
    )
    cols = ["DEVICE KERNEL"]
    op_name = "AllReduceAsync"
    warmup_iters = warmup_iters * 32  # scale warmup iterations by the 32 devices in the TG mesh

    profiler.start("run")
    profiler.start(step_name)
    results = run_device_perf_detailed(command, subdir, cols, op_name, has_signposts=True, warmup_iters=warmup_iters)
    profiler.end(step_name)
    profiler.end("run")

    # Get the measured performance (raw values are divided by 1000 to convert to microseconds)
    measured_min_us = results[cols[0]]["MIN"] / 1000
    measured_max_us = results[cols[0]]["MAX"] / 1000
    measured_avg_us = results[cols[0]]["AVG"] / 1000
    measured_std_us = results[cols[0]]["STD"] / 1000

    logger.info(f"Measured performance: {measured_avg_us:.3f} us vs. target: {perf_target_us} us")

    # Save the measurement
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_reduce-{ar_type}-min-us", measured_min_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_reduce-{ar_type}-max-us", measured_max_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_reduce-{ar_type}-avg-us", measured_avg_us)
    benchmark_data.add_measurement(profiler, 0, step_name, f"all_reduce-{ar_type}-std-us", measured_std_us)
    benchmark_data.save_partial_run_json(
        profiler,
        run_type="all_reduce",
        ml_model_name="llama70b-tg-ccl",
    )

    assert measured_avg_us < perf_target_us, f"Performance target not met: {measured_avg_us} us > {perf_target_us} us"
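For context, here is a minimal sketch of the stats handling both tests rely on. It assumes, inferred from the `/ 1000` conversions rather than stated anywhere in this commit, that `run_device_perf_detailed` returns per-column kernel durations in nanoseconds under `MIN`/`MAX`/`AVG`/`STD` keys; the `fake_results` dict and `check_perf_target` helper below are hypothetical, for illustration only.

```python
# Hypothetical sketch of the stats handling in the tests above; not part of the commit.
# Assumption (inferred from the `/ 1000` conversions): the harness reports
# per-column kernel durations in nanoseconds, keyed by statistic name.
fake_results = {
    "DEVICE KERNEL": {"MIN": 9_800.0, "MAX": 12_400.0, "AVG": 10_900.0, "STD": 450.0},
}


def check_perf_target(results: dict, col: str, perf_target_us: float) -> float:
    """Convert the average nanosecond reading to microseconds and enforce the target."""
    measured_avg_us = results[col]["AVG"] / 1000  # ns -> us
    assert measured_avg_us < perf_target_us, f"Performance target not met: {measured_avg_us} us > {perf_target_us} us"
    return measured_avg_us


print(check_perf_target(fake_results, "DEVICE KERNEL", perf_target_us=11))  # prints 10.9
```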
One of the changed files is a one-line addition whose entire content is a relative path, i.e., a symlink into the ttnn unit tests:

@@ -0,0 +1 @@
../../../ttnn/unit_tests/operations/ccl/test_new_all_reduce.py
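Since the original PR (#18217) was reverted over a symlink issue, it may help to see how a relative symlink like this behaves. A minimal sketch: the link path below is hypothetical (the page does not show the filename the commit adds), and only the target string is taken from the diff.

```python
import os
from pathlib import Path

# Illustration only: the link location is hypothetical; the target string
# is the one-line file content from the diff above.
link = Path("some/package/tests/test_new_all_reduce.py")
target = "../../../ttnn/unit_tests/operations/ccl/test_new_all_reduce.py"

link.parent.mkdir(parents=True, exist_ok=True)
if not link.is_symlink():
    os.symlink(target, link)

# A relative target is resolved against the link's own directory, so the
# link only works when it sits at the right depth in the source tree.
print(link.is_symlink(), os.readlink(link))
print(link.resolve())
```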