Skip test when atomic operations are not supported on GPU. #7117

Merged — 8 commits, Feb 21, 2024
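In short, the change consolidates the per-dtype skips into a single GPU-only branch: float16 atomics are skipped below CUDA compute capability (7, 0), and bfloat16 is skipped when the hardware lacks bf16 support or its capability is below (8, 0). The sketch below restates that predicate as a standalone helper; the name skip_if_atomics_unsupported and the plain device-string argument are illustrative only — the merged test inlines the condition and reads the device from DGL's test backend as F._default_context_str.

import pytest
import torch


def skip_if_atomics_unsupported(device: str, dtype: torch.dtype) -> None:
    # Hypothetical helper mirroring the skip logic added in this PR.
    if device == "cpu" and dtype == torch.float16:
        pytest.skip("float16 is not supported on CPU.")
    if device != "gpu":
        return
    if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
        pytest.skip("BF16 is not supported.")
    capability = torch.cuda.get_device_capability()
    if (dtype == torch.float16 and capability < (7, 0)) or (
        dtype == torch.bfloat16 and capability < (8, 0)
    ):
        pytest.skip(
            f"{dtype} is not supported for atomic operations on GPU with "
            f"cuda capability ({capability})."
        )

Keeping every CUDA query inside the GPU branch also means CPU-only runs never touch the CUDA runtime.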
tests/python/common/ops/test_ops.py — 30 changes (15 additions, 15 deletions)
@@ -407,22 +407,22 @@ def test_segment_mm(idtype, feat_size, dtype, tol):
 def test_gather_mm_idx_b(feat_size, dtype, tol):
     if F._default_context_str == "cpu" and dtype == torch.float16:
         pytest.skip("float16 is not supported on CPU.")
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.bfloat16
-        and not torch.cuda.is_bf16_supported()
-    ):
-        pytest.skip("BF16 is not supported.")
-
-    if (
-        F._default_context_str == "gpu"
-        and dtype == torch.float16
-        and torch.cuda.get_device_capability() < (7, 0)
-    ):
-        pytest.skip(
-            f"FP16 is not supported for atomic operations on GPU with "
-            f"cuda capability ({torch.cuda.get_device_capability()})."
-        )
+    if F._default_context_str == "gpu":
+        if dtype == torch.bfloat16 and not torch.cuda.is_bf16_supported():
+            pytest.skip("BF16 is not supported.")
+
+        if (
+            dtype == torch.float16
+            and torch.cuda.get_device_capability() < (7, 0)
+        ) or (
+            dtype == torch.bfloat16
+            and torch.cuda.get_device_capability() < (8, 0)
+        ):
+            pytest.skip(
+                f"{dtype} is not supported for atomic operations on GPU with "
+                f"cuda capability ({torch.cuda.get_device_capability()})."
+            )
 
     dev = F.ctx()
     # input
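As a usage sketch (again hypothetical, since the merged change keeps the check inline rather than calling a shared helper), a parametrized test would invoke the helper first and let pytest report the skip reason:

@pytest.mark.parametrize(
    "dtype", [torch.float16, torch.bfloat16, torch.float32]
)
def test_example_atomic_op(dtype):
    # "gpu" stands in for F._default_context_str from the DGL test backend.
    skip_if_atomics_unsupported("gpu", dtype)
    # ...exercise the op that relies on atomic updates here...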