Extend the capabilities of the graph tooling to include extra information of the ttnn operations (#18380)

Brings back commit 87ab998.
dgomezTT committed Mar 6, 2025
1 parent f3d8fac commit 28017ea
Showing 15 changed files with 713 additions and 22 deletions.
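
In brief, the new tests exercise the following flow: begin a graph capture, run a ttnn operation, end the capture, and extract the per-operation argument strings from the trace. The sketch below is assembled from the test code in this commit; the free function name, the std::cout reporting, and the way the input tensor is obtained are illustrative only and are not part of the commit.

#include <iostream>
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/graph/graph_processor.hpp"
#include "ttnn/graph/graph_trace_utils.hpp"
#include "ttnn/operations/data_movement/transpose/transpose.hpp"

// Illustrative helper (assumed, not from the commit): capture the graph while
// running ttnn::transpose and print the captured operation names and argument counts.
void print_captured_arguments(const ttnn::Tensor& input) {
    // Start recording the operation graph in normal run mode.
    ttnn::graph::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NORMAL);

    // Any ttnn operation invoked here is traced along with its arguments.
    ttnn::transpose(input, 1, 2);

    // Stop recording and pull the per-operation argument strings out of the trace.
    auto trace = ttnn::graph::GraphProcessor::end_graph_capture();
    auto operations = ttnn::graph::extract_arguments(trace);

    for (const auto& op : operations) {
        std::cout << op.operation_name << ": " << op.arguments.size() << " captured arguments\n";
    }
}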
2 changes: 2 additions & 0 deletions tests/ttnn/unit_tests/gtests/CMakeLists.txt
@@ -5,6 +5,8 @@ set(TTNN_UNIT_TESTS_SRC
${CMAKE_CURRENT_SOURCE_DIR}/test_async_runtime.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_multiprod_queue.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_multi_cq_multi_dev.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_graph_capture_arguments_morehdot.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_graph_capture_arguments_transpose.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_graph_query_op_constraints.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_graph_query_op_runtime.cpp
${CMAKE_CURRENT_SOURCE_DIR}/test_reflect.cpp
113 changes: 113 additions & 0 deletions tests/ttnn/unit_tests/gtests/test_graph_capture_arguments_morehdot.cpp
@@ -0,0 +1,113 @@
// SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
//
// SPDX-License-Identifier: Apache-2.0

#include "gtest/gtest.h"
#include "ttnn_test_fixtures.hpp"
#include "ttnn/device.hpp"
#include "ttnn/graph/graph_processor.hpp"
#include "ttnn/graph/graph_consts.hpp"
#include "ttnn/graph/graph_trace_utils.hpp"
#include "ttnn/operations/moreh/moreh_dot/moreh_dot.hpp"
#include <optional>
#include <string>

namespace ttnn::graph::arguments::test {

class TestGraphCaptureArgumentsMorehDot : public TTNNFixtureWithTensor {};

TEST_P(TestGraphCaptureArgumentsMorehDot, MorehDot) {
auto tt_input1 = CreateTensor();
auto tt_input2 = CreateTensor();
ttnn::graph::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NORMAL);
ttnn::moreh_dot(tt_input1, tt_input2, std::nullopt, DataType::BFLOAT16, std::nullopt, std::nullopt);
auto trace = ttnn::graph::GraphProcessor::end_graph_capture();
auto operations = ttnn::graph::extract_arguments(trace);

auto operation0 = operations[0];
EXPECT_EQ(operation0.operation_name, "ttnn::moreh_dot");
EXPECT_EQ(operation0.arguments.size(), 6);
EXPECT_EQ(
operation0.arguments[0],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 1, "
"32]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=TilePageConfig(tile=Tile(tile_"
"shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([32, 32]))))");
EXPECT_EQ(
operation0.arguments[1],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 1, "
"32]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=TilePageConfig(tile=Tile(tile_"
"shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([32, 32]))))");
EXPECT_EQ(operation0.arguments[2], "[ unsupported type , std::__1::reference_wrapper<std::__1::nullopt_t const>]");
EXPECT_EQ(operation0.arguments[3], "BFLOAT16");
EXPECT_EQ(operation0.arguments[4], "[ unsupported type , std::__1::reference_wrapper<std::__1::nullopt_t const>]");
EXPECT_EQ(operation0.arguments[5], "[ unsupported type , std::__1::reference_wrapper<std::__1::nullopt_t const>]");

auto operation1 = operations[1];
EXPECT_EQ(operation1.operation_name, "ttnn::prim::moreh_dot");
EXPECT_EQ(operation1.arguments.size(), 6);
EXPECT_EQ(
operation1.arguments[0],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 1, "
"32]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=TilePageConfig(tile=Tile(tile_"
"shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([32, 32]))))");
EXPECT_EQ(
operation1.arguments[1],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 1, "
"32]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=TilePageConfig(tile=Tile(tile_"
"shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([32, 32]))))");
EXPECT_EQ(operation1.arguments[2], "nullopt");
EXPECT_EQ(operation1.arguments[3], "BFLOAT16");
EXPECT_EQ(operation1.arguments[4], "nullopt");
EXPECT_EQ(
operation1.arguments[5],
"[ unsupported type , "
"std::__1::reference_wrapper<std::__1::optional<std::__1::variant<ttnn::GrayskullComputeKernelConfig, "
"ttnn::WormholeComputeKernelConfig>> const>]");

auto operation2 = operations[2];
EXPECT_EQ(operation2.operation_name, "MorehDotOperation");
EXPECT_EQ(operation2.arguments.size(), 2);
EXPECT_EQ(
operation2.arguments[0],
"[ unsupported type , "
"std::__1::reference_wrapper<ttnn::operations::moreh::moreh_dot::MorehDotOperation::operation_attributes_t "
"const>]");
EXPECT_EQ(
operation2.arguments[1],
"[ unsupported type , "
"std::__1::reference_wrapper<ttnn::operations::moreh::moreh_dot::MorehDotOperation::tensor_args_t const>]");

auto operation3 = operations[3];
EXPECT_EQ(operation3.operation_name, "tt::tt_metal::create_device_tensor");
EXPECT_EQ(operation3.arguments.size(), 5);
EXPECT_EQ(operation3.arguments[0], "Shape([1, 1, 1, 1])");
EXPECT_EQ(operation3.arguments[1], "BFLOAT16");
EXPECT_EQ(operation3.arguments[2], "Tile");
EXPECT_EQ(operation3.arguments[3], "[ unsupported type , std::__1::reference_wrapper<tt::tt_metal::v0::IDevice*>]");
EXPECT_EQ(
operation3.arguments[4],
"MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type=BufferType::L1,shard_spec=std::"
"nullopt)");
}

INSTANTIATE_TEST_SUITE_P(
TestGraphCaptureArgumentsMorehDot_MorehDot,
TestGraphCaptureArgumentsMorehDot,
::testing::Values(CreateTensorParameters{
.input_shape = ttnn::Shape({1, 1, 1, 32}),
.dtype = DataType::BFLOAT16,
.layout = TILE_LAYOUT,
.mem_cfg = L1_MEMORY_CONFIG}));
} // namespace ttnn::graph::arguments::test
94 changes: 94 additions & 0 deletions tests/ttnn/unit_tests/gtests/test_graph_capture_arguments_transpose.cpp
@@ -0,0 +1,94 @@
// SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
//
// SPDX-License-Identifier: Apache-2.0

#include "gtest/gtest.h"
#include "ttnn_test_fixtures.hpp"
#include "ttnn/device.hpp"
#include "ttnn/graph/graph_processor.hpp"
#include "ttnn/graph/graph_consts.hpp"
#include "ttnn/graph/graph_trace_utils.hpp"
#include "ttnn/operations/data_movement/transpose/transpose.hpp"
#include <optional>
#include <string>

namespace ttnn::graph::arguments::test {

class TestGraphCaptureArgumentsTranspose : public TTNNFixtureWithTensor {};

TEST_P(TestGraphCaptureArgumentsTranspose, Transpose) {
auto tt_input = CreateTensor();
tt_input.reshape(ttnn::Shape{1, 2048, 4, 128});
ttnn::graph::GraphProcessor::begin_graph_capture(tt::tt_metal::IGraphProcessor::RunMode::NORMAL);
ttnn::transpose(tt_input, 1, 2);
auto trace = ttnn::graph::GraphProcessor::end_graph_capture();
auto operations = ttnn::graph::extract_arguments(trace);

auto operation0 = operations[0];
EXPECT_EQ(operation0.operation_name, "ttnn::transpose");
EXPECT_EQ(operation0.arguments.size(), 3);
EXPECT_EQ(
operation0.arguments[0],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 2048, "
"512]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=RowMajorPageConfig(tile=Tile("
"tile_shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([1]))))");
EXPECT_EQ(operation0.arguments[1], "1");
EXPECT_EQ(operation0.arguments[2], "2");

auto operation1 = operations[1];
EXPECT_EQ(operation1.operation_name, "ttnn::prim::permute");
EXPECT_EQ(operation1.arguments.size(), 5);
EXPECT_EQ(
operation1.arguments[0],
"Tensor(storage=DeviceStorage(memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_"
"type=BufferType::L1,shard_spec=std::nullopt)),tensor_spec=TensorSpec(logical_shape=Shape([1, 1, 2048, "
"512]),tensor_layout=TensorLayout(dtype=BFLOAT16,page_config=PageConfig(config=RowMajorPageConfig(tile=Tile("
"tile_shape={32, 32},face_shape={16, "
"16},num_faces=4))),memory_config=MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type="
"BufferType::L1,shard_spec=std::nullopt),alignment=Alignment([1]))))");
EXPECT_EQ(operation1.arguments[1], "SmallVector([0, 2, 1, 3])");
EXPECT_EQ(
operation1.arguments[2],
"MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type=BufferType::L1,shard_spec=std::"
"nullopt)");
EXPECT_EQ(operation1.arguments[3], "[ unsupported type , std::__1::reference_wrapper<std::__1::nullopt_t const>]");
EXPECT_EQ(operation1.arguments[4], "0");

auto operation2 = operations[2];
EXPECT_EQ(operation2.operation_name, "PermuteDeviceOperation");
EXPECT_EQ(operation2.arguments.size(), 2);
EXPECT_EQ(
operation2.arguments[0],
"[ unsupported type , "
"std::__1::reference_wrapper<ttnn::operations::data_movement::PermuteDeviceOperation::operation_attributes_t "
"const>]");
EXPECT_EQ(
operation2.arguments[1],
"[ unsupported type , "
"std::__1::reference_wrapper<ttnn::operations::data_movement::PermuteDeviceOperation::tensor_args_t const>]");

auto operation3 = operations[3];
EXPECT_EQ(operation3.operation_name, "tt::tt_metal::create_device_tensor");
EXPECT_EQ(operation3.arguments.size(), 5);
EXPECT_EQ(operation3.arguments[0], "Shape([1, 2048, 1, 512])");
EXPECT_EQ(operation3.arguments[1], "BFLOAT16");
EXPECT_EQ(operation3.arguments[2], "Row Major");
EXPECT_EQ(operation3.arguments[3], "[ unsupported type , std::__1::reference_wrapper<tt::tt_metal::v0::IDevice*>]");
EXPECT_EQ(
operation3.arguments[4],
"MemoryConfig(memory_layout=TensorMemoryLayout::INTERLEAVED,buffer_type=BufferType::L1,shard_spec=std::"
"nullopt)");
}

INSTANTIATE_TEST_SUITE_P(
TestGraphCaptureArgumentsTranspose_Transpose,
TestGraphCaptureArgumentsTranspose,
::testing::Values(CreateTensorParameters{
.input_shape = ttnn::Shape({1, 1, 2048, 512}),
.dtype = DataType::BFLOAT16,
.layout = ROW_MAJOR_LAYOUT,
.mem_cfg = L1_MEMORY_CONFIG}));
} // namespace ttnn::graph::arguments::test
22 changes: 22 additions & 0 deletions tests/ttnn/unit_tests/gtests/ttnn_test_fixtures.hpp
@@ -13,6 +13,8 @@

#include "ttnn/device.hpp"
#include "ttnn/types.hpp"
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/tensor_impl.hpp"
#include "tests/tt_metal/test_utils/env_vars.hpp"
#include <tt-metalium/host_api.hpp>
#include "hostdevcommon/common_values.hpp"
@@ -53,6 +55,26 @@ class TTNNFixtureWithDevice : public TTNNFixture {
tt::tt_metal::IDevice& getDevice() { return *device_; }
};

struct CreateTensorParameters {
ttnn::Shape input_shape;
DataType dtype;
Layout layout;
MemoryConfig mem_cfg;
};

class TTNNFixtureWithTensor : public TTNNFixtureWithDevice, public testing::WithParamInterface<CreateTensorParameters> {
protected:
[[nodiscard]] const Tensor CreateTensor() {
CreateTensorParameters params = GetParam();
TensorSpec tensor_spec(
params.input_shape, TensorLayout(params.dtype, PageConfig(params.layout), params.mem_cfg));
auto input_buffer = tt::tt_metal::tensor_impl::allocate_buffer_on_device(device_, tensor_spec);
auto input_storage = tt::tt_metal::DeviceStorage{input_buffer};
Tensor input_tensor = Tensor(input_storage, params.input_shape, params.dtype, params.layout);
return std::move(input_tensor);
}
};

} // namespace ttnn

namespace ttnn::distributed::test {