Simplify autoformat facilities in preparation for removal #17896
base: main
Commits in this PR: 41cbf24, d13ad6d, 854bae8, 40b9b0b, ed2b224
```diff
@@ -1759,8 +1759,8 @@ std::vector<Tensor> ExecuteUnaryBackwardRepeat::invoke(
 Tensor change_layout_to_tile(const Tensor& temp, const MemoryConfig& output_mem_config) {
     auto formatted_input_tensor = temp;
     if (formatted_input_tensor.get_layout() == Layout::ROW_MAJOR) {
-        auto a_pad_shape = ttnn::operations::experimental::auto_format::AutoFormat::pad_to_tile_shape(
-            temp.get_padded_shape(), false, false, true, true);
+        auto a_pad_shape =
+            ttnn::operations::experimental::auto_format::AutoFormat::pad_to_tile_shape(temp.get_padded_shape());
         if (!ttnn::operations::experimental::auto_format::AutoFormat::check_input_tensor_format(temp, a_pad_shape)) {
             formatted_input_tensor = ttnn::operations::experimental::auto_format::AutoFormat::format_input_tensor(
                 temp, temp.device(), a_pad_shape, 1.0, Layout::TILE);
```

Reviewer: nit: can we do something like `using ttnn::operations::experimental::auto_format::AutoFormat;` and avoid all these long lines?

Author: The aim is to remove this facility entirely, so not changing that in this PR yet.
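For illustration only (the PR deliberately keeps the long qualified names, since the whole facility is slated for removal), a sketch of what the reviewer's suggestion would look like at these call sites; it is not compilable on its own, it just restates the hunk above with a using-declaration:

```cpp
// Sketch only: bring AutoFormat into scope once, so the call sites
// no longer need the full namespace path.
using ttnn::operations::experimental::auto_format::AutoFormat;

auto a_pad_shape = AutoFormat::pad_to_tile_shape(temp.get_padded_shape());
if (!AutoFormat::check_input_tensor_format(temp, a_pad_shape)) {
    formatted_input_tensor = AutoFormat::format_input_tensor(
        temp, temp.device(), a_pad_shape, 1.0, Layout::TILE);
}
```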
```diff
@@ -96,7 +96,7 @@ Tensor AutoFormat::format_input_tensor(
                 padded_shape.to_array_4D(),
                 tt::tt_metal::Array4D({0, 0, 0, 0}),
                 pad_value,
-                false,
+                false, /* multicore */
                 mem_config);
         }
     } else if (convert_layout && pad_input) {
```

```diff
@@ -117,7 +117,7 @@ Tensor AutoFormat::format_input_tensor(
                 padded_shape.to_array_4D(),
                 tt::tt_metal::Array4D({0, 0, 0, 0}),
                 pad_value,
-                false,
+                false, /* multicore */
                 mem_config);
         }
     }
```
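Both hunks apply the same readability idiom: a bare boolean argument gets an inline comment naming the parameter it binds to (`multicore` here). A minimal self-contained sketch of the idiom, using a hypothetical function rather than the real ttnn pad API:

```cpp
#include <iostream>

// Hypothetical helper for illustration; only the trailing bool matters here.
void pad_tensor(int value, bool multicore) {
    std::cout << value << (multicore ? " multicore" : " single-core") << '\n';
}

int main() {
    pad_tensor(42, false);                  // unclear at the call site what `false` means
    pad_tensor(42, false /* multicore */);  // the idiom from the diff: intent visible in place
}
```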
```diff
@@ -182,24 +182,23 @@ Tensor AutoFormat::format_output_tensor(
     } else if (unpad_output && !convert_layout) {
         // Output can be unpadded and layout supports the shape
         if ((formatted_output.get_layout() == Layout::TILE && AutoFormat::legal_tile_shape(shape)) ||
-            (formatted_output.get_layout() == Layout::ROW_MAJOR && AutoFormat::legal_rm_shape(shape))) {
+            (formatted_output.get_layout() == Layout::ROW_MAJOR)) {
             auto begins = std::array<uint32_t, 4>({0, 0, 0, 0});
             auto ends = std::array<uint32_t, 4>({shape[0], shape[1], shape[2], shape[3]});
             auto step = std::array<uint32_t, 4>({1, 1, 1, 1});
 
             formatted_output = ttnn::slice(formatted_output, begins, ends, step, mem_config);
             return formatted_output;
             // Output is tile but shape cannot be tile. We leave in RM
-        } else if (formatted_output.get_layout() == Layout::TILE && AutoFormat::legal_rm_shape(shape)) {
+        } else if (formatted_output.get_layout() == Layout::TILE) {
             formatted_output = ttnn::untilize_with_unpadding(
                 formatted_output,
                 ttnn::Shape({shape[0] - 1, shape[1] - 1, shape[2] - 1, shape[3] - 1}),
                 mem_config);
             return formatted_output;
         }
     } else if (unpad_output && convert_layout) {
-        if (formatted_output.get_layout() == Layout::TILE && target_layout == Layout::ROW_MAJOR &&
-            AutoFormat::legal_rm_shape(shape)) {
+        if (formatted_output.get_layout() == Layout::TILE && target_layout == Layout::ROW_MAJOR) {
             formatted_output = ttnn::untilize_with_unpadding(
                 formatted_output,
                 ttnn::Shape({shape[0] - 1, shape[1] - 1, shape[2] - 1, shape[3] - 1}),
```
```diff
@@ -255,4 +254,49 @@ Tensor AutoFormat::format_output_tensor(
     return formatted_output;
 }
 
+void AutoFormat::SetDefaultDevice(tt::tt_metal::IDevice* dev) { device = dev; }
+
+tt::tt_metal::IDevice* AutoFormat::GetDefaultDevice() { return device; }
+
+ttnn::Shape AutoFormat::pad_to_tile_shape(const ttnn::Shape& unpadded_shape) {
+    using namespace tt::constants;
+    auto rank = unpadded_shape.rank();
+    TT_ASSERT(rank >= 1, "rank of shape to pad to tile shape must be at least 1.");
```

Reviewer: TT_FATAL might be better.

Author: Agree, but not changing that in this PR.
```diff
+
+    SmallVector<uint32_t> padded_shape_vec(rank);
+
+    for (auto i = 0; i < rank; ++i) {
+        padded_shape_vec[i] = unpadded_shape[i];
+    }
+    if (rank >= 1) {
+        auto w = tt::round_up(unpadded_shape[rank - 1], TILE_WIDTH);
+        padded_shape_vec[rank - 1] = w;
+    }
+    if (rank >= 2) {
+        auto h = tt::round_up(unpadded_shape[rank - 2], TILE_HEIGHT);
+        padded_shape_vec[rank - 2] = h;
+    }
+    return Shape(padded_shape_vec);
+}
```
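A self-contained sketch of what the simplified `pad_to_tile_shape` computes, assuming the usual 32x32 tile from `tt::constants` and reimplementing `tt::round_up` locally so the example runs on its own:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Local stand-in for tt::round_up: round n up to the nearest multiple of m.
uint32_t round_up(uint32_t n, uint32_t m) { return ((n + m - 1) / m) * m; }

int main() {
    const uint32_t TILE_HEIGHT = 32, TILE_WIDTH = 32;  // assumed tile size
    std::vector<uint32_t> shape{1, 1, 30, 50};         // unpadded shape

    // As in the function above, only the last two dimensions get padded.
    const auto rank = shape.size();
    shape[rank - 1] = round_up(shape[rank - 1], TILE_WIDTH);
    shape[rank - 2] = round_up(shape[rank - 2], TILE_HEIGHT);

    for (const auto d : shape) std::cout << d << ' ';  // prints "1 1 32 64"
    std::cout << '\n';
}
```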
```diff
+
+bool AutoFormat::legal_tile_shape(const ttnn::Shape& shape) {
+    return (shape[2] % tt::constants::TILE_HEIGHT == 0 && shape[3] % tt::constants::TILE_WIDTH == 0);
+}
+
+bool AutoFormat::legal_device_shape(const ttnn::Shape& shape, tt::tt_metal::Layout layout) {
+    switch (layout) {
+        case tt::tt_metal::Layout::ROW_MAJOR: return true;
+        case tt::tt_metal::Layout::TILE: return legal_tile_shape(shape);
+        default: return true;
+    }
+}
```
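`legal_tile_shape` only inspects the last two dimensions of the 4D shape. A standalone restatement, again with the 32x32 tile size assumed:

```cpp
#include <cstdint>
#include <iostream>

// Restatement of the check added above: for TILE layout, the height and
// width dimensions must be multiples of the tile size.
bool legal_tile_shape(uint32_t h, uint32_t w) { return h % 32 == 0 && w % 32 == 0; }

int main() {
    std::cout << std::boolalpha
              << legal_tile_shape(32, 64) << '\n'   // true: already tile-aligned
              << legal_tile_shape(30, 50) << '\n';  // false: needs pad_to_tile_shape first
}
```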
```diff
+
+bool AutoFormat::check_input_tensor_format(
+    const Tensor& a, const ttnn::Shape& shape, tt::tt_metal::Layout target_layout) {
+    if (a.get_layout() == target_layout && a.get_padded_shape() == shape &&
+        a.storage_type() == tt::tt_metal::StorageType::DEVICE) {
+        return true;
+    }
+    return false;
+}
+
 }  // namespace ttnn::operations::experimental::auto_format
```
Reviewer: I'd create a standalone function; it might be useful from C++ too.

Reviewer: This thing must get removed. The tile shape is not constant; there is no need for `ttnn::operations::experimental::auto_format::AutoFormat` when there is a `TensorSpec`.
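A sketch of the first reviewer's proposal: the padding logic as a free function, reusable from C++ without going through `AutoFormat`. The function name and namespace are assumptions, and the body just restates the member function from the diff:

```cpp
// Hypothetical free-function version; not part of this PR.
namespace ttnn {

Shape pad_to_tile_shape(const Shape& unpadded_shape) {
    using namespace tt::constants;
    const auto rank = unpadded_shape.rank();
    TT_ASSERT(rank >= 1, "rank of shape to pad to tile shape must be at least 1.");

    SmallVector<uint32_t> padded(rank);
    for (size_t i = 0; i < rank; ++i) {
        padded[i] = unpadded_shape[i];
    }
    // Pad width unconditionally (rank >= 1 is asserted), height only when present.
    padded[rank - 1] = tt::round_up(unpadded_shape[rank - 1], TILE_WIDTH);
    if (rank >= 2) {
        padded[rank - 2] = tt::round_up(unpadded_shape[rank - 2], TILE_HEIGHT);
    }
    return Shape(padded);
}

}  // namespace ttnn
```

The second comment points in a different direction: since the tile shape is not constant, the helper should eventually disappear in favor of `TensorSpec` rather than be relocated.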