From 49a7c54ab98e2464cc2435f754cac01baf9bdbb2 Mon Sep 17 00:00:00 2001
From: Michael Levesque-Dion
Date: Wed, 17 Apr 2024 10:55:59 -0700
Subject: [PATCH 1/4] Implement ConditionallySpeculatable for ConvolutionOp

Relevant constraints from the spec:

```
(C10) dim(lhs, input_batch_dimension) % batch_group_count = 0.
(C11) dim(lhs, input_feature_dimension) % feature_group_count = 0.
(C14) dim(rhs, kernel_input_feature_dimension) = dim(lhs, input_feature_dimension) / feature_group_count.
(C15) dim(rhs, kernel_output_feature_dimension) % batch_group_count = 0.
(C16) dim(rhs, kernel_output_feature_dimension) % feature_group_count = 0.
(C25) dim(result, result_dim) is defined as:
* dim(lhs, input_batch_dimension) / batch_group_count if result_dim = output_batch_dimension.
* dim(rhs, kernel_output_feature_dimension) if result_dim = output_feature_dimension.
* num_windows otherwise, where:
  * output_spatial_dimensions[spatial_dim] = result_dim.
  * lhs_dim = input_spatial_dimensions[spatial_dim].
  * rhs_dim = kernel_spatial_dimensions[spatial_dim].
  * dilated_input_shape[lhs_dim] = dim(lhs, lhs_dim) = 0 ? 0 : (dim(lhs, lhs_dim) - 1) * lhs_dilation[spatial_dim] + 1.
  * padded_input_shape[lhs_dim] = padding[spatial_dim, 0] + dilated_input_shape[lhs_dim] + padding[spatial_dim, 1].
  * dilated_window_shape[lhs_dim] = dim(rhs, rhs_dim) = 0 ? 0 : (dim(rhs, rhs_dim) - 1) * rhs_dilation[spatial_dim] + 1.
  * is_empty_window[lhs_dim] = padded_input_shape[lhs_dim] = 0 || dilated_window_shape[lhs_dim] > padded_input_shape[lhs_dim].
  * num_windows = is_empty_window[lhs_dim] ? 0 : floor((padded_input_shape[lhs_dim] - dilated_window_shape[lhs_dim]) / window_strides[spatial_dim]) + 1.
```

Because of (C14), input_feature_dimension and kernel_input_feature_dimension
must be static.

input_batch_dimension must be static if batch_group_count > 1 (C10) or if
output_batch_dimension is static (C25, first bullet).

kernel_output_feature_dimension must be static if batch_group_count > 1 (C15)
or feature_group_count > 1 (C16) or if output_feature_dimension is static
(C25, second bullet).

Because of (C25), each spatial dimension in the output can depend on the
spatial dimensions in the inputs (input + kernel), so if it is static in the
output, it must be static in the inputs; otherwise, mismatches could occur at
runtime.
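
For concreteness, here is the (C25) num_windows arithmetic for the spatial
dimensions used in the new tests, assuming the defaults implied by the tests'
empty window attributes (stride 1, no padding, no dilation):

```
dim(lhs, lhs_dim) = 26, dim(rhs, rhs_dim) = 3
dilated_input_shape  = (26 - 1) * 1 + 1 = 26
padded_input_shape   = 0 + 26 + 0 = 26
dilated_window_shape = (3 - 1) * 1 + 1 = 3
is_empty_window      = false
num_windows          = floor((26 - 3) / 1) + 1 = 24
```

This is why the fully static test case maps a 100x26x26x32 input and a
3x3x2x32 kernel to a 100x24x24x2 result: each output spatial size is a
function of both the input and the kernel spatial sizes, so a static output
spatial dimension forces both to be static.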
---
 stablehlo/dialect/StablehloOps.cpp       |  52 +++++++++++
 stablehlo/dialect/StablehloOps.td        |   8 +-
 stablehlo/tests/ops_speculatability.mlir | 105 +++++++++++++++++++++++
 3 files changed, 164 insertions(+), 1 deletion(-)

diff --git a/stablehlo/dialect/StablehloOps.cpp b/stablehlo/dialect/StablehloOps.cpp
index da5b84da27..a775a5fd6f 100644
--- a/stablehlo/dialect/StablehloOps.cpp
+++ b/stablehlo/dialect/StablehloOps.cpp
@@ -1017,6 +1017,58 @@ LogicalResult ConvolutionOp::verify() {
       getResult().getType());
 }
 
+mlir::Speculation::Speculatability ConvolutionOp::getSpeculatability() {
+  // input_feature_dimension and kernel_input_feature_dimension must be static
+  // (C14)
+  // input_batch_dimension must be static if batch_group_count > 1 (C10) or if
+  // output_batch_dimension is static (C25, first bullet).
+  // kernel_output_feature_dimension must be static if batch_group_count > 1
+  // (C15) or feature_group_count > 1 (C16) or if output_feature_dimension is
+  // static (C25, second bullet). If a spatial dimension is static in the
+  // output, it must be static in the inputs.
+
+  auto inputType = getLhs().getType();
+  auto kernelType = getRhs().getType();
+  auto resultType = getType();
+
+  auto dimNumbers = getDimensionNumbers();
+  auto inputBatchDim = dimNumbers.getInputBatchDimension();
+  auto inputFeatureDim = dimNumbers.getInputFeatureDimension();
+  auto inputSpatialDims = dimNumbers.getInputSpatialDimensions();
+  auto kernelInputFeatureDim = dimNumbers.getKernelInputFeatureDimension();
+  auto kernelOutputFeatureDim = dimNumbers.getKernelOutputFeatureDimension();
+  auto kernelSpatialDims = dimNumbers.getKernelSpatialDimensions();
+  auto outputBatchDim = dimNumbers.getOutputBatchDimension();
+  auto outputFeatureDim = dimNumbers.getOutputFeatureDimension();
+  auto outputSpatialDims = dimNumbers.getOutputSpatialDimensions();
+
+  auto batchGroupCount = getBatchGroupCount();
+  auto featureGroupCount = getFeatureGroupCount();
+
+  if (inputType.isDynamicDim(inputFeatureDim) ||
+      kernelType.isDynamicDim(kernelInputFeatureDim))
+    return mlir::Speculation::NotSpeculatable;
+
+  if (inputType.isDynamicDim(inputBatchDim) &&
+      (batchGroupCount > 1 || !resultType.isDynamicDim(outputBatchDim)))
+    return mlir::Speculation::NotSpeculatable;
+
+  if (kernelType.isDynamicDim(kernelOutputFeatureDim) &&
+      (batchGroupCount > 1 || featureGroupCount > 1 ||
+       !resultType.isDynamicDim(outputFeatureDim)))
+    return mlir::Speculation::NotSpeculatable;
+
+  for (auto [inputDim, kernelDim, resultDim] :
+       llvm::zip(inputSpatialDims, kernelSpatialDims, outputSpatialDims)) {
+    if (!resultType.isDynamicDim(resultDim) &&
+        (inputType.isDynamicDim(inputDim) ||
+         kernelType.isDynamicDim(kernelDim)))
+      return mlir::Speculation::NotSpeculatable;
+  }
+
+  return mlir::Speculation::Speculatable;
+}
+
 //===----------------------------------------------------------------------===//
 // ConvertOp
 //===----------------------------------------------------------------------===//
diff --git a/stablehlo/dialect/StablehloOps.td b/stablehlo/dialect/StablehloOps.td
index dce23c766f..701a35c0a4 100644
--- a/stablehlo/dialect/StablehloOps.td
+++ b/stablehlo/dialect/StablehloOps.td
@@ -2204,7 +2204,8 @@ def StableHLO_CompositeOp : StableHLO_Op<"composite", [DeclareOpInterfaceMethods
   let assemblyFormat = "$name $inputs attr-dict `:` functional-type(operands, results)";
 }
 
-def StableHLO_ConvolutionOp : StableHLO_Op<"convolution", [Pure]> {
+def StableHLO_ConvolutionOp : StableHLO_Op<"convolution",
+    [ConditionallySpeculatable, NoMemoryEffect]> {
   let summary = "Convolution operation";
   let description = [{
     Computes dot products between windows of `lhs` and slices of `rhs` and
@@ -2252,6 +2253,11 @@ def StableHLO_ConvolutionOp : StableHLO_Op<"convolution", [Pure]> {
                              $window_reversal) `}`
     attr-dict `:` functional-type(operands, results)
   }];
+
+  let extraClassDeclaration = commonClassDeclaration # [{
+    /// Interface method for ConditionallySpeculatable.
+    mlir::Speculation::Speculatability getSpeculatability();
+  }];
 }
 
 def StableHLO_CrossReplicaSumOp : StableHLO_Op<"cross-replica-sum",
diff --git a/stablehlo/tests/ops_speculatability.mlir b/stablehlo/tests/ops_speculatability.mlir
index 3dcffb1eda..f142c77922 100644
--- a/stablehlo/tests/ops_speculatability.mlir
+++ b/stablehlo/tests/ops_speculatability.mlir
@@ -1366,6 +1366,111 @@ func.func @concatenate(%static_arg: tensor<2x2xi64>, %first_dim_dynamic: tensor<
 
 // -----
 
+// CHECK-LABEL: func @convolution
+// CHECK-NEXT: return
+func.func @convolution(
+  %input_static: tensor<100x26x26x32xf64>, %kernel_static: tensor<3x3x2x32xf64>,
+  %input_feature_dim_dynamic: tensor<100x26x26x?xf64>, %input_batch_dim_dynamic: tensor<?x26x26x32xf64>,
+  %kernel_feature_dim_dynamic: tensor<3x3x2x?xf64>, %kernel_output_feature_dim_dynamic: tensor<3x3x?x32xf64>, %kernel_output_feature_dim_dynamic_2_feature_groups: tensor<3x3x?x16xf64>,
+  %input_spatial_dims_dynamic: tensor<100x?x?x32xf64>, %kernel_spatial_dims_dynamic: tensor<?x?x2x32xf64>
+) {
+  // Inputs fully static
+  %0 = stablehlo.convolution(%input_static, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x2x32xf64>) -> tensor<100x24x24x2xf64>
+  "hlo_test_speculatability.is_speculatable"(%0) : (tensor<100x24x24x2xf64>) -> ()
+  %1 = stablehlo.convolution(%input_static, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x2x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_speculatable"(%1) : (tensor<?x?x?x?xf64>) -> ()
+
+  // input_feature_dimension is dynamic
+  %2 = stablehlo.convolution(%input_feature_dim_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x?xf64>, tensor<3x3x2x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%2) : (tensor<?x?x?x?xf64>) -> ()
+
+  // kernel_input_feature_dimension is dynamic
+  %3 = stablehlo.convolution(%input_static, %kernel_feature_dim_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x2x?xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%3) : (tensor<?x?x?x?xf64>) -> ()
+
+  // input_batch_dimension is dynamic
+  %4 = stablehlo.convolution(%input_batch_dim_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<?x26x26x32xf64>, tensor<3x3x2x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_speculatable"(%4) : (tensor<?x?x?x?xf64>) -> ()
+  // batch_group_count > 1
+  %5 = stablehlo.convolution(%input_batch_dim_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 2 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<?x26x26x32xf64>, tensor<3x3x2x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%5) : (tensor<?x?x?x?xf64>) -> ()
+  // output_batch_dimension is static
+  %6 = stablehlo.convolution(%input_batch_dim_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<?x26x26x32xf64>, tensor<3x3x2x32xf64>) -> tensor<100x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%6) : (tensor<100x?x?x?xf64>) -> ()
+
+  // kernel_output_feature_dimension is dynamic
+  %7 = stablehlo.convolution(%input_static, %kernel_output_feature_dim_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x?x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_speculatable"(%7) : (tensor<?x?x?x?xf64>) -> ()
+  // batch_group_count > 1
+  %8 = stablehlo.convolution(%input_static, %kernel_output_feature_dim_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 2 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x?x32xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%8) : (tensor<?x?x?x?xf64>) -> ()
+  // feature_group_count > 1
+  %9 = stablehlo.convolution(%input_static, %kernel_output_feature_dim_dynamic_2_feature_groups)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 2 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x?x16xf64>) -> tensor<?x?x?x?xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%9) : (tensor<?x?x?x?xf64>) -> ()
+  // output_feature_dimension is static
+  %10 = stablehlo.convolution(%input_static, %kernel_output_feature_dim_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<3x3x?x32xf64>) -> tensor<?x?x?x2xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%10) : (tensor<?x?x?x2xf64>) -> ()
+
+  // Spatial dimensions dynamic
+  %11 = stablehlo.convolution(%input_spatial_dims_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x?x?x32xf64>, tensor<3x3x2x32xf64>) -> tensor<100x24x24x2xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%11) : (tensor<100x24x24x2xf64>) -> ()
+  %12 = stablehlo.convolution(%input_spatial_dims_dynamic, %kernel_static)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x?x?x32xf64>, tensor<3x3x2x32xf64>) -> tensor<100x?x?x2xf64>
+  "hlo_test_speculatability.is_speculatable"(%12) : (tensor<100x?x?x2xf64>) -> ()
+  %13 = stablehlo.convolution(%input_static, %kernel_spatial_dims_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<?x?x2x32xf64>) -> tensor<100x24x24x2xf64>
+  "hlo_test_speculatability.is_not_speculatable"(%13) : (tensor<100x24x24x2xf64>) -> ()
+  %14 = stablehlo.convolution(%input_static, %kernel_spatial_dims_dynamic)
+         dim_numbers = [b, 0, 1, f] x [0, 1, o, i] -> [b, 0, 1, f],
+         window = {stride = [], pad = [], lhs_dilate = [], rhs_dilate = [], reverse = []}
+         {batch_group_count = 1 : i64, feature_group_count = 1 : i64, precision_config = [#stablehlo<precision DEFAULT>, #stablehlo<precision DEFAULT>]} : (tensor<100x26x26x32xf64>, tensor<?x?x2x32xf64>) -> tensor<100x?x?x2xf64>
+  "hlo_test_speculatability.is_speculatable"(%14) : (tensor<100x?x?x2xf64>) -> ()
+
+  return
+}
+
+// -----
+
 // CHECK-LABEL: func @dot_general
 // CHECK-NEXT: return
 func.func @dot_general(

From a2576e1b91a11968c8e94f1569ec62da88002053 Mon Sep 17 00:00:00 2001
From: Michael Levesque-Dion
Date: Wed, 17 Apr 2024 11:39:26 -0700
Subject: [PATCH 2/4] Split comment and move parts above relevant code

---
 stablehlo/dialect/StablehloOps.cpp | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/stablehlo/dialect/StablehloOps.cpp b/stablehlo/dialect/StablehloOps.cpp
index a775a5fd6f..ab5f63036a 100644
--- a/stablehlo/dialect/StablehloOps.cpp
+++ b/stablehlo/dialect/StablehloOps.cpp
@@ -1018,15 +1018,6 @@ LogicalResult ConvolutionOp::verify() {
 }
 
 mlir::Speculation::Speculatability ConvolutionOp::getSpeculatability() {
-  // input_feature_dimension and kernel_input_feature_dimension must be static
-  // (C14)
-  // input_batch_dimension must be static if batch_group_count > 1 (C10) or if
-  // output_batch_dimension is static (C25, first bullet).
-  // kernel_output_feature_dimension must be static if batch_group_count > 1
-  // (C15) or feature_group_count > 1 (C16) or if output_feature_dimension is
-  // static (C25, second bullet). If a spatial dimension is static in the
-  // output, it must be static in the inputs.
-
   auto inputType = getLhs().getType();
   auto kernelType = getRhs().getType();
   auto resultType = getType();
@@ -1045,19 +1036,27 @@ mlir::Speculation::Speculatability ConvolutionOp::getSpeculatability() {
   auto batchGroupCount = getBatchGroupCount();
   auto featureGroupCount = getFeatureGroupCount();
 
+  // input_feature_dimension and kernel_input_feature_dimension must be static
+  // (C14)
   if (inputType.isDynamicDim(inputFeatureDim) ||
       kernelType.isDynamicDim(kernelInputFeatureDim))
     return mlir::Speculation::NotSpeculatable;
 
+  // input_batch_dimension must be static if batch_group_count > 1 (C10) or if
+  // output_batch_dimension is static (C25, first bullet).
   if (inputType.isDynamicDim(inputBatchDim) &&
       (batchGroupCount > 1 || !resultType.isDynamicDim(outputBatchDim)))
     return mlir::Speculation::NotSpeculatable;
 
+  // kernel_output_feature_dimension must be static if batch_group_count > 1
+  // (C15) or feature_group_count > 1 (C16) or if output_feature_dimension is
   if (kernelType.isDynamicDim(kernelOutputFeatureDim) &&
       (batchGroupCount > 1 || featureGroupCount > 1 ||
        !resultType.isDynamicDim(outputFeatureDim)))
     return mlir::Speculation::NotSpeculatable;
 
+  // static (C25, second bullet). If a spatial dimension is static in the
+  // output, it must be static in the inputs.
   for (auto [inputDim, kernelDim, resultDim] :
        llvm::zip(inputSpatialDims, kernelSpatialDims, outputSpatialDims)) {
     if (!resultType.isDynamicDim(resultDim) &&

From 9aa1d22721bd9b417e50100c892da228e9ff7129 Mon Sep 17 00:00:00 2001
From: Michael Levesque-Dion
Date: Wed, 17 Apr 2024 11:44:14 -0700
Subject: [PATCH 3/4] Fix bad comment split and remove too specific references to C25

---
 stablehlo/dialect/StablehloOps.cpp | 9 +++++----
 stablehlo/dialect/StablehloOps.td  | 3 ++-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/stablehlo/dialect/StablehloOps.cpp b/stablehlo/dialect/StablehloOps.cpp
index ab5f63036a..0dfde89765 100644
--- a/stablehlo/dialect/StablehloOps.cpp
+++ b/stablehlo/dialect/StablehloOps.cpp
@@ -1037,26 +1037,27 @@ mlir::Speculation::Speculatability ConvolutionOp::getSpeculatability() {
   auto featureGroupCount = getFeatureGroupCount();
 
   // input_feature_dimension and kernel_input_feature_dimension must be static
-  // (C14)
+  // (C14).
   if (inputType.isDynamicDim(inputFeatureDim) ||
       kernelType.isDynamicDim(kernelInputFeatureDim))
     return mlir::Speculation::NotSpeculatable;
 
   // input_batch_dimension must be static if batch_group_count > 1 (C10) or if
-  // output_batch_dimension is static (C25, first bullet).
+  // output_batch_dimension is static (C25).
   if (inputType.isDynamicDim(inputBatchDim) &&
       (batchGroupCount > 1 || !resultType.isDynamicDim(outputBatchDim)))
     return mlir::Speculation::NotSpeculatable;
 
   // kernel_output_feature_dimension must be static if batch_group_count > 1
   // (C15) or feature_group_count > 1 (C16) or if output_feature_dimension is
+  // static (C25).
   if (kernelType.isDynamicDim(kernelOutputFeatureDim) &&
       (batchGroupCount > 1 || featureGroupCount > 1 ||
        !resultType.isDynamicDim(outputFeatureDim)))
     return mlir::Speculation::NotSpeculatable;
 
-  // static (C25, second bullet). If a spatial dimension is static in the
-  // output, it must be static in the inputs.
+  // If a spatial dimension is static in the output, it must be static in the
+  // inputs (C25).
   for (auto [inputDim, kernelDim, resultDim] :
        llvm::zip(inputSpatialDims, kernelSpatialDims, outputSpatialDims)) {
     if (!resultType.isDynamicDim(resultDim) &&
diff --git a/stablehlo/dialect/StablehloOps.td b/stablehlo/dialect/StablehloOps.td
index 701a35c0a4..74bfe3f86e 100644
--- a/stablehlo/dialect/StablehloOps.td
+++ b/stablehlo/dialect/StablehloOps.td
@@ -2792,7 +2792,8 @@ def StableHLO_SelectAndScatterOp: StableHLO_Op<"select_and_scatter",
   let hasVerifier = 1;
 }
 
-def StableHLO_SetDimensionSizeOp: StableHLO_Op<"set_dimension_size", [Pure,
+def StableHLO_SetDimensionSizeOp: StableHLO_Op<"set_dimension_size",
+    [ConditionallySpeculatable, NoMemoryEffect,
   InferTensorType]> {
   let summary = "SetDimensionSize operation";
   let description = [{

From 8ba3c4a9a543f731591949e54641486045b34b75 Mon Sep 17 00:00:00 2001
From: Michael Levesque-Dion
Date: Wed, 17 Apr 2024 11:45:26 -0700
Subject: [PATCH 4/4] Undo accidental changes to SetDimensionSizeOp

---
 stablehlo/dialect/StablehloOps.td | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/stablehlo/dialect/StablehloOps.td b/stablehlo/dialect/StablehloOps.td
index 74bfe3f86e..701a35c0a4 100644
--- a/stablehlo/dialect/StablehloOps.td
+++ b/stablehlo/dialect/StablehloOps.td
@@ -2792,8 +2792,7 @@ def StableHLO_SelectAndScatterOp: StableHLO_Op<"select_and_scatter",
   let hasVerifier = 1;
 }
 
-def StableHLO_SetDimensionSizeOp: StableHLO_Op<"set_dimension_size",
-    [ConditionallySpeculatable, NoMemoryEffect,
+def StableHLO_SetDimensionSizeOp: StableHLO_Op<"set_dimension_size", [Pure,
   InferTensorType]> {
   let summary = "SetDimensionSize operation";
   let description = [{