
Integrate LLVM at llvm/llvm-project@2d287f51eff2 #2797

Merged · 3 commits · May 7, 2025
4 changes: 2 additions & 2 deletions WORKSPACE.bazel
@@ -17,9 +17,9 @@ workspace(name = "stablehlo")
 
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 
-LLVM_COMMIT = "c60f24dca96dad44afc60df3fcb80997737b6306"
+LLVM_COMMIT = "2d287f51eff2a5fbf84458a33f7fb2493cf67965"
 
-LLVM_SHA256 = "86ff363de84f28a0acc854e0bafb78d38162b0e09a59e9e57689dc02e4501d6d"
+LLVM_SHA256 = "e06d0a35b0e0570b2f54dfd23d0e9fe6f084e032c14bb7ab194b06cb8c9cb86c"
 
 http_archive(
     name = "llvm-raw",
2 changes: 1 addition & 1 deletion build_tools/llvm_version.txt
@@ -1 +1 @@
-c60f24dca96dad44afc60df3fcb80997737b6306
+2d287f51eff2a5fbf84458a33f7fb2493cf67965
5 changes: 2 additions & 3 deletions stablehlo/conversions/linalg/tests/gather.mlir
@@ -30,12 +30,11 @@ func.func @gather(%operand : tensor<1x4x8xi32>, %start_indices : tensor<1x8x2xi3
 // CHECK-SAME: outs(%[[INIT]] : tensor<1x8x8xi32>)
 // CHECK-SAME: {someattr}
 // CHECK: ^bb0
-// CHECK-DAG: %[[IDX0:.+]] = linalg.index 0
 // CHECK-DAG: %[[IDX1:.+]] = linalg.index 1
 // CHECK-DAG: %[[IDX2:.+]] = linalg.index 2
-// CHECK-DAG: %[[S0_INT:.+]] = tensor.extract %[[START_INDICES]][%[[IDX0]], %[[IDX1]], %[[C0]]] : tensor<1x8x2xi32>
+// CHECK-DAG: %[[S0_INT:.+]] = tensor.extract %[[START_INDICES]][%[[C0]], %[[IDX1]], %[[C0]]] : tensor<1x8x2xi32>
 // CHECK-DAG: %[[S0:.+]] = arith.index_cast %[[S0_INT]] : i32 to index
-// CHECK-DAG: %[[S1_INT:.+]] = tensor.extract %[[START_INDICES]][%[[IDX0]], %[[IDX1]], %[[C1]]] : tensor<1x8x2xi32>
+// CHECK-DAG: %[[S1_INT:.+]] = tensor.extract %[[START_INDICES]][%[[C0]], %[[IDX1]], %[[C1]]] : tensor<1x8x2xi32>
 // CHECK-DAG: %[[S1:.+]] = arith.index_cast %[[S1_INT]] : i32 to index
 // CHECK-DAG: %[[CLAMP0:.+]] = arith.maxsi %[[S0]], %[[C0]] : index
 // CHECK-DAG: %[[IN0:.+]] = arith.minsi %[[CLAMP0]], %[[C0]]
8 changes: 4 additions & 4 deletions stablehlo/conversions/linalg/tests/miscellaneous.mlir
@@ -1233,6 +1233,7 @@ func.func @torch_index_select(%arg0: tensor<5x1x5xi32>,
 // CHECK: func @torch_index_select
 // CHECK-SAME: %[[INPUT:[a-zA-Z0-9_]*]]
 // CHECK-SAME: %[[INDEX:[a-zA-Z0-9_]*]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[INIT1:.+]] = tensor.empty() :
 // CHECK: %[[INIT2:.+]] = tensor.empty() :
 // CHECK: linalg.generic {
@@ -1244,9 +1245,8 @@ func.func @torch_index_select(%arg0: tensor<5x1x5xi32>,
 // CHECK-SAME: {someattr}
 // CHECK: ^{{.+}}(%[[VAL:.+]]: i32, %{{.+}}: i32, %{{.+}}: i32):
 // CHECK: %[[CAST:.+]] = arith.index_cast %[[VAL]] : i32 to index
-// CHECK: %[[J:.+]] = linalg.index 1
 // CHECK: %[[K:.+]] = linalg.index 2
-// CHECK: %[[VAL2:.+]] = tensor.extract %[[INPUT]][%[[CAST]], %[[J]], %[[K]]] : tensor<5x1x5xi32>
+// CHECK: %[[VAL2:.+]] = tensor.extract %[[INPUT]][%[[CAST]], %[[C0]], %[[K]]] : tensor<5x1x5xi32>
 // CHECK: linalg.yield %[[VAL2]] : i32
 
 // -----
@@ -1265,6 +1265,7 @@ func.func @torch_index_select_unsigned(%arg0: tensor<5x1x5xui32>,
 } : (tensor<5x1x5xui32>, tensor<2xi32>) -> tensor<2x1x5xui32>
 func.return %0 : tensor<2x1x5xui32>
 }
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[INPUT_SIGNLESS:.*]] = builtin.unrealized_conversion_cast %[[INPUT]] : tensor<5x1x5xui32> to tensor<5x1x5xi32>
 // CHECK: %[[INIT:.*]] = tensor.empty() : tensor<1x5xi32>
 // CHECK: %[[RES:.+]] = linalg.generic {
@@ -1274,9 +1275,8 @@ func.func @torch_index_select_unsigned(%arg0: tensor<5x1x5xui32>,
 // CHECK-SAME: ins(%[[INDEX]], %[[INIT]] : tensor<2xi32>, tensor<1x5xi32>)
 // CHECK: ^{{.+}}(%[[VAL:.+]]: i32, %{{.+}}: i32, %{{.+}}: i32):
 // CHECK: %[[CAST:.+]] = arith.index_cast %[[VAL]] : i32 to index
-// CHECK: %[[J:.+]] = linalg.index 1
 // CHECK: %[[K:.+]] = linalg.index 2
-// CHECK: %[[VAL2:.+]] = tensor.extract %[[INPUT_SIGNLESS]][%[[CAST]], %[[J]], %[[K]]] : tensor<5x1x5xi32>
+// CHECK: %[[VAL2:.+]] = tensor.extract %[[INPUT_SIGNLESS]][%[[CAST]], %[[C0]], %[[K]]] : tensor<5x1x5xi32>
 // CHECK: linalg.yield %[[VAL2]] : i32
 // CHECK: %[[RES_UNSIGNED:.+]] = builtin.unrealized_conversion_cast %[[RES]] : tensor<2x1x5xi32> to tensor<2x1x5xui32>
 // CHECK: return %[[RES_UNSIGNED]]
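Note: the FileCheck churn in gather.mlir and here appears to track a single upstream linalg behavior change: when a loop dimension of a `linalg` op has static extent 1, `linalg.index` on that dimension now folds to a constant `0` (the only valid index into a size-1 dimension), so the `%[[IDX0]]`/`%[[J]]` captures disappear and a shared `%[[C0]]` constant is matched instead. A hypothetical sketch of that kind of fold, for illustration only (`FoldUnitDimIndex` is an invented name, not the upstream pattern):

```cpp
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Illustrative pattern mirroring the fold these CHECK-line updates exercise:
// linalg.index over a statically-unit loop dimension can only yield 0.
struct FoldUnitDimIndex : OpRewritePattern<linalg::IndexOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(linalg::IndexOp op,
                                PatternRewriter &rewriter) const override {
    auto linalgOp = op->getParentOfType<linalg::LinalgOp>();
    if (!linalgOp) return failure();
    // Only fold when the enclosing loop range for this dimension is
    // statically known to be exactly 1.
    SmallVector<int64_t> loopRanges = linalgOp.getStaticLoopRanges();
    if (op.getDim() >= loopRanges.size() || loopRanges[op.getDim()] != 1)
      return failure();
    rewriter.replaceOpWithNewOp<arith::ConstantIndexOp>(op, 0);
    return success();
  }
};
```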
6 changes: 3 additions & 3 deletions stablehlo/tests/TestUtils.cpp
@@ -172,9 +172,9 @@ struct HloTestSpeculatabilityPass
 
   void runOnOperation() override {
     GreedyRewriteConfig config;
-    config.maxIterations = 1;
-    config.useTopDownTraversal = true;
-    config.enableRegionSimplification = GreedySimplifyRegionLevel::Disabled;
+    config.setMaxIterations(1)
+        .setUseTopDownTraversal(true)
+        .setRegionSimplificationLevel(GreedySimplifyRegionLevel::Disabled);
     (void)applyPatternsGreedily(getOperation(), std::move(patterns));
   }
 
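Note: this hunk is the template for most of the C++ changes in this PR. Upstream MLIR replaced `GreedyRewriteConfig`'s public fields (`maxIterations`, `useTopDownTraversal`, `enableRegionSimplification`, ...) with chainable setters and matching getters. A minimal sketch of the migrated usage, assuming only the accessors that actually appear in this diff (`runPatterns` is a hypothetical helper, not code from this repo):

```cpp
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

// Each setter returns GreedyRewriteConfig&, so the calls chain;
// reads now go through getters such as getMaxIterations().
LogicalResult runPatterns(Operation *op, FrozenRewritePatternSet patterns) {
  GreedyRewriteConfig config;
  config.setUseTopDownTraversal(true)
      .setRegionSimplificationLevel(GreedySimplifyRegionLevel::Disabled)
      .setMaxIterations(1);
  if (failed(applyPatternsGreedily(op, std::move(patterns), config)))
    return op->emitError("failed to converge in ")
           << config.getMaxIterations() << " iterations";
  return success();
}
```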
12 changes: 6 additions & 6 deletions stablehlo/transforms/StablehloCanonicalizeDynamism.cpp
@@ -308,11 +308,11 @@ struct StablehloCanonicalizeDynamismPass
       StablehloCanonicalizeDynamismPassBase;
 
   LogicalResult initialize(MLIRContext* context) override {
-    config.useTopDownTraversal = true;
-    config.enableRegionSimplification = GreedySimplifyRegionLevel::Aggressive;
-    config.maxIterations = 2;
-    config.maxNumRewrites = GreedyRewriteConfig::kNoLimit;
-    config.strictMode = GreedyRewriteStrictness::AnyOp;
+    config.setUseTopDownTraversal(true)
+        .setRegionSimplificationLevel(GreedySimplifyRegionLevel::Aggressive)
+        .setMaxIterations(2)
+        .setMaxNumRewrites(GreedyRewriteConfig::kNoLimit)
+        .setStrictness(GreedyRewriteStrictness::AnyOp);
 
     RewritePatternSet patterns_(context);
     populateStablehloCanonicalizeDynamismPatterns(context, &patterns_);
@@ -325,7 +325,7 @@ struct StablehloCanonicalizeDynamismPass
     auto func = getOperation();
     if (failed(applyPatternsGreedily(func, patterns, config))) {
       func.emitError("Failed to converge StablehloCanonicalizeDynamism in ")
-          << config.maxIterations << " iterations";
+          << config.getMaxIterations() << " iterations";
     }
   }
4 changes: 2 additions & 2 deletions stablehlo/transforms/StablehloCompatibilityExpander.cpp
@@ -329,7 +329,7 @@ struct StablehloCompatibilityExpanderPass
   LogicalResult initialize(MLIRContext *context) override {
     auto targetVersion = validateTargetVersion(targetVersionOption);
 
-    config.useTopDownTraversal = true;
+    config.setUseTopDownTraversal(true);
 
     RewritePatternSet patterns_(context);
     populateStablehloCompatibilityExpanderPatterns(context, &patterns_,
@@ -347,7 +347,7 @@ struct StablehloCompatibilityExpanderPass
         failed(applyPatternsGreedily(module, patterns, config))) {
       module.emitError(
           "Failed to converge StableHLOCompatibilityExpanderPass in ")
-          << config.maxIterations << " iterations";
+          << config.getMaxIterations() << " iterations";
       signalPassFailure();
     }
   }
4 changes: 2 additions & 2 deletions stablehlo/transforms/StablehloComplexMathExpander.cpp
@@ -51,7 +51,7 @@ struct StablehloComplexMathExpanderPass
 
  public:
   LogicalResult initialize(MLIRContext *context) override {
-    config.useTopDownTraversal = true;
+    config.setUseTopDownTraversal(true);
     RewritePatternSet patterns_(context);
     populateStablehloComplexMathExpanderPatterns(context, &patterns_);
     patterns = std::move(patterns_);
@@ -62,7 +62,7 @@ struct StablehloComplexMathExpanderPass
     auto func = getOperation();
     if (failed(applyPatternsGreedily(func, patterns, config))) {
       func.emitError("Failed to converge StableHLOComplexMathExpanderPass in ")
-          << config.maxIterations << " iterations";
+          << config.getMaxIterations() << " iterations";
       signalPassFailure();
     }
   }
2 changes: 1 addition & 1 deletion stablehlo/transforms/StablehloLegalizeQDQToQuantizedOp.cpp
@@ -115,7 +115,7 @@ class StablehloLegalizeQDQToQuantizedOpPass
     if (failed(applyPatternsGreedily(func, patterns, config))) {
       func.emitError(
           "Failed to converge StablehloLegalizeQDQToQuantizedOpPass in ")
-          << config.maxIterations << " iterations";
+          << config.getMaxIterations() << " iterations";
       signalPassFailure();
     }
   }
2 changes: 1 addition & 1 deletion stablehlo/transforms/StablehloLegalizeQuantizedOpToQDQ.cpp
@@ -125,7 +125,7 @@ class StablehloLegalizeQuantizedOpToQDQPass
     auto func = getOperation();
     if (failed(applyPatternsGreedily(func, patterns, config))) {
       func.emitError("Failed to converge StablehloLegalizeQuantizedOpToQDQ in ")
-          << config.maxIterations << " iterations";
+          << config.getMaxIterations() << " iterations";
       signalPassFailure();
     }
   }
16 changes: 8 additions & 8 deletions stablehlo/transforms/StablehloRefineShapes.cpp
@@ -1023,14 +1023,14 @@ LogicalResult applyShapeRefinementPatterns(func::FuncOp func,
   // The algorithm behind this pass consists of a single traversal of the
   // function. This is sufficient because we only support one function per
   // program at the moment.
-  // TODO(#1048): Find out why .maxIterations = 1 no longer works.
+  // TODO(#1048): Find out why .setMaxIterations(1) no longer works.
   // There have been recent refactors to applyPatternsGreedily
   // upstream, and that might be the reason.
-  config.useTopDownTraversal = true;
-  config.enableRegionSimplification = GreedySimplifyRegionLevel::Aggressive;
-  config.maxIterations = 2;
-  config.maxNumRewrites = GreedyRewriteConfig::kNoLimit;
-  config.strictMode = GreedyRewriteStrictness::AnyOp;
+  config.setUseTopDownTraversal(true)
+      .setRegionSimplificationLevel(GreedySimplifyRegionLevel::Aggressive)
+      .setMaxIterations(2)
+      .setMaxNumRewrites(GreedyRewriteConfig::kNoLimit)
+      .setStrictness(GreedyRewriteStrictness::AnyOp);
 
   populateStablehloRefineShapesPatterns(context, &patterns);
   patterns.add<RefineCallOpPattern>(context, state);
@@ -1049,7 +1049,7 @@ LogicalResult applyShapeRefinementPatterns(func::FuncOp func,
 
   if (failed(applyPatternsGreedily(func, std::move(patterns), config)))
     func.emitError("Failed to converge StablehloRefineShapes in ")
-        << config.maxIterations << " iterations";
+        << config.getMaxIterations() << " iterations";
 
   return success();
 }
@@ -1097,7 +1097,7 @@ LogicalResult refineFunction(MLIRContext& context, RefineShapeState& state,
   size_t firstFunctionalArgument =
       leadingTokenOperands + key.getGlobalConstants().size();
   argIndices.set(leadingTokenOperands, firstFunctionalArgument);
-  func.eraseArguments(argIndices);
+  if (failed(func.eraseArguments(argIndices))) return failure();
 
   // Refine the remaining argument types, wrap with shape buffer custom calls.
   SmallVector<Type> refinedTypes =
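Note: besides the config migration, the last hunk adapts to `eraseArguments`, which evidently now returns a result that must be checked (hence the `failed(...)` wrapper) rather than `void`. A minimal sketch under that assumption (`dropLeadingArgs` is a hypothetical helper):

```cpp
#include "llvm/ADT/BitVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

using namespace mlir;

// Erase the first `numToDrop` arguments of a func.func, propagating failure.
// Assumes the post-refactor API where eraseArguments returns LogicalResult.
LogicalResult dropLeadingArgs(func::FuncOp func, unsigned numToDrop) {
  llvm::BitVector argIndices(func.getNumArguments());
  argIndices.set(/*I=*/0, /*E=*/numToDrop);  // mark indices [0, numToDrop)
  return func.eraseArguments(argIndices);
}
```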
2 changes: 1 addition & 1 deletion stablehlo/transforms/StablehloWrapInComposite.cpp
@@ -183,7 +183,7 @@ class StablehloWrapInCompositePass
 
   void runOnOperation() override {
     GreedyRewriteConfig config;
-    config.strictMode = GreedyRewriteStrictness::ExistingOps;
+    config.setStrictness(GreedyRewriteStrictness::ExistingOps);
     if (failed(applyPatternsGreedily(getOperation(), std::move(patterns),
                                      config))) {
       signalPassFailure();