diff --git a/tensorflow/dtensor/mlir/layout_propagation_v2.cc b/tensorflow/dtensor/mlir/layout_propagation_v2.cc
index adf9219a4fd922..a7f089eb1726ec 100644
--- a/tensorflow/dtensor/mlir/layout_propagation_v2.cc
+++ b/tensorflow/dtensor/mlir/layout_propagation_v2.cc
@@ -1359,7 +1359,7 @@ Status RunOneIteration(
     llvm::DenseMap<mlir::Value, std::vector<mlir::OpOperand*>>& consumers,
     llvm::DenseMap<mlir::Value, Layout>& merged_layouts, mlir::ModuleOp& module,
     int stage, int* steps) {
-  if (is_updated.empty()) return OkStatus();
+  if (is_updated.empty()) return absl::OkStatus();
   // Merge any possibly updated layouts.
   if (mlir::failed(
           MergeAndGetUpdatedLayouts(is_locked, is_updated, producer_request,
@@ -1384,7 +1384,7 @@ Status RunOneIteration(
     return errors::Internal("UpdateLayoutsForOp failed to update layouts.");
   }
   ++(*steps);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Compares every value's layouts in `merged_a` with the ones in `merged_b`,
@@ -1406,7 +1406,7 @@ Status CompareMergedLayouts(const llvm::DenseMap<mlir::Value, Layout>& merged_a,
       changed.insert(value);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // MLIR pass that propagates layout for all ops the module.
diff --git a/tensorflow/dtensor/mlir/shape_utils.cc b/tensorflow/dtensor/mlir/shape_utils.cc
index 57b07683f67940..d0eda18a6bc94b 100644
--- a/tensorflow/dtensor/mlir/shape_utils.cc
+++ b/tensorflow/dtensor/mlir/shape_utils.cc
@@ -239,7 +239,7 @@ Status InferSPMDExpandedLocalShapeForResourceOutput(
         mlir::ArrayRef<mlir::TensorType>{local_variable_subtype}, context));
     op_result->setType(new_var_type);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 mlir::Operation* InferSPMDExpandedLocalShape(mlir::Operation* op) {
diff --git a/tensorflow/dtensor/mlir/sparse_expander.cc b/tensorflow/dtensor/mlir/sparse_expander.cc
index c471fbc1ed4241..92c5d616436423 100644
--- a/tensorflow/dtensor/mlir/sparse_expander.cc
+++ b/tensorflow/dtensor/mlir/sparse_expander.cc
@@ -64,7 +64,7 @@ Status RunSparseExpansion(mlir::Operation* op, mlir::Operation** output) {
   } else {  // If there is no SparseTensor inputs then just return the op.
     *output = op;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace dtensor
diff --git a/tensorflow/dtensor/mlir/spmd_expander.cc b/tensorflow/dtensor/mlir/spmd_expander.cc
index ce6b34c7a004b5..4e63f87970777f 100644
--- a/tensorflow/dtensor/mlir/spmd_expander.cc
+++ b/tensorflow/dtensor/mlir/spmd_expander.cc
@@ -73,7 +73,7 @@ Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts,
       computed_layout.getSecond() = parted;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Returns whether DTensor should skip SPMD expansion because `op` uses parted
@@ -168,7 +168,7 @@ Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
   }
   SetLayoutOnOp(*output, absl::Span<absl::optional<Layout>>(
                              computed_layout.data(), computed_layout.size()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // `op` may be removed/replaced from the graph during SPMD expansion, so
@@ -239,7 +239,7 @@ Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
@@ -299,7 +299,7 @@ Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) {
     VLOG(1) << "No expansion found for " << OpName(op) << "\n";
     *output = op;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace dtensor
diff --git a/tensorflow/dtensor/mlir/spmd_expander_common.cc b/tensorflow/dtensor/mlir/spmd_expander_common.cc
index 7f411c4da74c73..b3f823ae7e9fc4 100644
--- a/tensorflow/dtensor/mlir/spmd_expander_common.cc
+++ b/tensorflow/dtensor/mlir/spmd_expander_common.cc
@@ -141,7 +141,7 @@ Status CreateSplitOp(const int num_split, const int split_dimension,
   llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
   *split_op = builder->create<mlir::TF::SplitOp>(
       location, output_types, split_dimension_op.getOutput(), src_input);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Given layouts + shapes, determines if the two are broadcasting compatible.
@@ -682,7 +682,7 @@ Status SetBuilderInsertionAfterValue(mlir::Value value,
                                      mlir::OpBuilder& builder) {
   if (value.isa<mlir::OpResult>()) {
     builder.setInsertionPointAfterValue(value);
-    return OkStatus();
+    return absl::OkStatus();
   }
   mlir::tf_device::ClusterOp cluster;
   for (mlir::Operation* op : value.getUsers()) {
@@ -696,7 +696,7 @@ Status SetBuilderInsertionAfterValue(mlir::Value value,
   if (!cluster) return errors::Internal("value not used in any cluster");
 
   builder.setInsertionPointToStart(cluster.SingleBlock::getBody());
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Status PrintTensor(mlir::Value value, const std::string& format_string = "%s") {
@@ -713,7 +713,7 @@ Status PrintTensor(mlir::Value value, const std::string& format_string = "%s") {
   builder.create<mlir::TF::PrintV2Op>(value.getLoc(), format.getOutput(),
                                       /*output_stream=*/"log(info)",
                                       /*end=*/"\n");
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Status ExtractConstStringVectorFromValue(
@@ -731,7 +731,7 @@ Status ExtractConstStringVectorFromValue(
   for (const auto& str : attr.getRawStringData()) {
     out_vector.push_back(str.str());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 StatusOr<std::string> ExtractConstScalarStringFromValue(mlir::Value value) {
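
Note: every hunk above makes the same mechanical substitution, replacing the unqualified OkStatus() (which presumably resolved to tensorflow::OkStatus() through the enclosing namespace) with the fully qualified absl::OkStatus(). The sketch below is a minimal standalone illustration of the Abseil idiom being adopted, assuming only the absl status library; the Validate function and its parameter are hypothetical and not part of this diff.

#include <iostream>

#include "absl/status/status.h"

// Hypothetical helper (not DTensor code): functions returning absl::Status
// signal success with absl::OkStatus() and failure with one of the
// canonical error constructors.
absl::Status Validate(int num_split) {
  if (num_split <= 0) {
    return absl::InvalidArgumentError("num_split must be positive");
  }
  // Fully qualified spelling, matching the "+" lines in the hunks above.
  return absl::OkStatus();
}

int main() {
  std::cout << Validate(4).ToString() << "\n";  // Prints "OK".
  std::cout << Validate(0).ToString() << "\n";  // Prints the error details.
  return 0;
}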