Automated Code Change
PiperOrigin-RevId: 605568714
tensorflower-gardener committed Feb 9, 2024
1 parent a293c28 commit d92a9cb
Showing 5 changed files with 14 additions and 14 deletions.
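
The change itself is mechanical: each bare `return OkStatus();` in the DTensor MLIR sources below is replaced by the explicitly qualified `return absl::OkStatus();`. A minimal sketch of the pattern, assuming only the Abseil status library is available (the ValidateStepCount helper is hypothetical and not part of this commit):

// Hypothetical helper illustrating the substitution; not from this commit.
#include "absl/status/status.h"

absl::Status ValidateStepCount(int steps) {
  if (steps < 0) {
    return absl::InvalidArgumentError("steps must be non-negative");
  }
  return absl::OkStatus();  // previously the unqualified spelling OkStatus()
}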
6 changes: 3 additions & 3 deletions tensorflow/dtensor/mlir/layout_propagation_v2.cc
@@ -1359,7 +1359,7 @@ Status RunOneIteration(
     llvm::DenseMap<mlir::Value, std::vector<mlir::OpOperand*>>& consumers,
     llvm::DenseMap<mlir::Value, Layout>& merged_layouts, mlir::ModuleOp& module,
     int stage, int* steps) {
-  if (is_updated.empty()) return OkStatus();
+  if (is_updated.empty()) return absl::OkStatus();
   // Merge any possibly updated layouts.
   if (mlir::failed(
           MergeAndGetUpdatedLayouts(is_locked, is_updated, producer_request,
@@ -1384,7 +1384,7 @@ Status RunOneIteration(
     return errors::Internal("UpdateLayoutsForOp failed to update layouts.");
   }
   ++(*steps);
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Compares every value's layouts in `merged_a` with the ones in `merged_b`,
@@ -1406,7 +1406,7 @@ Status CompareMergedLayouts(const llvm::DenseMap<mlir::Value, Layout>& merged_a,
       changed.insert(value);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 // MLIR pass that propagates layout for all ops the module.
2 changes: 1 addition & 1 deletion tensorflow/dtensor/mlir/shape_utils.cc
@@ -239,7 +239,7 @@ Status InferSPMDExpandedLocalShapeForResourceOutput(
         mlir::ArrayRef<mlir::TensorType>{local_variable_subtype}, context));
     op_result->setType(new_var_type);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 mlir::Operation* InferSPMDExpandedLocalShape(mlir::Operation* op) {
2 changes: 1 addition & 1 deletion tensorflow/dtensor/mlir/sparse_expander.cc
@@ -64,7 +64,7 @@ Status RunSparseExpansion(mlir::Operation* op, mlir::Operation** output) {
   } else {  // If there is no SparseTensor inputs then just return the op.
     *output = op;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace dtensor
8 changes: 4 additions & 4 deletions tensorflow/dtensor/mlir/spmd_expander.cc
@@ -73,7 +73,7 @@ Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts,
       computed_layout.getSecond() = parted;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Returns whether DTensor should skip SPMD expansion because `op` uses parted
@@ -168,7 +168,7 @@ Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
   }
   SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>(
                              computed_layout.data(), computed_layout.size()));
-  return OkStatus();
+  return absl::OkStatus();
 }

 // `op` may be removed/replaced from the graph during SPMD expansion, so
@@ -239,7 +239,7 @@ Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
     }
   }

-  return OkStatus();
+  return absl::OkStatus();
 }

 StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
@@ -299,7 +299,7 @@ Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) {
     VLOG(1) << "No expansion found for " << OpName(op) << "\n";
     *output = op;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 }  // namespace dtensor
10 changes: 5 additions & 5 deletions tensorflow/dtensor/mlir/spmd_expander_common.cc
@@ -141,7 +141,7 @@ Status CreateSplitOp(const int num_split, const int split_dimension,
   llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
   *split_op = builder->create<mlir::TF::SplitOp>(
       location, output_types, split_dimension_op.getOutput(), src_input);
-  return OkStatus();
+  return absl::OkStatus();
 }

 // Given layouts + shapes, determines if the two are broadcasting compatible.
@@ -682,7 +682,7 @@ Status SetBuilderInsertionAfterValue(mlir::Value value,
                                      mlir::OpBuilder& builder) {
   if (value.isa<mlir::OpResult>()) {
     builder.setInsertionPointAfterValue(value);
-    return OkStatus();
+    return absl::OkStatus();
   }
   mlir::tf_device::ClusterOp cluster;
   for (mlir::Operation* op : value.getUsers()) {
@@ -696,7 +696,7 @@ Status SetBuilderInsertionAfterValue(mlir::Value value,
   if (!cluster) return errors::Internal("value not used in any cluster");

   builder.setInsertionPointToStart(cluster.SingleBlock::getBody());
-  return OkStatus();
+  return absl::OkStatus();
 }

 Status PrintTensor(mlir::Value value, const std::string& format_string = "%s") {
@@ -713,7 +713,7 @@ Status PrintTensor(mlir::Value value, const std::string& format_string = "%s") {
   builder.create<mlir::TF::PrintV2Op>(value.getLoc(), format.getOutput(),
                                       /*output_stream=*/"log(info)",
                                       /*end=*/"\n");
-  return OkStatus();
+  return absl::OkStatus();
 }

 Status ExtractConstStringVectorFromValue(
@@ -731,7 +731,7 @@ Status ExtractConstStringVectorFromValue(
   for (const auto& str : attr.getRawStringData()) {
     out_vector.push_back(str.str());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }

 StatusOr<std::string> ExtractConstScalarStringFromValue(mlir::Value value) {
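
Because every touched function returns `Status` (which in this area of TensorFlow is expected to be an alias of `absl::Status`), call sites need no changes. A hedged sketch of typical propagation at a caller, with hypothetical names (`DoExpansion`, `Pipeline`) standing in for helpers such as RunSPMDExpansion above:

// Hypothetical call-site sketch; not code from this commit.
#include "absl/status/status.h"

absl::Status DoExpansion();  // assume defined elsewhere

absl::Status Pipeline() {
  absl::Status status = DoExpansion();
  if (!status.ok()) return status;  // propagate any failure unchanged
  return absl::OkStatus();          // success path uses the qualified form
}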