Dataset schema:

| Column | Type |
| --- | --- |
| ID | string (length 36) |
| Language | string (1 class) |
| Repository Name | string (13 classes) |
| File Name | string (length 2-48) |
| File Path in Repository | string (length 11-111) |
| File Path for Unit Test | string (length 13-116) |
| Code | string (length 0-278k) |
| Unit Test - (Ground Truth) | string (length 78-663k) |
| Code Url | string (length 91-198) |
| Test Code Url | string (length 93-203) |
| Commit Hash | string (13 classes) |

Sample row:

| Field | Value |
| --- | --- |
| ID | f0b886bd-62f2-4655-ab1b-f6fd4c08d769 |
| Language | cpp |
| Repository Name | tensorflow/tensorflow |
| File Name | xla_builder |
| File Path in Repository | third_party/xla/xla/hlo/builder/xla_builder.cc |
| File Path for Unit Test | third_party/xla/xla/hlo/builder/xla_builder_test.cc |

Code (third_party/xla/xla/hlo/builder/xla_builder.cc):

#include "xla/hlo/builder/xla_builder.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <queue>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/sharding_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/sharding_op_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
namespace {
static const char kNameSeparator = '.';
std::string GetBaseName(const std::string& name, char separator) {
auto pos = name.rfind(separator);
CHECK_NE(pos, std::string::npos) << name;
return name.substr(0, pos);
}
std::string GetFullName(const std::string& base_name, char separator,
int64_t id) {
const char separator_str[] = {separator, '\0'};
return StrCat(base_name, separator_str, id);
}
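// Illustrative example: GetFullName("add", '.', 42) returns "add.42". Ids are
// assigned by the builder, so the numeric suffix is never chosen by callers.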
template <typename T>
void SetProtoIdAndName(T* entry, const std::string& base_name, char separator,
int64_t id) {
entry->set_id(id);
entry->set_name(GetFullName(base_name, separator, id));
}
bool InstrIsSetBound(const HloInstructionProto* instr_proto) {
HloOpcode opcode = StringToHloOpcode(instr_proto->opcode()).value();
if (opcode == HloOpcode::kCustomCall &&
instr_proto->custom_call_target() == "SetBound") {
return true;
}
return false;
}
absl::Status NormalizeAndAssignSharing(HloInstructionProto* instr,
const OpSharding& op_sharding) {
Shape shape(instr->shape());
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(op_sharding));
sharding = sharding.NormalizeTupleSharding(shape);
TF_RETURN_IF_ERROR(sharding.Validate(shape));
*instr->mutable_sharding() = sharding.ToProto();
return absl::OkStatus();
}
}  // namespace
namespace internal {
XlaOp XlaBuilderFriend::BuildAddDependency(XlaBuilder* builder, XlaOp operand,
XlaOp token, const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAddDependency,
{operand, token});
});
}
XlaOp XlaBuilderFriend::BuildFusion(
XlaBuilder* builder, absl::Span<const XlaOp> operands,
absl::string_view fusion_kind, const XlaComputation& fused_computation,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_fusion_kind(std::string(fusion_kind));
if (!output_operand_aliasing.empty()) {
for (const auto& pair : output_operand_aliasing) {
auto aliasing = instr.add_output_operand_aliasing();
aliasing->set_operand_index(pair.second.first);
for (int64_t index : pair.second.second) {
aliasing->add_operand_shape_index(index);
}
for (int64_t index : pair.first) {
aliasing->add_output_shape_index(index);
}
}
}
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(auto program_shape,
fused_computation.GetProgramShape());
*instr.mutable_shape() = program_shape.result().ToProto();
builder->AddCalledComputation(fused_computation, &instr);
return builder->AddInstruction(std::move(instr), HloOpcode::kFusion,
operands);
});
}
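// Async ops come in start/update/done triples. BuildAsyncStart also returns
// the id of the called computation so callers can stitch the matching
// async-update and async-done instructions onto the same computation.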
std::pair<XlaOp, int64_t> XlaBuilderFriend::BuildAsyncStart(
XlaBuilder* builder, absl::Span<const XlaOp> operands,
std::string execution_thread, const XlaComputation& called_computation,
const Shape& shape) {
int64_t called_computation_id;
auto start_op = builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_async_execution_thread(execution_thread);
builder->AddCalledComputation(called_computation, &instr);
called_computation_id = instr.called_computation_ids()[0];
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncStart,
operands);
});
return {start_op, called_computation_id};
}
XlaOp XlaBuilderFriend::BuildAsyncUpdate(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncUpdate,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildAsyncDone(XlaBuilder* builder, const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAsyncDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildAllGatherStart(
XlaBuilder* builder, const XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return builder->AllGatherImpl(operand, all_gather_dimension, shard_count,
replica_groups, channel_id, layout,
use_global_device_ids, true);
}
XlaOp XlaBuilderFriend::BuildAllGatherDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAllGatherDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildAllReduceStart(
XlaBuilder* builder, XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& layout,
const std::optional<bool> use_global_device_ids) {
return builder->AllReduceImpl(operand, computation, replica_groups,
channel_id, layout, use_global_device_ids,
true);
}
XlaOp XlaBuilderFriend::BuildAllReduceDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kAllReduceDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildCopyStart(
XlaBuilder* builder, const XlaOp operand,
std::optional<int> cross_program_prefetch_index) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (cross_program_prefetch_index) {
instr.set_cross_program_prefetch_index(*cross_program_prefetch_index);
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape,
builder->GetShapePtr(operand));
Shape u32 = ShapeUtil::MakeScalarShape(PrimitiveType::U32);
Shape shape =
ShapeUtil::MakeTupleShapeWithPtrs({operand_shape, operand_shape, &u32});
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kCopyStart,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildCopyDone(XlaBuilder* builder, const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kCopyDone,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildCollectivePermuteStart(
XlaBuilder* builder, XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
return builder->CollectivePermuteImpl(operand, source_target_pairs,
channel_id, true);
}
XlaOp XlaBuilderFriend::BuildCollectivePermuteDone(XlaBuilder* builder,
const XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(
std::move(instr), HloOpcode::kCollectivePermuteDone, {operand});
});
}
XlaOp XlaBuilderFriend::BuildBitcast(XlaBuilder* builder, XlaOp operand,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kBitcast,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildDomain(XlaBuilder* builder, XlaOp operand,
const OpSharding entry,
const OpSharding exit, const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_domain_entry_sharding() = entry;
*instr.mutable_domain_exit_sharding() = exit;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kDomain,
{operand});
});
}
XlaOp XlaBuilderFriend::BuildPartitionId(XlaBuilder* builder,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr), HloOpcode::kPartitionId);
});
}
XlaOp XlaBuilderFriend::BuildSend(XlaBuilder* builder, XlaOp operand,
XlaOp token, const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto send_instr;
TF_ASSIGN_OR_RETURN(const Shape* shape, builder->GetShapePtr(operand));
*send_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({*shape, ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()})
.ToProto();
send_instr.set_channel_id(handle.handle());
send_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(send_instr), HloOpcode::kSend,
{operand, token});
});
}
XlaOp XlaBuilderFriend::BuildSendDone(XlaBuilder* builder, XlaOp operand,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto send_done_instr;
*send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
send_done_instr.set_channel_id(handle.handle());
send_done_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(send_done_instr),
HloOpcode::kSendDone, {operand});
});
}
XlaOp XlaBuilderFriend::BuildRecv(XlaBuilder* builder, XlaOp token,
const Shape& shape,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto recv_instr;
*recv_instr.mutable_shape() =
ShapeUtil::MakeTupleShape(
{shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()})
.ToProto();
recv_instr.set_channel_id(handle.handle());
recv_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(recv_instr), HloOpcode::kRecv,
{token});
});
}
XlaOp XlaBuilderFriend::BuildRecvDone(XlaBuilder* builder, XlaOp token,
const Shape& shape,
const ChannelHandle& handle,
bool is_host_transfer) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto recv_done_instr;
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()})
.ToProto();
recv_done_instr.set_channel_id(handle.handle());
recv_done_instr.set_is_host_transfer(is_host_transfer);
return builder->AddInstruction(std::move(recv_done_instr),
HloOpcode::kRecvDone, {token});
});
}
XlaOp XlaBuilderFriend::BuildRngGetAndUpdateState(XlaBuilder* builder,
int64_t delta,
const Shape& shape) {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_delta(delta);
*instr.mutable_shape() = shape.ToProto();
return builder->AddInstruction(std::move(instr),
HloOpcode::kRngGetAndUpdateState);
});
}
HloInstructionProto* XlaBuilderFriend::GetInstruction(XlaOp op) {
return &op.builder()
->instructions_[op.builder()->handle_to_index_[op.handle_]];
}
HloInstructionProto* XlaBuilderFriend::GetInstructionByHandle(
XlaBuilder* builder, int64_t handle) {
return &builder->instructions_[builder->handle_to_index_[handle]];
}
}  // namespace internal
XlaOp operator-(XlaOp x) { return Neg(x); }
XlaOp operator+(XlaOp x, XlaOp y) { return Add(x, y); }
XlaOp operator-(XlaOp x, XlaOp y) { return Sub(x, y); }
XlaOp operator*(XlaOp x, XlaOp y) { return Mul(x, y); }
XlaOp operator/(XlaOp x, XlaOp y) { return Div(x, y); }
XlaOp operator%(XlaOp x, XlaOp y) { return Rem(x, y); }
XlaOp operator~(XlaOp x) { return Not(x); }
XlaOp operator&(XlaOp x, XlaOp y) { return And(x, y); }
XlaOp operator|(XlaOp x, XlaOp y) { return Or(x, y); }
XlaOp operator^(XlaOp x, XlaOp y) { return Xor(x, y); }
XlaOp operator<<(XlaOp x, XlaOp y) { return ShiftLeft(x, y); }
XlaOp operator>>(XlaOp x, XlaOp y) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, builder->GetShapePtr(x));
if (!ShapeUtil::ElementIsIntegral(*shape)) {
return InvalidArgument(
"Argument to >> operator does not have an integral type (%s).",
ShapeUtil::HumanString(*shape));
}
if (ShapeUtil::ElementIsSigned(*shape)) {
return ShiftRightArithmetic(x, y);
} else {
return ShiftRightLogical(x, y);
}
});
}
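// Illustrative use of the overloaded operators (names are for exposition):
//
//   XlaBuilder b("example");
//   XlaOp x = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {4}), "x");
//   XlaOp y = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {4}), "y");
//   XlaOp z = (x + y) >> ConstantR0<int32_t>(&b, 1);
//
// Because the element type here is signed, operator>> lowers to
// ShiftRightArithmetic; unsigned element types lower to ShiftRightLogical.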
absl::StatusOr<const Shape*> XlaBuilder::GetShapePtr(XlaOp op) const {
TF_RETURN_IF_ERROR(first_error_);
TF_RETURN_IF_ERROR(CheckOpBuilder(op));
auto it = handle_to_index_.find(op.handle());
if (it == handle_to_index_.end()) {
return InvalidArgument("No XlaOp with handle %d", op.handle());
}
return instruction_shapes_.at(it->second).get();
}
absl::StatusOr<Shape> XlaBuilder::GetShape(XlaOp op) const {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(op));
return *shape;
}
absl::StatusOr<std::vector<Shape>> XlaBuilder::GetOperandShapes(
absl::Span<const XlaOp> operands) const {
std::vector<Shape> operand_shapes;
operand_shapes.reserve(operands.size());
for (XlaOp operand : operands) {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
operand_shapes.push_back(*shape);
}
return operand_shapes;
}
absl::StatusOr<std::optional<OpSharding>> XlaBuilder::GetOpSharding(
XlaOp op) const {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpInstruction(op));
if (instr_proto->has_sharding()) {
return instr_proto->sharding();
}
return std::nullopt;
}
std::string XlaBuilder::OpToString(XlaOp op) const {
std::string s;
ToStringHelper(&s, 0, op.handle());
return s;
}
static std::string ShapeToString(const ShapeProto& shape) {
if (shape.tuple_shapes_size() > 1) {
return absl::StrCat(
"(",
absl::StrJoin(shape.tuple_shapes(), ", ",
[&](std::string* s, const ShapeProto& subshape) {
absl::StrAppend(s, ShapeToString(subshape));
}),
")");
}
return absl::StrCat("[", absl::StrJoin(shape.dimensions(), ", "), "]");
}
void XlaBuilder::ToStringHelper(std::string* out, int ident,
int64_t op_handle) const {
const HloInstructionProto& instr =
*(LookUpInstructionByHandle(op_handle).value());
absl::StrAppend(out, std::string(ident, ' '), instr.opcode(),
", shape=", ShapeToString(instr.shape()));
if (instr.has_metadata()) {
absl::StrAppend(out, ", metadata={", instr.metadata().source_file(), ":",
instr.metadata().source_line(), "}");
}
if (instr.operand_ids_size()) {
absl::StrAppend(out, "\n");
}
absl::StrAppend(out, absl::StrJoin(instr.operand_ids(), "\n",
[&](std::string* s, int64_t subop) {
ToStringHelper(s, ident + 2, subop);
}));
}
XlaBuilder::XlaBuilder(const std::string& computation_name)
: name_(computation_name) {}
XlaBuilder::~XlaBuilder() = default;
XlaOp XlaBuilder::ReportError(const absl::Status& error) {
CHECK(!error.ok());
if (die_immediately_on_error_) {
LOG(FATAL) << "error building computation: " << error;
}
if (first_error_.ok()) {
first_error_ = error;
first_error_backtrace_.CreateCurrent(1);
}
return XlaOp(this);
}
XlaOp XlaBuilder::ReportErrorOrReturn(const absl::StatusOr<XlaOp>& op) {
if (!first_error_.ok()) {
return XlaOp(this);
}
if (!op.ok()) {
return ReportError(op.status());
}
return op.value();
}
XlaOp XlaBuilder::ReportErrorOrReturn(
absl::FunctionRef<absl::StatusOr<XlaOp>()> op_creator) {
return ReportErrorOrReturn(op_creator());
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape(
int64_t root_id) const {
TF_RETURN_IF_ERROR(first_error_);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root_proto,
LookUpInstructionByHandle(root_id));
ProgramShape program_shape;
*program_shape.mutable_result() = Shape(root_proto->shape());
const int64_t param_count = parameter_numbers_.size();
for (int64_t i = 0; i < param_count; i++) {
program_shape.add_parameters();
program_shape.add_parameter_names();
}
for (const HloInstructionProto& instr : instructions_) {
if (instr.opcode() == HloOpcodeString(HloOpcode::kParameter)) {
const int64_t index = instr.parameter_number();
TF_RET_CHECK(index >= 0 && index < param_count)
<< "invalid parameter number: " << index;
*program_shape.mutable_parameters(index) = Shape(instr.shape());
*program_shape.mutable_parameter_names(index) = instr.name();
}
}
return program_shape;
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape() const {
TF_RET_CHECK(!instructions_.empty());
return GetProgramShape(instructions_.back().id());
}
absl::StatusOr<ProgramShape> XlaBuilder::GetProgramShape(XlaOp root) const {
if (root.builder_ != this) {
return InvalidArgument("Given root operation is not in this computation.");
}
return GetProgramShape(root.handle());
}
void XlaBuilder::IsConstantVisitor(const int64_t op_handle, int depth,
absl::flat_hash_set<int64_t>* visited,
bool* is_constant) const {
if (visited->contains(op_handle) || !*is_constant) {
return;
}
const HloInstructionProto& instr =
*(LookUpInstructionByHandle(op_handle).value());
HloInstructionProto to_print(instr);
to_print.clear_shape();
const HloOpcode opcode = StringToHloOpcode(instr.opcode()).value();
const std::string indent =
absl::StrJoin(std::vector<absl::string_view>(depth, " "), "");
if (VLOG_IS_ON(2)) {
VLOG(2) << indent << "Visiting:";
for (const auto& l : absl::StrSplit(to_print.DebugString(), '\n')) {
VLOG(2) << indent << l;
}
}
switch (opcode) {
default:
for (const int64_t operand_id : instr.operand_ids()) {
IsConstantVisitor(operand_id, depth + 1, visited, is_constant);
}
break;
case HloOpcode::kGetDimensionSize:
break;
case HloOpcode::kRng:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kCall:
case HloOpcode::kCustomCall:
if (instr.custom_call_target() == "SetBound") {
break;
}
[[fallthrough]];
case HloOpcode::kWhile:
case HloOpcode::kScatter:
case HloOpcode::kSend:
case HloOpcode::kRecv:
case HloOpcode::kParameter:
*is_constant = false;
break;
case HloOpcode::kGetTupleElement: {
const HloInstructionProto& operand_instr =
*(LookUpInstructionByHandle(instr.operand_ids(0)).value());
if (HloOpcodeString(HloOpcode::kTuple) == operand_instr.opcode()) {
IsConstantVisitor(operand_instr.operand_ids(instr.tuple_index()),
depth + 1, visited, is_constant);
} else {
for (const int64_t operand_id : instr.operand_ids()) {
IsConstantVisitor(operand_id, depth + 1, visited, is_constant);
}
}
}
}
if (VLOG_IS_ON(1) && !*is_constant) {
VLOG(1) << indent << "Non-constant: ";
for (const auto& l : absl::StrSplit(to_print.DebugString(), '\n')) {
VLOG(1) << indent << l;
}
}
visited->insert(op_handle);
}
absl::Status XlaBuilder::SetInstructionFrontendAttribute(const XlaOp op,
std::string attribute,
std::string value) {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op));
auto* frontend_attributes = instr_proto->mutable_frontend_attributes();
(*frontend_attributes->mutable_map())[attribute] = std::move(value);
return absl::OkStatus();
}
absl::Status XlaBuilder::SetInstructionSharding(
XlaOp op, const std::optional<OpSharding>& sharding) {
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op));
if (!sharding.has_value()) {
instr_proto->clear_sharding();
return absl::OkStatus();
}
return NormalizeAndAssignSharing(instr_proto, sharding.value());
}
XlaComputation XlaBuilder::BuildAndNoteError() {
DCHECK(parent_builder_ != nullptr);
auto build_status = Build();
if (!build_status.ok()) {
parent_builder_->ReportError(
AddStatus(build_status.status(), absl::StrCat("error from: ", name_)));
return {};
}
return std::move(build_status).value();
}
absl::Status XlaBuilder::GetCurrentStatus() const {
if (!first_error_.ok()) {
std::string backtrace;
first_error_backtrace_.Dump(tsl::DebugWriteToString, &backtrace);
return AppendStatus(first_error_, backtrace);
}
return absl::OkStatus();
}
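// Build() finalizes the computation: it optionally clears dynamic dimensions,
// renames every instruction to `<base>.<id>`, moves the instructions and
// embedded computations into an HloModuleProto, and then resets the builder's
// internal state, so a builder cannot be reused after a successful Build().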
absl::StatusOr<XlaComputation> XlaBuilder::Build(
bool remove_dynamic_dimensions) {
TF_RETURN_IF_ERROR(GetCurrentStatus());
return Build(instructions_.back().id(), remove_dynamic_dimensions);
}
absl::StatusOr<XlaComputation> XlaBuilder::Build(
XlaOp root, bool remove_dynamic_dimensions) {
if (root.builder_ != this) {
return InvalidArgument("Given root operation is not in this computation.");
}
return Build(root.handle(), remove_dynamic_dimensions);
}
absl::StatusOr<XlaComputation> XlaBuilder::Build(
int64_t root_id, bool remove_dynamic_dimensions) {
TF_RETURN_IF_ERROR(GetCurrentStatus());
if (remove_dynamic_dimensions) {
std::function<void(Shape*)> remove_dynamic_dimension = [&](Shape* shape) {
if (shape->tuple_shapes_size() != 0) {
for (int i = 0; i < shape->tuple_shapes_size(); ++i) {
remove_dynamic_dimension(shape->mutable_tuple_shapes(i));
}
}
for (int64_t i = 0; i < shape->dimensions_size(); ++i) {
shape->set_dynamic_dimension(i, false);
}
};
for (size_t index = 0; index < instructions_.size(); ++index) {
remove_dynamic_dimension(instruction_shapes_[index].get());
*instructions_[index].mutable_shape() =
instruction_shapes_[index]->ToProto();
}
}
HloComputationProto entry;
SetProtoIdAndName(&entry, name_, kNameSeparator, GetNextId());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, GetProgramShape(root_id));
*entry.mutable_program_shape() = program_shape.ToProto();
entry.set_root_id(root_id);
for (auto& instruction : instructions_) {
instruction.set_name(
GetFullName(instruction.name(), kNameSeparator, instruction.id()));
entry.add_instructions()->Swap(&instruction);
}
XlaComputation computation(entry.id());
HloModuleProto* module = computation.mutable_proto();
module->set_name(entry.name());
module->set_id(entry.id());
module->set_entry_computation_name(entry.name());
module->set_entry_computation_id(entry.id());
*module->mutable_host_program_shape() = entry.program_shape();
for (auto& e : embedded_) {
module->add_computations()->Swap(&e.second);
}
module->add_computations()->Swap(&entry);
if (!input_output_aliases_.empty() || !buffer_donors_.empty()) {
TF_RETURN_IF_ERROR(PopulateInputOutputAliasAndBufferDonor(
module, program_shape, input_output_aliases_, buffer_donors_));
}
this->instructions_.clear();
this->instruction_shapes_.clear();
this->handle_to_index_.clear();
this->embedded_.clear();
this->parameter_numbers_.clear();
return std::move(computation);
}
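// Validates each recorded input/output alias and buffer donor against the
// program shape before serializing them into the module proto; a parameter
// index that already aliases an output may not also be a buffer donor.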
absl::Status XlaBuilder::PopulateInputOutputAliasAndBufferDonor(
HloModuleProto* module, const ProgramShape& program_shape,
const std::vector<InputOutputAlias>& input_output_aliases,
const absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>&
buffer_donors) {
HloInputOutputAliasConfig io_alias_config(program_shape.result());
for (auto& alias : input_output_aliases) {
if (alias.param_number >= program_shape.parameters_size()) {
return InvalidArgument("Invalid parameter number %ld (total %ld)",
alias.param_number,
program_shape.parameters_size());
}
const Shape& parameter_shape = program_shape.parameters(alias.param_number);
if (!ShapeUtil::IndexIsValid(parameter_shape, alias.param_index)) {
return InvalidArgument("Invalid parameter %ld index: %s",
alias.param_number,
alias.param_index.ToString().c_str());
}
TF_RETURN_IF_ERROR(io_alias_config.SetUpAlias(
alias.output_index, alias.param_number, alias.param_index, alias.kind));
}
*module->mutable_input_output_alias() = io_alias_config.ToProto();
HloBufferDonorConfig buffer_donor_config;
for (auto& donor : buffer_donors) {
if (donor.param_number >= program_shape.parameters_size()) {
return InvalidArgument("Invalid parameter number %ld (total %ld)",
donor.param_number,
program_shape.parameters_size());
}
const Shape& parameter_shape = program_shape.parameters(donor.param_number);
if (!ShapeUtil::IndexIsValid(parameter_shape, donor.param_index)) {
return InvalidArgument("Invalid parameter %ld index: %s",
donor.param_number,
donor.param_index.ToString().c_str());
}
if (io_alias_config.ParameterHasAlias(donor.param_number,
donor.param_index)) {
return InvalidArgument(
"Parameter %ld index %s is already aliased with one output, thus it "
"cannot be added as a buffer donor for any output.",
donor.param_number, donor.param_index.ToString().c_str());
}
TF_RETURN_IF_ERROR(buffer_donor_config.AddBufferDonor(donor.param_number,
donor.param_index));
}
*module->mutable_buffer_donor() = buffer_donor_config.ToProto();
return absl::OkStatus();
}
XlaOp XlaBuilder::MhloDynamicReshape(XlaOp operand, XlaOp output_shape,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (operand_shape->element_type() != shape.element_type()) {
return InvalidArgument(
"Element type of operand %s and output %s must match",
ShapeUtil::HumanString(*operand_shape),
ShapeUtil::HumanString(shape));
}
if (operand_shape->is_static() && shape.is_static() &&
ShapeUtil::ElementsIn(*operand_shape) != ShapeUtil::ElementsIn(shape)) {
return InvalidArgument(
"MhloDynamicReshape has mismatched element counts: from=%d (%s) "
"to=%d (%s)",
ShapeUtil::ElementsIn(*operand_shape),
ShapeUtil::HumanString(*operand_shape), ShapeUtil::ElementsIn(shape),
ShapeUtil::HumanString(shape));
}
TF_ASSIGN_OR_RETURN(const Shape* output_shape_shape,
GetShapePtr(output_shape));
if (output_shape_shape->dimensions(0) != shape.rank()) {
return InvalidArgument(
"output_shape dimension size=%d (%s) and rank of shape=%d (%s) must "
"match",
output_shape_shape->dimensions(0),
ShapeUtil::HumanString(*output_shape_shape), shape.rank(),
ShapeUtil::HumanString(shape));
}
return xla::CustomCall(operand.builder(), "mhlo.dynamic_reshape",
{operand, output_shape},
shape,
"");
});
}
XlaOp XlaBuilder::MhloDynamicBroadcastInDim(
const XlaOp operand, const XlaOp output_dimensions,
absl::Span<const int64_t> broadcast_dimensions, const Shape& output_shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* output_dimensions_shape,
GetShapePtr(output_dimensions));
if (!output_dimensions_shape->IsInteger()) {
return InvalidArgument("output_dimensions must be an integer type %s",
ShapeUtil::HumanString(*output_dimensions_shape));
}
if (output_dimensions_shape->rank() != 1) {
return InvalidArgument("output_dimensions must be rank 1 but got rank %d",
output_dimensions_shape->rank());
}
int64_t operand_rank = operand_shape->rank();
int64_t result_rank = output_shape.rank();
int64_t broadcast_dimensions_size = broadcast_dimensions.size();
if (broadcast_dimensions_size != operand_rank) {
return InvalidArgument(
"broadcast_dimensions size (%d) does not match operand rank (%d)",
broadcast_dimensions_size, operand_rank);
}
if (result_rank < operand_rank) {
return InvalidArgument("result rank (%d) is less than operand rank (%d)",
result_rank, operand_rank);
}
for (int64_t i = 0; i != broadcast_dimensions_size; ++i) {
int64_t dim_index = broadcast_dimensions[i];
if (dim_index < 0 || dim_index >= result_rank) {
return InvalidArgument(
"broadcast_dimensions contains invalid value %d for result with "
"rank %d",
dim_index, result_rank);
}
int64_t dim_size = operand_shape->dimensions(i);
int64_t result_dim_size = output_shape.dimensions(dim_index);
if (dim_size != 1 && dim_size != result_dim_size &&
dim_size != Shape::kUnboundedSize) {
return InvalidArgument(
"size of operand dimension %d (%d) is not compatible with size of "
"result dimension %d (%d)",
i, dim_size, dim_index, result_dim_size);
}
}
return xla::CustomCall(
operand.builder(), "mhlo.dynamic_broadcast_in_dim",
{operand, output_dimensions},
output_shape,
absl::StrCat("{broadcast_dimensions=[",
absl::StrJoin(broadcast_dimensions, ","), "]}"));
});
}
absl::StatusOr<XlaOp> XlaBuilder::InDimBroadcast(
const Shape& shape, XlaOp operand,
absl::Span<const int64_t> broadcast_dimensions) {
TF_RETURN_IF_ERROR(first_error_);
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : broadcast_dimensions) {
instr.add_dimensions(dim);
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_RET_CHECK(!shape.is_unbounded_dynamic())
<< "broadcast op result shapes must be static";
for (int64_t i = 0; i < shape.rank(); i++) {
if (auto it = absl::c_find(broadcast_dimensions, i);
it != broadcast_dimensions.end()) {
TF_RET_CHECK(operand_shape->is_bounded_dynamic_dimension(
it - broadcast_dimensions.begin()) ==
shape.is_bounded_dynamic_dimension(i))
<< " i: " << i << ", shape: " << ShapeUtil::HumanString(shape)
<< ", operand_shape: " << ShapeUtil::HumanString(*operand_shape);
} else {
TF_RET_CHECK(shape.is_static_dimension(i));
}
}
return AddInstruction(std::move(instr), HloOpcode::kBroadcast, {operand});
}
absl::StatusOr<XlaOp> XlaBuilder::AddBroadcastSequence(
const Shape& output_shape, XlaOp operand) {
TF_RETURN_IF_ERROR(first_error_);
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
CHECK(ShapeUtil::IsScalar(*operand_shape) ||
operand_shape->rank() == output_shape.rank());
Shape broadcast_shape =
ShapeUtil::ChangeElementType(output_shape, operand_shape->element_type());
if (ShapeUtil::IsScalar(*operand_shape)) {
return InDimBroadcast(ShapeUtil::MakeStaticShape(broadcast_shape), operand,
{});
}
std::vector<int64_t> broadcast_dimensions;
std::vector<int64_t> reshaped_dimensions;
std::vector<bool> reshaped_dynamic_dimensions;
for (int i = 0; i < operand_shape->rank(); i++) {
if (operand_shape->dimensions(i) == output_shape.dimensions(i)) {
broadcast_dimensions.push_back(i);
reshaped_dimensions.push_back(operand_shape->dimensions(i));
reshaped_dynamic_dimensions.push_back(
operand_shape->is_dynamic_dimension(i));
} else {
TF_RET_CHECK(operand_shape->dimensions(i) == 1 &&
operand_shape->is_static_dimension(i))
<< "An explicit broadcast sequence requires the broadcasted "
"dimensions to be trivial; operand shape: "
<< *operand_shape << "; output_shape: " << output_shape;
}
broadcast_shape.set_dynamic_dimension(
i, operand_shape->is_dynamic_dimension(i));
}
Shape reshaped_shape =
ShapeUtil::MakeShape(operand_shape->element_type(), reshaped_dimensions,
reshaped_dynamic_dimensions);
XlaOp reshaped_operand;
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
reshaped_operand,
ReshapeInternal(reshaped_shape, operand, -1));
}
return InDimBroadcast(broadcast_shape, reshaped_operand,
broadcast_dimensions);
}
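// Illustrative example: broadcasting f32[3,1] to f32[3,4] via the sequence
// above first reshapes the operand to f32[3] (dropping the trivial dimension)
// and then emits an in-dimension broadcast with broadcast_dimensions={0}.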
XlaOp XlaBuilder::UnaryOp(HloOpcode unop, XlaOp operand) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferUnaryOpShape(unop, *operand_shape));
return AddOpWithShape(unop, shape, {operand});
});
}
namespace {
absl::StatusOr<XlaOp> BroadcastToTargetRank(
XlaOp origin, const Shape& origin_shape, const Shape& target_shape,
absl::Span<const int64_t> broadcast_dimensions) {
if (ShapeUtil::IsScalar(origin_shape)) {
return origin;
}
const int64_t origin_rank = origin_shape.rank();
const int64_t target_rank = target_shape.rank();
if (origin_rank >= target_rank) {
return origin;
}
absl::Span<const int64_t> target_dimensions = target_shape.dimensions();
std::vector<int64_t> target_size{target_dimensions.begin(),
target_dimensions.end()};
for (int64_t origin_dim = 0; origin_dim < origin_rank; origin_dim++) {
int64_t target_dim = broadcast_dimensions[origin_dim];
target_size[target_dim] = origin_shape.dimensions(origin_dim);
}
return BroadcastInDim(origin, target_size, broadcast_dimensions);
}
absl::StatusOr<std::vector<XlaOp>> ExtractDimensionSizesAndPadOnesToLeft(
XlaBuilder* builder, XlaOp op, size_t num_dims, int pad_count) {
TF_ASSIGN_OR_RETURN(const Shape* op_shape, builder->GetShapePtr(op));
std::vector<XlaOp> op_dims(
pad_count, ConstantR1<int32_t>(builder, {1}));
for (size_t i = 0; i < num_dims; i++) {
op_dims.push_back(
op_shape->is_static_dimension(i)
? ConstantR1<int32_t>(
builder,
{static_cast<int32_t>(op_shape->dimensions(i))})
: Reshape(GetDimensionSize(op, i), {1}));
}
return op_dims;
}
absl::StatusOr<XlaOp> BroadcastScalarToOutputShapeWithUnbounded(
XlaBuilder* builder, XlaOp scalar, XlaOp output,
const Shape& output_shape) {
TF_ASSIGN_OR_RETURN(const Shape* scalar_shape, builder->GetShapePtr(scalar));
CHECK(ShapeUtil::IsScalar(*scalar_shape));
std::vector<XlaOp> output_sizes(output_shape.rank());
for (size_t i = 0; i < output_shape.rank(); i++) {
output_sizes[i] =
output_shape.is_static_dimension(i)
? ConstantR1<int32_t>(
builder,
{static_cast<int32_t>(output_shape.dimensions(i))})
: Reshape(GetDimensionSize(output, i), {1});
}
return MhloDynamicBroadcastInDim(
scalar, ConcatInDim(builder, output_sizes, 0), {},
output_shape);
}
absl::StatusOr<XlaOp> DegenerateBroadcastWithUnbounded(
XlaBuilder* builder, XlaOp operand, XlaOp output_dimensions,
const Shape& output_shape) {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape,
builder->GetShapePtr(operand));
std::vector<int64_t> broadcast_dimensions(operand_shape->rank());
std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(),
output_shape.rank() - operand_shape->rank());
return MhloDynamicBroadcastInDim(operand, output_dimensions,
broadcast_dimensions, output_shape);
}
struct UnboundedBroadcastResult {
XlaOp lhs;
XlaOp rhs;
};
absl::StatusOr<UnboundedBroadcastResult> BroadcastToOutputShapeWithUnbounded(
XlaBuilder* builder, XlaOp lhs, const Shape& lhs_shape, XlaOp rhs,
const Shape rhs_shape, const Shape& output_shape,
absl::Span<const int64_t> broadcast_dimensions) {
const int64_t lhs_rank = lhs_shape.rank();
const int64_t rhs_rank = rhs_shape.rank();
const int64_t output_rank = output_shape.rank();
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> lhs_dims,
ExtractDimensionSizesAndPadOnesToLeft(
builder, lhs, lhs_rank, output_rank - lhs_rank));
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> rhs_dims,
ExtractDimensionSizesAndPadOnesToLeft(
builder, rhs, rhs_rank, output_rank - rhs_rank));
XlaOp output_dimensions =
Max(ConcatInDim(builder, lhs_dims, 0), ConcatInDim(builder, rhs_dims, 0));
TF_ASSIGN_OR_RETURN(XlaOp lhs_result,
DegenerateBroadcastWithUnbounded(
builder, lhs, output_dimensions, output_shape));
TF_ASSIGN_OR_RETURN(XlaOp rhs_result,
DegenerateBroadcastWithUnbounded(
builder, rhs, output_dimensions, output_shape));
return UnboundedBroadcastResult{lhs_result, rhs_result};
}
}  // namespace
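// Emits `binop` after applying implicit broadcasting: operand ranks are first
// aligned with BroadcastInDim, then any remaining degenerate (size-1)
// dimensions are expanded with AddBroadcastSequence. Shapes with unbounded
// dynamic dimensions instead take the mhlo.dynamic_broadcast_in_dim path
// implemented by the helpers above.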
XlaOp XlaBuilder::BinaryOp(HloOpcode binop, XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
std::optional<ComparisonDirection> direction,
std::optional<Comparison::Type> type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferBinaryOpShape(
binop, *lhs_shape, *rhs_shape, broadcast_dimensions));
XlaOp updated_lhs = lhs;
XlaOp updated_rhs = rhs;
if (!lhs_shape->is_unbounded_dynamic() &&
!rhs_shape->is_unbounded_dynamic()) {
if (lhs_shape->rank() < shape.rank()) {
TF_ASSIGN_OR_RETURN(updated_lhs,
BroadcastToTargetRank(lhs, *lhs_shape, shape,
broadcast_dimensions));
}
if (rhs_shape->rank() < shape.rank()) {
TF_ASSIGN_OR_RETURN(updated_rhs,
BroadcastToTargetRank(rhs, *rhs_shape, shape,
broadcast_dimensions));
}
TF_ASSIGN_OR_RETURN(const Shape* updated_lhs_shape,
GetShapePtr(updated_lhs));
TF_ASSIGN_OR_RETURN(const Shape* updated_rhs_shape,
GetShapePtr(updated_rhs));
if (!ShapeUtil::SameDimensions(shape, *updated_lhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_lhs,
AddBroadcastSequence(shape, updated_lhs));
}
if (!ShapeUtil::SameDimensions(shape, *updated_rhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_rhs,
AddBroadcastSequence(shape, updated_rhs));
}
} else {
if (ShapeUtil::IsScalar(*lhs_shape) || ShapeUtil::IsScalar(*rhs_shape)) {
if (ShapeUtil::IsScalar(*lhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_lhs,
BroadcastScalarToOutputShapeWithUnbounded(
this, lhs, rhs, *rhs_shape));
}
if (ShapeUtil::IsScalar(*rhs_shape)) {
TF_ASSIGN_OR_RETURN(updated_rhs,
BroadcastScalarToOutputShapeWithUnbounded(
this, rhs, lhs, *lhs_shape));
}
} else {
if (!ShapeUtil::SameDimensions(*lhs_shape, *rhs_shape)) {
Shape output_shape = shape;
output_shape.set_element_type(lhs_shape->element_type());
TF_ASSIGN_OR_RETURN(UnboundedBroadcastResult broadcast_result,
BroadcastToOutputShapeWithUnbounded(
this, lhs, *lhs_shape, rhs, *rhs_shape,
output_shape, broadcast_dimensions));
updated_lhs = broadcast_result.lhs;
updated_rhs = broadcast_result.rhs;
}
}
}
if (binop == HloOpcode::kCompare) {
if (!direction.has_value()) {
return InvalidArgument(
"kCompare expects a ComparisonDirection, but none provided.");
}
if (type == std::nullopt) {
return Compare(shape, updated_lhs, updated_rhs, *direction);
} else {
return Compare(shape, updated_lhs, updated_rhs, *direction, *type);
}
}
if (direction.has_value()) {
return InvalidArgument(
"A comparison direction is provided for a non-compare opcode: %s.",
HloOpcodeString(binop));
}
return BinaryOpNoBroadcast(binop, shape, updated_lhs, updated_rhs);
});
}
XlaOp XlaBuilder::BinaryOpNoBroadcast(HloOpcode binop, const Shape& shape,
XlaOp lhs, XlaOp rhs) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), binop, {lhs, rhs});
});
}
absl::StatusOr<XlaOp> XlaBuilder::Compare(const Shape& shape, XlaOp lhs,
XlaOp rhs,
ComparisonDirection direction) {
TF_ASSIGN_OR_RETURN(auto operand_shape, GetShape(lhs));
return Compare(
shape, lhs, rhs, direction,
Comparison::DefaultComparisonType(operand_shape.element_type()));
}
absl::StatusOr<XlaOp> XlaBuilder::Compare(const Shape& shape, XlaOp lhs,
XlaOp rhs,
ComparisonDirection direction,
Comparison::Type type) {
HloInstructionProto instr;
instr.set_comparison_direction(ComparisonDirectionToString(direction));
instr.set_comparison_type(ComparisonTypeToString(type));
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kCompare, {lhs, rhs});
}
absl::StatusOr<XlaOp> XlaBuilder::BroadcastScalarToOutputShape(XlaOp scalar,
XlaOp output) {
TF_ASSIGN_OR_RETURN(const Shape* scalar_shape, GetShapePtr(scalar));
TF_ASSIGN_OR_RETURN(const Shape* output_shape, GetShapePtr(output));
XlaOp updated_output = scalar;
if (output_shape->is_unbounded_dynamic()) {
Shape output_shape_copy = *output_shape;
output_shape_copy.set_element_type(scalar_shape->element_type());
TF_ASSIGN_OR_RETURN(updated_output,
BroadcastScalarToOutputShapeWithUnbounded(
this, scalar, output, output_shape_copy));
return updated_output;
}
TF_ASSIGN_OR_RETURN(updated_output,
AddBroadcastSequence(*output_shape, updated_output));
return updated_output;
}
XlaOp XlaBuilder::TernaryOp(HloOpcode triop, XlaOp lhs, XlaOp rhs, XlaOp ehs) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
XlaOp updated_lhs = lhs;
XlaOp updated_rhs = rhs;
XlaOp updated_ehs = ehs;
if (triop == HloOpcode::kSelect || triop == HloOpcode::kClamp) {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(const Shape* ehs_shape, GetShapePtr(ehs));
TF_ASSIGN_OR_RETURN(
std::optional<Shape> output_shape,
ShapeInference::InferScalarBroadcastShape(
absl::Span<const Shape>({*lhs_shape, *rhs_shape, *ehs_shape})));
if (output_shape.has_value()) {
if (ShapeUtil::IsScalar(*lhs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_lhs,
BroadcastScalarToOutputShape(
lhs,
ShapeUtil::Equal(*output_shape, *rhs_shape) ? rhs : ehs));
}
if (ShapeUtil::IsScalar(*rhs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_rhs,
BroadcastScalarToOutputShape(
rhs,
ShapeUtil::Equal(*output_shape, *lhs_shape) ? lhs : ehs));
}
if (ShapeUtil::IsScalar(*ehs_shape)) {
TF_ASSIGN_OR_RETURN(
updated_ehs,
BroadcastScalarToOutputShape(
ehs,
ShapeUtil::Equal(*output_shape, *lhs_shape) ? lhs : rhs));
}
}
}
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(updated_lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(updated_rhs));
TF_ASSIGN_OR_RETURN(const Shape* ehs_shape, GetShapePtr(updated_ehs));
TF_ASSIGN_OR_RETURN(const Shape inferred_shape,
ShapeInference::InferTernaryOpShape(
triop, *lhs_shape, *rhs_shape, *ehs_shape));
return AddOpWithShape(triop, inferred_shape,
{updated_lhs, updated_rhs, updated_ehs});
});
}
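// Illustrative example: Select(pred_scalar, a, b) with a and b of shape
// f32[2,3] broadcasts the scalar predicate to pred[2,3] before emitting the
// kSelect instruction.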
XlaOp XlaBuilder::ConstantLiteral(const LiteralSlice& literal) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (literal.shape().IsArray() && literal.element_count() > 1 &&
literal.IsAllFirst()) {
Literal scalar = LiteralUtil::GetFirstScalarLiteral(literal);
HloInstructionProto instr;
*instr.mutable_shape() = scalar.shape().ToProto();
*instr.mutable_literal() = scalar.ToProto();
XlaOp scalar_op;
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
scalar_op, AddInstruction(std::move(instr), HloOpcode::kConstant));
}
return Broadcast(scalar_op, literal.shape().dimensions());
} else {
HloInstructionProto instr;
*instr.mutable_shape() = literal.shape().ToProto();
*instr.mutable_literal() = literal.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kConstant);
}
});
}
XlaOp XlaBuilder::Iota(const Shape& shape, int64_t iota_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!shape.is_static()) {
return InvalidArgument(
"The output of iota must not have dynamic dimensions: %s",
ShapeUtil::HumanString(shape));
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(iota_dimension);
return AddInstruction(std::move(instr), HloOpcode::kIota);
});
}
XlaOp XlaBuilder::Iota(PrimitiveType type, int64_t size) {
return Iota(ShapeUtil::MakeShape(type, {size}), 0);
}
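// Illustrative example: Iota(S32, 4) yields the rank-1 value [0, 1, 2, 3].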
XlaOp XlaBuilder::Call(const XlaComputation& computation,
absl::Span<const XlaOp> operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferCallShape(
operand_shape_ptrs,
called_program_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kCall, operands);
});
}
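// Like Call(), but additionally marks the instruction as a composite and
// records "composite.name", "composite.attributes" (default "{}"), and
// "composite.version" (default "0") as frontend attributes.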
XlaOp XlaBuilder::CompositeCall(const XlaComputation& computation,
absl::Span<const XlaOp> operands,
const std::string& name,
std::optional<absl::string_view> attributes,
std::optional<int64_t> version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferCallShape(
operand_shape_ptrs,
called_program_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(computation, &instr);
instr.set_is_composite(true);
TF_ASSIGN_OR_RETURN(
XlaOp instruction,
AddInstruction(std::move(instr), HloOpcode::kCall, operands));
TF_RETURN_IF_ERROR(
SetInstructionFrontendAttribute(instruction, "composite.name", name));
TF_RETURN_IF_ERROR(SetInstructionFrontendAttribute(
instruction, "composite.attributes",
attributes.has_value() ? std::string(*attributes) : "{}"));
TF_RETURN_IF_ERROR(SetInstructionFrontendAttribute(
instruction, "composite.version",
version.has_value() ? std::to_string(*version) : "0"));
return instruction;
});
}
XlaOp XlaBuilder::Parameter(
int64_t parameter_number, const Shape& shape, const std::string& name,
const std::vector<bool>& replicated_at_leaf_buffers) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (!parameter_numbers_.insert(parameter_number).second) {
return InvalidArgument("parameter %d already registered",
parameter_number);
}
instr.set_parameter_number(parameter_number);
instr.set_name(name);
*instr.mutable_shape() = shape.ToProto();
if (!replicated_at_leaf_buffers.empty()) {
auto replication = instr.mutable_parameter_replication();
for (bool replicated : replicated_at_leaf_buffers) {
replication->add_replicated_at_leaf_buffers(replicated);
}
}
return AddInstruction(std::move(instr), HloOpcode::kParameter);
});
}
XlaOp XlaBuilder::Broadcast(XlaOp operand,
absl::Span<const int64_t> broadcast_sizes) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
const Shape& shape,
ShapeInference::InferBroadcastShape(*operand_shape, broadcast_sizes));
const int64_t operand_rank = operand_shape->rank();
std::vector<int64_t> dimensions(operand_rank);
for (int i = 0; i < operand_rank; ++i) {
dimensions[i] = i + shape.rank() - operand_rank;
}
return InDimBroadcast(shape, operand, dimensions);
});
}
XlaOp XlaBuilder::BroadcastInDim(
XlaOp operand, absl::Span<const int64_t> out_dim_size,
absl::Span<const int64_t> broadcast_dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(auto output_shape,
ShapeUtil::MakeValidatedShape(
operand_shape->element_type(), out_dim_size));
TF_RET_CHECK(!output_shape.is_unbounded_dynamic())
<< "BroadcastInDim output must shape be static or bounded dynamic "
<< ShapeUtil::HumanString(output_shape);
int64_t broadcast_rank = broadcast_dimensions.size();
if (operand_shape->rank() != broadcast_rank) {
return InvalidArgument(
"Size of broadcast_dimensions has to match operand's rank; operand "
"rank: %lld, size of broadcast_dimensions %u.",
operand_shape->rank(), broadcast_dimensions.size());
}
for (int i = 0; i < broadcast_rank; i++) {
const int64_t num_dims = out_dim_size.size();
      if (broadcast_dimensions[i] < 0 || broadcast_dimensions[i] >= num_dims) {
        return InvalidArgument("Broadcast dimension %lld is out of bounds",
broadcast_dimensions[i]);
}
output_shape.set_dynamic_dimension(
broadcast_dimensions[i],
operand_shape->is_bounded_dynamic_dimension(i));
}
TF_RETURN_IF_ERROR(ShapeInference::InferBroadcastShape(
*operand_shape, output_shape, broadcast_dimensions)
.status());
std::vector<int64_t> in_dim_size(out_dim_size.begin(), out_dim_size.end());
std::vector<bool> in_dim_dynamic(out_dim_size.size(), false);
for (int i = 0; i < broadcast_rank; i++) {
in_dim_size[broadcast_dimensions[i]] =
(operand_shape->is_unbounded_dynamic_dimension(i))
? out_dim_size[broadcast_dimensions[i]]
: operand_shape->dimensions(i);
in_dim_dynamic[broadcast_dimensions[i]] =
operand_shape->is_bounded_dynamic_dimension(i);
}
const auto& in_dim_shape = ShapeUtil::MakeShape(
operand_shape->element_type(), in_dim_size, in_dim_dynamic);
TF_ASSIGN_OR_RETURN(
XlaOp in_dim_broadcast,
InDimBroadcast(in_dim_shape, operand, broadcast_dimensions));
if (ShapeUtil::Equal(in_dim_shape, output_shape)) {
return in_dim_broadcast;
}
return AddBroadcastSequence(output_shape, in_dim_broadcast);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReshapeInternal(const Shape& shape,
XlaOp operand,
int64_t inferred_dimension) {
TF_RETURN_IF_ERROR(first_error_);
if (shape.is_unbounded_dynamic()) {
return InvalidArgument(
"Reshaping with unbounded result shape is not supported.");
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
if (inferred_dimension != -1) {
instr.add_dimensions(inferred_dimension);
}
return AddInstruction(std::move(instr), HloOpcode::kReshape, {operand});
}
XlaOp XlaBuilder::Slice(XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferSliceShape(
*operand_shape, start_indices,
limit_indices, strides));
return SliceInternal(shape, operand, start_indices, limit_indices, strides);
});
}
absl::StatusOr<XlaOp> XlaBuilder::SliceInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int i = 0, end = start_indices.size(); i < end; i++) {
auto* slice_config = instr.add_slice_dimensions();
slice_config->set_start(start_indices[i]);
slice_config->set_limit(limit_indices[i]);
slice_config->set_stride(strides[i]);
}
return AddInstruction(std::move(instr), HloOpcode::kSlice, {operand});
}
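// Convenience wrapper that slices a single dimension. Illustrative example:
// SliceInDim(op, /*start_index=*/0, /*limit_index=*/2, /*stride=*/1,
// /*dimno=*/0) on an f32[4,8] operand yields f32[2,8].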
XlaOp XlaBuilder::SliceInDim(XlaOp operand, int64_t start_index,
int64_t limit_index, int64_t stride,
int64_t dimno) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
std::vector<int64_t> starts(shape->rank(), 0);
std::vector<int64_t> limits(shape->dimensions().begin(),
shape->dimensions().end());
std::vector<int64_t> strides(shape->rank(), 1);
starts[dimno] = start_index;
limits[dimno] = limit_index;
strides[dimno] = stride;
return Slice(operand, starts, limits, strides);
});
}
XlaOp XlaBuilder::DynamicSlice(XlaOp operand,
absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> start_indices_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& start_indices_shapes,
GetOperandShapes(start_indices));
absl::c_transform(start_indices_shapes,
std::back_inserter(start_indices_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferDynamicSliceShape(
*operand_shape, start_indices_shapes, slice_sizes));
return DynamicSliceInternal(shape, operand, start_indices, slice_sizes);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DynamicSliceInternal(
const Shape& shape, XlaOp operand, absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t size : slice_sizes) {
instr.add_dynamic_slice_sizes(size);
}
std::vector<XlaOp> operands = {operand};
operands.insert(operands.end(), start_indices.begin(), start_indices.end());
return AddInstruction(std::move(instr), HloOpcode::kDynamicSlice, operands);
}
XlaOp XlaBuilder::DynamicUpdateSlice(XlaOp operand, XlaOp update,
absl::Span<const XlaOp> start_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* update_shape, GetShapePtr(update));
std::vector<const Shape*> start_indices_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& start_indices_shapes,
GetOperandShapes(start_indices));
absl::c_transform(start_indices_shapes,
std::back_inserter(start_indices_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferDynamicUpdateSliceShape(
*operand_shape, *update_shape, start_indices_shapes));
return DynamicUpdateSliceInternal(shape, operand, update, start_indices);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DynamicUpdateSliceInternal(
const Shape& shape, XlaOp operand, XlaOp update,
absl::Span<const XlaOp> start_indices) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
std::vector<XlaOp> operands = {operand, update};
operands.insert(operands.end(), start_indices.begin(), start_indices.end());
return AddInstruction(std::move(instr), HloOpcode::kDynamicUpdateSlice,
operands);
}
XlaOp XlaBuilder::ConcatInDim(absl::Span<const XlaOp> operands,
int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferConcatOpShape(
operand_shape_ptrs, dimension));
return ConcatInDimInternal(shape, operands, dimension);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ConcatInDimInternal(
const Shape& shape, absl::Span<const XlaOp> operands, int64_t dimension) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kConcatenate, operands);
}
XlaOp XlaBuilder::Pad(XlaOp operand, XlaOp padding_value,
const PaddingConfig& padding_config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* padding_value_shape,
GetShapePtr(padding_value));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferPadShape(
*operand_shape, *padding_value_shape, padding_config));
return PadInternal(shape, operand, padding_value, padding_config);
});
}
XlaOp XlaBuilder::PadInDim(XlaOp operand, XlaOp padding_value, int64_t dimno,
int64_t pad_lo, int64_t pad_hi) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
PaddingConfig padding_config = MakeNoPaddingConfig(shape->rank());
auto* dims = padding_config.mutable_dimensions(dimno);
dims->set_edge_padding_low(pad_lo);
dims->set_edge_padding_high(pad_hi);
return Pad(operand, padding_value, padding_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::PadInternal(
const Shape& shape, XlaOp operand, XlaOp padding_value,
const PaddingConfig& padding_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_padding_config() = padding_config;
return AddInstruction(std::move(instr), HloOpcode::kPad,
{operand, padding_value});
}
XlaOp XlaBuilder::Reshape(XlaOp operand, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape shape, ShapeInference::InferReshapeShape(
*operand_shape, dimensions,
new_sizes, inferred_dimension));
XlaOp transposed = IsIdentityPermutation(dimensions)
? operand
: Transpose(operand, dimensions);
return ReshapeInternal(shape, transposed, inferred_dimension);
});
}
XlaOp XlaBuilder::Reshape(XlaOp operand, absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
std::vector<int64_t> dimensions(shape->dimensions_size());
std::iota(dimensions.begin(), dimensions.end(), 0);
return Reshape(operand, dimensions, new_sizes, inferred_dimension);
});
}
XlaOp XlaBuilder::Reshape(const Shape& shape, XlaOp operand,
int64_t inferred_dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
return ReshapeInternal(shape, operand, inferred_dimension);
});
}
XlaOp XlaBuilder::DynamicReshape(XlaOp operand,
absl::Span<const XlaOp> dim_sizes,
absl::Span<const int64_t> new_size_bounds,
const std::vector<bool>& dims_are_dynamic) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> dim_size_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& dim_size_shapes,
GetOperandShapes(dim_sizes));
absl::c_transform(dim_size_shapes, std::back_inserter(dim_size_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferDynamicReshapeShape(
*operand_shape, dim_size_shape_ptrs,
new_size_bounds, dims_are_dynamic));
TF_RETURN_IF_ERROR(first_error_);
std::vector<XlaOp> operands;
operands.reserve(1 + dim_sizes.size());
operands.push_back(operand);
for (const XlaOp& dim_size : dim_sizes) {
operands.push_back(dim_size);
}
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kDynamicReshape,
operands);
});
}
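// Collapses a run of consecutive dimensions into a single dimension by
// multiplying their sizes together and reshaping. Collapsing zero or one
// dimension is a no-op; non-consecutive dimension lists are rejected.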
XlaOp XlaBuilder::Collapse(XlaOp operand,
absl::Span<const int64_t> dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (dimensions.size() <= 1) {
return operand;
}
for (absl::Span<const int64_t>::size_type i = 1; i < dimensions.size();
++i) {
if (dimensions[i] - 1 != dimensions[i - 1]) {
return InvalidArgument(
"Collapsed dimensions are not in consecutive order.");
}
}
TF_ASSIGN_OR_RETURN(const Shape* original_shape, GetShapePtr(operand));
VLOG(3) << "original shape: " << ShapeUtil::HumanString(*original_shape);
VLOG(3) << "dims to collapse: " << absl::StrJoin(dimensions, ",");
std::vector<int64_t> new_sizes;
for (int i = 0; i < original_shape->rank(); ++i) {
if (i <= dimensions.front() || i > dimensions.back()) {
new_sizes.push_back(original_shape->dimensions(i));
} else {
new_sizes.back() *= original_shape->dimensions(i);
}
}
VLOG(3) << "new sizes: [" << absl::StrJoin(new_sizes, ",") << "]";
return Reshape(operand, new_sizes);
});
}
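// Builds a one-parameter computation that returns its argument unchanged.
// Select uses it below to lower tuple-shaped selects into a Conditional
// whose branches simply forward their operand.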
static absl::StatusOr<XlaComputation> PassthroughComputation(
const Shape& shape) {
XlaBuilder builder("dummy");
XlaOp out = Parameter(&builder, 0, shape, "p");
return builder.Build(out);
}
XlaOp XlaBuilder::Select(XlaOp pred, XlaOp on_true, XlaOp on_false) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* true_shape, GetShapePtr(on_true));
TF_ASSIGN_OR_RETURN(const Shape* false_shape, GetShapePtr(on_false));
TF_RET_CHECK(true_shape->IsTuple() == false_shape->IsTuple());
if (true_shape->IsTuple()) {
TF_ASSIGN_OR_RETURN(XlaComputation passthrough_true,
PassthroughComputation(*true_shape));
TF_ASSIGN_OR_RETURN(XlaComputation passthrough_false,
PassthroughComputation(*false_shape));
return Conditional(pred, on_true, passthrough_true, on_false,
passthrough_false);
}
return TernaryOp(HloOpcode::kSelect, pred, on_true, on_false);
});
}
XlaOp XlaBuilder::Tuple(absl::Span<const XlaOp> elements) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(elements));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferVariadicOpShape(
HloOpcode::kTuple, operand_shape_ptrs));
return TupleInternal(shape, elements);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TupleInternal(
const Shape& shape, absl::Span<const XlaOp> elements) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kTuple, elements);
}
XlaOp XlaBuilder::GetTupleElement(XlaOp tuple_data, int64_t index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* tuple_shape, GetShapePtr(tuple_data));
if (!tuple_shape->IsTuple()) {
return InvalidArgument(
"Operand to GetTupleElement() is not a tuple; got %s",
ShapeUtil::HumanString(*tuple_shape));
}
if (index < 0 || index >= ShapeUtil::TupleElementCount(*tuple_shape)) {
return InvalidArgument(
"GetTupleElement() index (%d) out of range for tuple shape %s", index,
ShapeUtil::HumanString(*tuple_shape));
}
return GetTupleElementInternal(
ShapeUtil::GetTupleElementShape(*tuple_shape, index), tuple_data,
index);
});
}
absl::StatusOr<XlaOp> XlaBuilder::GetTupleElementInternal(const Shape& shape,
XlaOp tuple_data,
int64_t index) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_tuple_index(index);
return AddInstruction(std::move(instr), HloOpcode::kGetTupleElement,
{tuple_data});
}
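// Dot contracts lhs dimension 0 when lhs is rank-1 and dimension 1
// otherwise, always against rhs dimension 0, covering the vector-vector,
// matrix-vector, and matrix-matrix cases.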
XlaOp XlaBuilder::Dot(XlaOp lhs, XlaOp rhs,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
DotDimensionNumbers dimension_numbers;
dimension_numbers.add_lhs_contracting_dimensions(
lhs_shape->dimensions_size() == 1 ? 0 : 1);
dimension_numbers.add_rhs_contracting_dimensions(0);
return DotGeneral(lhs, rhs, dimension_numbers, precision_config,
preferred_element_type);
});
}
XlaOp XlaBuilder::DotGeneral(
XlaOp lhs, XlaOp rhs, const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferDotOpShape(
*lhs_shape, *rhs_shape, dimension_numbers, preferred_element_type));
return DotGeneralInternal(shape, lhs, rhs, dimension_numbers,
precision_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::DotGeneralInternal(
const Shape& shape, XlaOp lhs, XlaOp rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_dot_dimension_numbers() = dimension_numbers;
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kDot, {lhs, rhs});
}
XlaOp XlaBuilder::SparseDot(
XlaOp lhs, XlaOp rhs, absl::Span<const XlaOp> sparse_meta,
absl::Span<const SparsityDescriptor> sparsity,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferDotOpShape(
*lhs_shape, *rhs_shape, dimension_numbers,
preferred_element_type, sparsity));
std::vector<XlaOp> operands{lhs, rhs};
operands.insert(operands.end(), sparse_meta.begin(), sparse_meta.end());
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_dot_dimension_numbers() = dimension_numbers;
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
for (const SparsityDescriptor& descriptor : sparsity) {
*instr.add_dot_sparsity() = descriptor;
}
return AddInstruction(std::move(instr), HloOpcode::kDot, operands);
});
}
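// Validates that both convolution operands have equal rank, that the rank
// leaves room for the batch and feature dimensions, and that each spatial
// dimension list in `dimension_numbers` has the expected length with every
// entry in bounds.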
absl::Status XlaBuilder::VerifyConvolution(
const Shape& lhs_shape, const Shape& rhs_shape,
const ConvolutionDimensionNumbers& dimension_numbers) const {
if (lhs_shape.rank() != rhs_shape.rank()) {
return InvalidArgument(
"Convolution arguments must have same number of "
"dimensions. Got: %s and %s",
ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
}
int num_dims = lhs_shape.rank();
if (num_dims < 2) {
return InvalidArgument(
"Convolution expects argument arrays with >= 3 dimensions. "
"Got: %s and %s",
ShapeUtil::HumanString(lhs_shape), ShapeUtil::HumanString(rhs_shape));
}
int num_spatial_dims = num_dims - 2;
const auto check_spatial_dimensions =
[&](absl::string_view field_name,
absl::Span<const int64_t> numbers) -> absl::Status {
if (numbers.size() != num_spatial_dims) {
return InvalidArgument("Expected %d elements for %s, but got %d.",
num_spatial_dims, field_name, numbers.size());
}
for (int i = 0; i < numbers.size(); ++i) {
if (numbers[i] < 0 || numbers[i] >= num_dims) {
return InvalidArgument("Convolution %s[%d] is out of bounds: %d",
field_name, i, numbers[i]);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
check_spatial_dimensions("input_spatial_dimensions",
dimension_numbers.input_spatial_dimensions()));
TF_RETURN_IF_ERROR(
check_spatial_dimensions("kernel_spatial_dimensions",
dimension_numbers.kernel_spatial_dimensions()));
return check_spatial_dimensions(
"output_spatial_dimensions",
dimension_numbers.output_spatial_dimensions());
}
XlaOp XlaBuilder::Conv(XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> window_strides,
Padding padding, int64_t feature_group_count,
int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
feature_group_count, batch_group_count, precision_config,
preferred_element_type);
}
XlaOp XlaBuilder::ConvWithGeneralPadding(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvGeneral(lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
feature_group_count, batch_group_count, precision_config,
preferred_element_type);
}
XlaOp XlaBuilder::ConvWithGeneralDimensions(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_RETURN_IF_ERROR(
VerifyConvolution(*lhs_shape, *rhs_shape, dimension_numbers));
std::vector<int64_t> base_area_dimensions(
dimension_numbers.input_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < base_area_dimensions.size();
++i) {
base_area_dimensions[i] =
lhs_shape->dimensions(dimension_numbers.input_spatial_dimensions(i));
}
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
return ConvGeneral(lhs, rhs, window_strides,
MakePadding(base_area_dimensions, window_dimensions,
window_strides, padding),
dimension_numbers, feature_group_count,
batch_group_count, precision_config,
preferred_element_type);
});
}
XlaOp XlaBuilder::ConvGeneral(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return ConvGeneralDilated(lhs, rhs, window_strides, padding, {}, {},
dimension_numbers, feature_group_count,
batch_group_count, precision_config,
preferred_element_type);
}
XlaOp XlaBuilder::ConvGeneralDilated(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::optional<std::vector<bool>> window_reversal) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
TF_RETURN_IF_ERROR(
VerifyConvolution(*lhs_shape, *rhs_shape, dimension_numbers));
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
TF_ASSIGN_OR_RETURN(Window window,
ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides, padding,
lhs_dilation, rhs_dilation, window_reversal));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferConvolveShape(
*lhs_shape, *rhs_shape, feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
return ConvGeneralDilatedInternal(shape, lhs, rhs, window, window_strides,
padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count,
batch_group_count, precision_config);
});
}
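// Shared helper for the DynamicConvolution* ops below: runs the same window
// and shape inference as a static convolution and returns a partially
// populated HloInstructionProto; each caller sets its own custom_call_target
// before adding the instruction.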
absl::StatusOr<HloInstructionProto> XlaBuilder::DynamicConvInstruction(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
TF_ASSIGN_OR_RETURN(const Shape* lhs_shape, GetShapePtr(lhs));
TF_ASSIGN_OR_RETURN(const Shape* rhs_shape, GetShapePtr(rhs));
std::vector<int64_t> window_dimensions(
dimension_numbers.kernel_spatial_dimensions_size());
for (std::vector<int64_t>::size_type i = 0; i < window_dimensions.size();
++i) {
window_dimensions[i] =
rhs_shape->dimensions(dimension_numbers.kernel_spatial_dimensions(i));
}
TF_ASSIGN_OR_RETURN(Window window, ShapeInference::InferWindowFromDimensions(
window_dimensions, window_strides,
padding, lhs_dilation, rhs_dilation));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferConvolveShape(
*lhs_shape, *rhs_shape, feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = window;
*instr.mutable_convolution_dimension_numbers() = dimension_numbers;
instr.set_feature_group_count(feature_group_count);
instr.set_batch_group_count(batch_group_count);
instr.set_padding_type(padding_type);
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return std::move(instr);
}
XlaOp XlaBuilder::DynamicConvInputGrad(
XlaOp input_sizes, XlaOp lhs, XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionInputGrad");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{input_sizes, lhs, rhs});
});
}
XlaOp XlaBuilder::DynamicConvKernelGrad(
XlaOp activations, XlaOp gradients,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(activations, gradients, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision_config, padding_type,
preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionKernelGrad");
instr.mutable_shape()->clear_is_dynamic_dimension();
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{activations, gradients});
});
}
XlaOp XlaBuilder::DynamicConvForward(
XlaOp lhs, XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
DynamicConvInstruction(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type));
instr.set_custom_call_target("DynamicConvolutionForward");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, {lhs, rhs});
});
}
absl::StatusOr<XlaOp> XlaBuilder::ConvGeneralDilatedInternal(
const Shape& shape, XlaOp lhs, XlaOp rhs, const Window& window,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = window;
*instr.mutable_convolution_dimension_numbers() = dimension_numbers;
instr.set_feature_group_count(feature_group_count);
instr.set_batch_group_count(batch_group_count);
if (precision_config != nullptr) {
*instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kConvolution, {lhs, rhs});
}
XlaOp XlaBuilder::Fft(XlaOp operand, const FftType fft_type,
const absl::Span<const int64_t> fft_length) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferFftShape(
*operand_shape, fft_type, fft_length));
return FftInternal(shape, operand, fft_type, fft_length);
});
}
absl::StatusOr<XlaOp> XlaBuilder::FftInternal(
const Shape& shape, XlaOp operand, const FftType fft_type,
const absl::Span<const int64_t> fft_length) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_fft_type(fft_type);
for (int64_t i : fft_length) {
instr.add_fft_length(i);
}
return AddInstruction(std::move(instr), HloOpcode::kFft, {operand});
}
absl::StatusOr<XlaOp> XlaBuilder::TriangularSolveInternal(
const Shape& shape, XlaOp a, XlaOp b, TriangularSolveOptions options) {
HloInstructionProto instr;
*instr.mutable_triangular_solve_options() = std::move(options);
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kTriangularSolve, {a, b});
}
absl::StatusOr<XlaOp> XlaBuilder::CholeskyInternal(const Shape& shape, XlaOp a,
bool lower) {
HloInstructionProto instr;
CholeskyOptions& options = *instr.mutable_cholesky_options();
options.set_lower(lower);
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kCholesky, {a});
}
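// Lowers Infeed to a (data, token) kInfeed instruction fed by a freshly
// created kAfterAll token, then returns a kGetTupleElement extracting the
// data at index 0. When a sharding is set, the token instruction is assigned
// to device 0, and tuple shardings gain an extra device-0 entry for the
// token component.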
XlaOp XlaBuilder::Infeed(const Shape& shape, const std::string& config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Given shape to Infeed must have a layout");
}
const Shape infeed_instruction_shape =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
*instr.mutable_shape() = infeed_instruction_shape.ToProto();
instr.set_infeed_config(config);
if (shape.IsArray() && sharding() &&
sharding()->type() == OpSharding::OTHER) {
return InvalidArgument(
"Tiled sharding is not yet supported for array-shaped infeeds");
}
if (sharding() && sharding()->type() == OpSharding::REPLICATED) {
return InvalidArgument(
"Replicated sharding is not yet supported for infeeds");
}
XlaOp token;
auto make_token = [&]() {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(token_instr), HloOpcode::kAfterAll, {});
};
if (sharding()) {
OpSharding sharding = sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this, sharding);
TF_ASSIGN_OR_RETURN(token, make_token());
} else {
TF_ASSIGN_OR_RETURN(token, make_token());
}
XlaOp infeed;
if (sharding() && sharding()->type() == OpSharding::TUPLE) {
OpSharding infeed_instruction_sharding = *sharding();
*infeed_instruction_sharding.add_tuple_shardings() =
sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this,
infeed_instruction_sharding);
TF_ASSIGN_OR_RETURN(infeed, AddInstruction(std::move(instr),
HloOpcode::kInfeed, {token}));
} else {
TF_ASSIGN_OR_RETURN(infeed, AddInstruction(std::move(instr),
HloOpcode::kInfeed, {token}));
}
HloInstructionProto infeed_data;
*infeed_data.mutable_shape() = shape.ToProto();
infeed_data.set_tuple_index(0);
return AddInstruction(std::move(infeed_data), HloOpcode::kGetTupleElement,
{infeed});
});
}
XlaOp XlaBuilder::InfeedWithToken(XlaOp token, const Shape& shape,
const std::string& config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Given shape to Infeed must have a layout");
}
const Shape infeed_instruction_shape =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
if (shape.IsArray() && sharding() &&
sharding()->type() == OpSharding::OTHER) {
return InvalidArgument(
"Tiled sharding is not yet supported for array-shaped infeeds");
}
if (sharding() && sharding()->type() == OpSharding::REPLICATED) {
return InvalidArgument(
"Replicated sharding is not yet supported for infeeds");
}
return InfeedWithTokenInternal(infeed_instruction_shape, token, config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::InfeedWithTokenInternal(
const Shape& infeed_instruction_shape, XlaOp token,
const std::string& config) {
HloInstructionProto instr;
*instr.mutable_shape() = infeed_instruction_shape.ToProto();
instr.set_infeed_config(config);
return AddInstruction(std::move(instr), HloOpcode::kInfeed, {token});
}
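// Lowers Outfeed to a kOutfeed over (operand, token), with the token built
// as a fresh kAfterAll. Sharding handling mirrors Infeed: the token is
// assigned to device 0 and tuple shardings gain a device-0 entry. A trailing
// unsharded empty tuple is emitted as the final instruction.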
void XlaBuilder::Outfeed(XlaOp operand, const Shape& shape_with_layout,
const std::string& outfeed_config) {
ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Given shape to Outfeed must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"Outfeed shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
*instr.mutable_outfeed_shape() = shape_with_layout.ToProto();
instr.set_outfeed_config(outfeed_config);
XlaOp token;
auto make_token = [&]() {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(token_instr), HloOpcode::kAfterAll, {});
};
auto make_outfeed = [&](XlaOp token) {
return AddInstruction(std::move(instr), HloOpcode::kOutfeed,
{operand, token});
};
if (sharding()) {
XlaScopedShardingAssignment scoped_sharding(
this, sharding_builder::AssignDevice(0));
TF_ASSIGN_OR_RETURN(token, make_token());
} else {
TF_ASSIGN_OR_RETURN(token, make_token());
}
if (sharding()) {
OpSharding tuple_sharding = *sharding();
if (tuple_sharding.type() != OpSharding::TUPLE) {
tuple_sharding = sharding_builder::Tuple({});
*tuple_sharding.add_tuple_shardings() = *sharding();
}
*tuple_sharding.add_tuple_shardings() = sharding_builder::AssignDevice(0);
XlaScopedShardingAssignment scoped_sharding(this, tuple_sharding);
TF_RETURN_IF_ERROR(make_outfeed(token).status());
} else {
TF_RETURN_IF_ERROR(make_outfeed(token).status());
}
HloInstructionProto tuple_instr;
*tuple_instr.mutable_shape() = ShapeUtil::MakeNil().ToProto();
{
XlaScopedShardingAssignment scoped_sharding(this, std::nullopt);
TF_ASSIGN_OR_RETURN(
XlaOp empty_tuple,
AddInstruction(std::move(tuple_instr), HloOpcode::kTuple, {}));
return empty_tuple;
}
});
}
XlaOp XlaBuilder::OutfeedWithToken(XlaOp operand, XlaOp token,
const Shape& shape_with_layout,
const std::string& outfeed_config) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Given shape to Outfeed must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"Outfeed shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
return OutfeedWithTokenInternal(operand, token, shape_with_layout,
outfeed_config);
});
}
absl::StatusOr<XlaOp> XlaBuilder::OutfeedWithTokenInternal(
XlaOp operand, XlaOp token, const Shape& shape_with_layout,
const std::string& outfeed_config) {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
*instr.mutable_outfeed_shape() = shape_with_layout.ToProto();
instr.set_outfeed_config(outfeed_config);
return AddInstruction(std::move(instr), HloOpcode::kOutfeed,
{operand, token});
}
XlaOp XlaBuilder::CreateToken() {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(instr), HloOpcode::kAfterAll);
});
}
XlaOp XlaBuilder::AfterAll(absl::Span<const XlaOp> tokens) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
for (int i = 0, end = tokens.size(); i < end; ++i) {
XlaOp operand = tokens[i];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!operand_shape->IsToken()) {
return InvalidArgument(
"All operands to AfterAll must be tokens; operand %d has shape %s",
i, ShapeUtil::HumanString(*operand_shape));
}
}
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
return AddInstruction(std::move(instr), HloOpcode::kAfterAll, tokens);
});
}
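// Validates a custom call before lowering: call targets starting with '$'
// are reserved, and when operand layouts are constrained the result shape
// and every operand shape must carry a layout, with one shape per operand.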
XlaOp XlaBuilder::CustomCall(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const Shape& shape, const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, std::optional<Window> window,
std::optional<ConvolutionDimensionNumbers> dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (absl::StartsWith(call_target_name, "$")) {
return InvalidArgument(
"Invalid custom_call_target \"%s\": Call targets that start with '$' "
"are reserved for internal use.",
call_target_name);
}
if (operand_shapes_with_layout.has_value()) {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument(
"Result shape must have layout for custom call with constrained "
"layout.");
}
if (operands.size() != operand_shapes_with_layout->size()) {
return InvalidArgument(
"Must specify a shape with layout for each operand for custom call "
"with constrained layout; given %d shapes, expected %d",
operand_shapes_with_layout->size(), operands.size());
}
int64_t operand_num = 0;
for (const Shape& operand_shape : *operand_shapes_with_layout) {
if (!LayoutUtil::HasLayout(operand_shape)) {
return InvalidArgument(
"No layout specified for operand %d for custom call with "
"constrained layout.",
operand_num);
}
++operand_num;
}
}
return CustomCallInternal(
call_target_name, operands, nullptr, shape, opaque,
operand_shapes_with_layout, has_side_effect, output_operand_aliasing,
literal, window, dnums, schedule, api_version);
});
}
absl::StatusOr<XlaOp> XlaBuilder::CustomCallInternal(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const XlaComputation* computation, const Shape& shape,
const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, std::optional<Window> window,
std::optional<ConvolutionDimensionNumbers> dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
HloInstructionProto instr;
if (call_target_name == "__cudnn$convForward") {
instr.set_name("cudnn-conv");
} else if (call_target_name == "__cudnn$convBackwardInput") {
instr.set_name("cudnn-conv-bw-input");
} else if (call_target_name == "__cudnn$convBackwardFilter") {
instr.set_name("cudnn-conv-bw-filter");
} else if (call_target_name == "__cudnn$convBiasActivationForward") {
instr.set_name("cudnn-conv-bias-activation");
}
*instr.mutable_shape() = shape.ToProto();
instr.set_custom_call_target(call_target_name);
instr.set_backend_config(opaque);
if (operand_shapes_with_layout.has_value()) {
instr.set_constrain_layout(true);
for (const Shape& operand_shape : *operand_shapes_with_layout) {
*instr.add_operand_shapes_with_layout() = operand_shape.ToProto();
}
}
if (literal != nullptr) {
*instr.mutable_literal() = literal->ToProto();
}
instr.set_custom_call_has_side_effect(has_side_effect);
if (computation != nullptr && !computation->IsNull()) {
AddCalledComputation(*computation, &instr);
}
for (const auto& pair : output_operand_aliasing) {
auto aliasing = instr.add_output_operand_aliasing();
aliasing->set_operand_index(pair.second.first);
for (int64_t index : pair.second.second) {
aliasing->add_operand_shape_index(index);
}
for (int64_t index : pair.first) {
aliasing->add_output_shape_index(index);
}
}
if (window.has_value()) {
*instr.mutable_window() = *window;
}
if (dnums.has_value()) {
*instr.mutable_convolution_dimension_numbers() = *dnums;
}
instr.set_custom_call_schedule(schedule);
instr.set_custom_call_api_version(api_version);
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, operands);
}
XlaOp XlaBuilder::CustomCall(
const std::string& call_target_name, absl::Span<const XlaOp> operands,
const XlaComputation& computation, const Shape& shape,
const std::string& opaque,
std::optional<absl::Span<const Shape>> operand_shapes_with_layout,
bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (absl::StartsWith(call_target_name, "$")) {
return InvalidArgument(
"Invalid custom_call_target \"%s\": Call targets that start with '$' "
"are reserved for internal use.",
call_target_name);
}
if (operand_shapes_with_layout.has_value()) {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument(
"Result shape must have layout for custom call with constrained "
"layout.");
}
if (operands.size() != operand_shapes_with_layout->size()) {
return InvalidArgument(
"Must specify a shape with layout for each operand for custom call "
"with constrained layout; given %d shapes, expected %d",
operand_shapes_with_layout->size(), operands.size());
}
int64_t operand_num = 0;
for (const Shape& operand_shape : *operand_shapes_with_layout) {
if (!LayoutUtil::HasLayout(operand_shape)) {
return InvalidArgument(
"No layout specified for operand %d for custom call with "
"constrained layout.",
operand_num);
}
++operand_num;
}
}
return CustomCallInternal(
call_target_name, operands, &computation, shape, opaque,
operand_shapes_with_layout, has_side_effect, output_operand_aliasing,
literal, {}, {}, schedule, api_version);
});
}
XlaOp XlaBuilder::OptimizationBarrier(XlaOp operand) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
Shape shape = *operand_shape;
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kOptimizationBarrier,
{operand});
});
}
XlaOp XlaBuilder::Transpose(XlaOp operand,
absl::Span<const int64_t> permutation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferTransposeShape(
*operand_shape, permutation));
return TransposeInternal(shape, operand, permutation);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TransposeInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> permutation) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : permutation) {
instr.add_dimensions(dim);
}
return AddInstruction(std::move(instr), HloOpcode::kTranspose, {operand});
}
XlaOp XlaBuilder::Rev(XlaOp operand, absl::Span<const int64_t> dimensions) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferReverseShape(
*operand_shape, dimensions));
return RevInternal(shape, operand, dimensions);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RevInternal(
const Shape& shape, XlaOp operand, absl::Span<const int64_t> dimensions) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : dimensions) {
instr.add_dimensions(dim);
}
return AddInstruction(std::move(instr), HloOpcode::kReverse, {operand});
}
XlaOp XlaBuilder::Sort(absl::Span<const XlaOp> operands,
const XlaComputation& comparator, int64_t dimension,
bool is_stable) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(std::vector<Shape> operand_shapes,
GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferVariadicOpShape(
HloOpcode::kSort, operand_shape_ptrs));
return SortInternal(shape, operands, comparator, dimension, is_stable);
});
}
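// A sort dimension of -1 selects the last dimension of the first (keys)
// operand.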
absl::StatusOr<XlaOp> XlaBuilder::SortInternal(const Shape& shape,
absl::Span<const XlaOp> operands,
const XlaComputation& comparator,
int64_t dimension,
bool is_stable) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_is_stable(is_stable);
if (dimension == -1) {
TF_ASSIGN_OR_RETURN(const Shape* keys_shape, GetShapePtr(operands[0]));
dimension = keys_shape->rank() - 1;
}
instr.add_dimensions(dimension);
AddCalledComputation(comparator, &instr);
return AddInstruction(std::move(instr), HloOpcode::kSort, operands);
}
XlaOp XlaBuilder::TopK(XlaOp operand, int64_t k, bool largest) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferTopKShape(*operand_shape, k));
return TopKInternal(shape, operand, k, largest);
});
}
absl::StatusOr<XlaOp> XlaBuilder::TopKInternal(const Shape& shape,
XlaOp operand, int64_t k,
bool largest) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_k(k);
instr.set_largest(largest);
return AddInstruction(std::move(instr), HloOpcode::kTopK, {operand});
}
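// Converting a complex operand to a non-complex element type first takes the
// real part, so the emitted kConvert only ever sees a real-valued input.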
XlaOp XlaBuilder::ConvertElementType(XlaOp operand,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferConvertShape(
*operand_shape, new_element_type));
if (primitive_util::IsComplexType(operand_shape->element_type()) &&
!primitive_util::IsComplexType(new_element_type)) {
operand = Real(operand);
}
return AddOpWithShape(HloOpcode::kConvert, shape, {operand});
});
}
XlaOp XlaBuilder::BitcastConvertType(XlaOp operand,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferBitcastConvertShape(
*operand_shape, new_element_type));
return BitcastConvertTypeInternal(shape, operand);
});
}
absl::StatusOr<XlaOp> XlaBuilder::BitcastConvertTypeInternal(const Shape& shape,
XlaOp operand) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), HloOpcode::kBitcastConvert,
{operand});
}
XlaOp XlaBuilder::StochasticConvertType(XlaOp operand, XlaOp random,
PrimitiveType new_element_type) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* random_shape, GetShapePtr(random));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferStochasticConvertShape(
*operand_shape, *random_shape, new_element_type));
return AddOpWithShape(HloOpcode::kStochasticConvert, shape,
{operand, random});
});
}
XlaOp XlaBuilder::Clamp(XlaOp min, XlaOp operand, XlaOp max) {
return TernaryOp(HloOpcode::kClamp, min, operand, max);
}
XlaOp XlaBuilder::Map(absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions,
absl::Span<const XlaOp> static_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!static_operands.empty()) {
return Unimplemented("static_operands is not supported in Map");
}
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes, GetOperandShapes(operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferMapShape(
operand_shape_ptrs, called_program_shape, dimensions));
*instr.mutable_shape() = shape.ToProto();
Shape output_shape(instr.shape());
const int64_t output_rank = output_shape.rank();
AddCalledComputation(computation, &instr);
std::vector<XlaOp> new_operands(operands.begin(), operands.end());
for (XlaOp& new_operand : new_operands) {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(new_operand));
const int64_t rank = shape->rank();
if (rank != output_rank) {
TF_ASSIGN_OR_RETURN(new_operand,
InDimBroadcast(output_shape, new_operand, {}));
TF_ASSIGN_OR_RETURN(shape, GetShapePtr(new_operand));
}
if (!ShapeUtil::SameDimensions(output_shape, *shape)) {
TF_ASSIGN_OR_RETURN(new_operand,
AddBroadcastSequence(output_shape, new_operand));
}
}
return AddInstruction(std::move(instr), HloOpcode::kMap, new_operands);
});
}
XlaOp XlaBuilder::RngOp(RandomDistribution distribution,
absl::Span<const XlaOp> parameters,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
switch (distribution) {
case RandomDistribution::RNG_NORMAL:
case RandomDistribution::RNG_UNIFORM:
if (parameters.size() != 2) {
return InvalidArgument(
"RNG distribution (%s) expects 2 parameters, but got %ld",
RandomDistribution_Name(distribution), parameters.size());
}
break;
default:
LOG(FATAL) << "unhandled distribution " << distribution;
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
return RngOpInternal(distribution, parameters, shape);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RngOpInternal(
RandomDistribution distribution, absl::Span<const XlaOp> parameters,
const Shape& shape) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_distribution(distribution);
return AddInstruction(std::move(instr), HloOpcode::kRng, parameters);
}
XlaOp XlaBuilder::RngNormal(XlaOp mu, XlaOp sigma, const Shape& shape) {
return RngOp(RandomDistribution::RNG_NORMAL, {mu, sigma}, shape);
}
XlaOp XlaBuilder::RngUniform(XlaOp a, XlaOp b, const Shape& shape) {
return RngOp(RandomDistribution::RNG_UNIFORM, {a, b}, shape);
}
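// The generator state is threaded through as the first tuple element of the
// result; the data element always uses an unsigned integral type with the
// same bit width as the requested element type, and requests with no such
// mapping are rejected.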
XlaOp XlaBuilder::RngBitGenerator(RandomAlgorithm algorithm,
XlaOp initial_state, const Shape& shape) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
TF_ASSIGN_OR_RETURN(Shape state_shape, GetShape(initial_state));
Shape output_shape = shape;
output_shape.set_element_type(PRIMITIVE_TYPE_INVALID);
if (primitive_util::IsArrayType(shape.element_type())) {
output_shape.set_element_type(
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
}
if (!primitive_util::IsUnsignedIntegralType(output_shape.element_type())) {
return InvalidArgument("Unsupported shape for RngBitGenerator: %s",
PrimitiveType_Name(shape.element_type()));
}
return RngBitGeneratorInternal(
ShapeUtil::MakeTupleShapeWithPtrs({&state_shape, &output_shape}),
algorithm, initial_state);
});
}
absl::StatusOr<XlaOp> XlaBuilder::RngBitGeneratorInternal(
const Shape& full_result_shape, RandomAlgorithm algorithm,
XlaOp initial_state) {
HloInstructionProto instr;
*instr.mutable_shape() = full_result_shape.ToProto();
instr.set_rng_algorithm(algorithm);
return AddInstruction(std::move(instr), HloOpcode::kRngBitGenerator,
{initial_state});
}
XlaOp XlaBuilder::While(const XlaComputation& condition,
const XlaComputation& body, XlaOp init) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const auto& body_program_shape, body.GetProgramShape());
TF_ASSIGN_OR_RETURN(const auto& condition_program_shape,
condition.GetProgramShape());
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferWhileShape(
condition_program_shape,
body_program_shape, *init_shape));
return WhileInternal(shape, condition, body, init);
});
}
absl::StatusOr<XlaOp> XlaBuilder::WhileInternal(const Shape& shape,
const XlaComputation& condition,
const XlaComputation& body,
XlaOp init) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(body, &instr);
AddCalledComputation(condition, &instr);
return AddInstruction(std::move(instr), HloOpcode::kWhile, {init});
}
XlaOp XlaBuilder::Gather(XlaOp input, XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes,
bool indices_are_sorted) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* input_shape, GetShapePtr(input));
TF_ASSIGN_OR_RETURN(const Shape* start_indices_shape,
GetShapePtr(start_indices));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferGatherShape(
*input_shape, *start_indices_shape,
dimension_numbers, slice_sizes));
return GatherInternal(shape, input, start_indices, dimension_numbers,
slice_sizes, indices_are_sorted);
});
}
absl::StatusOr<XlaOp> XlaBuilder::GatherInternal(
const Shape& shape, XlaOp input, XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
HloInstructionProto instr;
instr.set_indices_are_sorted(indices_are_sorted);
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_gather_dimension_numbers() = dimension_numbers;
for (int64_t bound : slice_sizes) {
instr.add_gather_slice_sizes(bound);
}
return AddInstruction(std::move(instr), HloOpcode::kGather,
{input, start_indices});
}
XlaOp XlaBuilder::Scatter(XlaOp input, XlaOp scatter_indices, XlaOp updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return Scatter(absl::MakeConstSpan(&input, 1), scatter_indices,
absl::MakeConstSpan(&updates, 1), update_computation,
dimension_numbers, indices_are_sorted, unique_indices);
}
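// Variadic scatter: operands are ordered (inputs..., scatter_indices,
// updates...), and inputs and updates must pair up one-to-one.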
XlaOp XlaBuilder::Scatter(absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (inputs.empty()) {
return InvalidArgument("Scatter inputs cannot be empty.");
}
if (inputs.size() != updates.size()) {
return InvalidArgument(
"Scatter should have same number of inputs and updates: %d vs %d.",
inputs.size(), updates.size());
}
absl::InlinedVector<const Shape*, 3> operand_shapes;
operand_shapes.reserve(inputs.size() + 1 + updates.size());
for (const XlaOp& input : inputs) {
TF_ASSIGN_OR_RETURN(const Shape* input_shape, GetShapePtr(input));
operand_shapes.push_back(input_shape);
}
TF_ASSIGN_OR_RETURN(const Shape* scatter_indices_shape,
GetShapePtr(scatter_indices));
operand_shapes.push_back(scatter_indices_shape);
for (const XlaOp& update : updates) {
TF_ASSIGN_OR_RETURN(const Shape* update_shape, GetShapePtr(update));
operand_shapes.push_back(update_shape);
}
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
update_computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferScatterShape(
operand_shapes, to_apply_shape, dimension_numbers));
return ScatterInternal(shape, inputs, scatter_indices, updates,
update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ScatterInternal(
const Shape& shape, absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates, const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers, bool indices_are_sorted,
bool unique_indices) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
instr.set_indices_are_sorted(indices_are_sorted);
instr.set_unique_indices(unique_indices);
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_scatter_dimension_numbers() = dimension_numbers;
AddCalledComputation(update_computation, &instr);
absl::InlinedVector<XlaOp, 3> operands;
operands.reserve(inputs.size() + 1 + updates.size());
absl::c_copy(inputs, std::back_inserter(operands));
operands.push_back(scatter_indices);
absl::c_copy(updates, std::back_inserter(operands));
return AddInstruction(std::move(instr), HloOpcode::kScatter, operands);
});
}
XlaOp XlaBuilder::Conditional(XlaOp predicate, XlaOp true_operand,
const XlaComputation& true_computation,
XlaOp false_operand,
const XlaComputation& false_computation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(predicate));
if (!ShapeUtil::IsScalar(*shape) || shape->element_type() != PRED) {
return InvalidArgument(
"Argument to predicated-Conditional is not a scalar of PRED type "
"(%s).",
ShapeUtil::HumanString(*shape));
}
return ConditionalImpl(predicate, {&true_computation, &false_computation},
{true_operand, false_operand});
});
}
XlaOp XlaBuilder::Conditional(
XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(branch_index));
if (!ShapeUtil::IsScalar(*shape) || shape->element_type() != S32) {
return InvalidArgument(
"Argument to indexed-Conditional is not a scalar of S32 type (%s).",
ShapeUtil::HumanString(*shape));
}
return ConditionalImpl(branch_index, branch_computations, branch_operands);
});
}
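// Tuple-shaped all-reduce operands are flattened into one instruction over
// the leaf elements (which must share an element type). If the inferred
// result shape is not a tuple while the operand was, the result is
// re-wrapped in a single-element tuple so callers still see a tuple.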
XlaOp XlaBuilder::AllReduceImpl(XlaOp operand,
const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& layout,
const std::optional<bool> use_global_device_ids,
bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple AllReduce is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
if (operand_shape->tuple_shapes(i).element_type() !=
operand_shape->tuple_shapes(0).element_type()) {
return Unimplemented(
"All the shapes of a tuple input of AllReduce must have the same "
"element type");
}
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferAllReduceShape(operand_shapes));
if (layout) {
if (!LayoutUtil::HasLayout(*layout)) {
return InvalidArgument("shape_with_layout must have the layout set: %s",
ShapeUtil::HumanString(*layout));
}
if (!ShapeUtil::Compatible(*layout, *operand_shape)) {
return InvalidArgument(
"Provided shape_with_layout must be compatible with the "
"operand shape: %s vs %s",
ShapeUtil::HumanString(*layout),
ShapeUtil::HumanString(*operand_shape));
}
instr.set_constrain_layout(true);
if (operand_shape->IsTuple() && !inferred_shape.IsTuple()) {
TF_RET_CHECK(layout->tuple_shapes_size() == 1);
*instr.mutable_shape() = layout->tuple_shapes(0).ToProto();
} else {
*instr.mutable_shape() = layout->ToProto();
}
} else {
*instr.mutable_shape() = inferred_shape.ToProto();
}
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(*use_global_device_ids);
}
AddCalledComputation(computation, &instr);
TF_ASSIGN_OR_RETURN(auto all_reduce,
AddInstruction(std::move(instr),
async ? HloOpcode::kAllReduceStart
: HloOpcode::kAllReduce,
operands));
if (operand_shape->IsTuple() && !inferred_shape.IsTuple()) {
TF_RET_CHECK(operand_shapes.size() == 1);
TF_RET_CHECK(ShapeUtil::Compatible(*operand_shapes[0], inferred_shape));
return Tuple({all_reduce});
}
return all_reduce;
});
}
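// Mirrors AllReduceImpl's tuple flattening: tuple operands are split into
// per-leaf operands before emitting a kAllGather (or kAllGatherStart when
// async).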
XlaOp XlaBuilder::AllGatherImpl(const XlaOp operand,
int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids,
bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple AllGather is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferAllGatherShape(
operand_shapes, all_gather_dimension, shard_count));
if (layout) {
*inferred_shape.mutable_layout() = *layout;
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = inferred_shape.ToProto();
instr.add_dimensions(all_gather_dimension);
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(use_global_device_ids.value());
}
TF_ASSIGN_OR_RETURN(auto all_gather,
AddInstruction(std::move(instr),
async ? HloOpcode::kAllGatherStart
: HloOpcode::kAllGather,
operands));
return all_gather;
});
}
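// Shared lowering for both predicated and indexed conditionals: operand 0 is
// the branch selector, followed by one operand per branch computation.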
XlaOp XlaBuilder::ConditionalImpl(
XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* branch_index_shape,
GetShapePtr(branch_index));
std::vector<Shape> branch_operand_shapes(branch_operands.size());
std::vector<ProgramShape> branch_computation_shapes(
branch_computations.size());
for (int j = 0, end = branch_operands.size(); j < end; ++j) {
TF_ASSIGN_OR_RETURN(branch_operand_shapes[j],
GetShape(branch_operands[j]));
TF_ASSIGN_OR_RETURN(branch_computation_shapes[j],
branch_computations[j]->GetProgramShape());
}
TF_ASSIGN_OR_RETURN(const Shape shape,
ShapeInference::InferConditionalShape(
*branch_index_shape, branch_computation_shapes,
branch_operand_shapes));
*instr.mutable_shape() = shape.ToProto();
for (const XlaComputation* branch_computation : branch_computations) {
AddCalledComputation(*branch_computation, &instr);
}
std::vector<XlaOp> operands(1, branch_index);
for (const XlaOp branch_operand : branch_operands) {
operands.emplace_back(branch_operand);
}
return AddInstruction(std::move(instr), HloOpcode::kConditional,
absl::MakeSpan(operands));
});
}
absl::Status XlaBuilder::CheckOpBuilder(XlaOp op) const {
if (this != op.builder()) {
return InvalidArgument(
"XlaOp with handle %d is built by builder '%s', but is trying to use "
"it in builder '%s'",
op.handle(), op.builder()->name(), name());
}
return absl::OkStatus();
}
XlaOp XlaBuilder::Reduce(XlaOp operand, XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return Reduce(absl::Span<const XlaOp>({operand}),
absl::Span<const XlaOp>({init_value}), computation,
dimensions_to_reduce);
}
XlaOp XlaBuilder::Reduce(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const ProgramShape& called_program_shape,
computation.GetProgramShape());
std::vector<XlaOp> all_operands;
all_operands.insert(all_operands.end(), operands.begin(), operands.end());
all_operands.insert(all_operands.end(), init_values.begin(),
init_values.end());
std::vector<const Shape*> operand_shape_ptrs;
TF_ASSIGN_OR_RETURN(const auto& operand_shapes,
GetOperandShapes(all_operands));
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferReduceShape(
operand_shape_ptrs, dimensions_to_reduce, called_program_shape));
return ReduceInternal(shape, all_operands, computation,
dimensions_to_reduce);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReduceInternal(
const Shape& shape, absl::Span<const XlaOp> all_operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
for (int64_t dim : dimensions_to_reduce) {
instr.add_dimensions(dim);
}
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kReduce, all_operands);
});
}
XlaOp XlaBuilder::ReduceAll(XlaOp operand, XlaOp init_value,
const XlaComputation& computation) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<int64_t> all_dimnos(operand_shape->rank());
std::iota(all_dimnos.begin(), all_dimnos.end(), 0);
return Reduce(operand, init_value, computation, all_dimnos);
});
}
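// Single-operand convenience overload; forwards to the variadic
// ReduceWindow.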
XlaOp XlaBuilder::ReduceWindow(XlaOp operand, XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding) {
return ReduceWindow(absl::MakeSpan(&operand, 1),
absl::MakeSpan(&init_value, 1), computation,
window_dimensions, window_strides, padding);
}
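// Computes padding from the Padding enum. If any windowed dimension is
// dynamic and kSame padding was requested, this lowers to a
// "DynamicReduceWindowSamePadding" custom call so padding can be recomputed
// at run time; otherwise it forwards to ReduceWindowWithGeneralPadding.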
XlaOp XlaBuilder::ReduceWindow(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
const Shape* operand_shape = nullptr;
for (const auto& operand : operands) {
TF_ASSIGN_OR_RETURN(operand_shape, GetShapePtr(operand));
TF_RETURN_IF_ERROR(ValidatePaddingValues(
operand_shape->dimensions(), window_dimensions, window_strides));
}
CHECK(operand_shape != nullptr);
std::vector<std::pair<int64_t, int64_t>> padding_values =
MakePadding(operand_shape->dimensions(), window_dimensions,
window_strides, padding);
    TF_ASSIGN_OR_RETURN(auto window,
                        ShapeInference::InferWindowFromDimensions(
                            window_dimensions, window_strides, padding_values,
                            /*lhs_dilation=*/{},
                            /*rhs_dilation=*/{}));
PaddingType padding_type = PADDING_INVALID;
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (operand_shape->is_dynamic_dimension(i) &&
!window_util::IsTrivialWindowDimension(window.dimensions(i)) &&
padding == Padding::kSame) {
padding_type = PADDING_SAME;
}
}
if (padding_type == PADDING_SAME) {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
          ReduceWindowInternal(operands, init_values, computation,
                               window_dimensions, window_strides,
                               /*base_dilations=*/{}, /*window_dilations=*/{},
                               padding_values));
instr.set_custom_call_target("DynamicReduceWindowSamePadding");
std::vector<XlaOp> args;
args.insert(args.end(), operands.begin(), operands.end());
args.insert(args.end(), init_values.begin(), init_values.end());
return AddInstruction(std::move(instr), HloOpcode::kCustomCall, args);
}
    return ReduceWindowWithGeneralPadding(
        operands, init_values, computation, window_dimensions, window_strides,
        /*base_dilations=*/{}, /*window_dilations=*/{}, padding_values);
});
}
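// Fully general reduce-window entry point. The single-operand case emits one
// kReduceWindow instruction directly; the variadic case builds the
// instruction proto once and passes operands followed by init values.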
XlaOp XlaBuilder::ReduceWindowWithGeneralPadding(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
std::vector<const Shape*> operand_shapes, init_shapes;
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (operands.size() == 1) {
const auto& operand = operands[0];
const auto& init_value = init_values[0];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
operand_shapes.push_back(operand_shape);
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
init_shapes.push_back(init_shape);
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
computation.GetProgramShape());
      TF_ASSIGN_OR_RETURN(auto window,
                          ShapeInference::InferWindowFromDimensions(
                              window_dimensions, window_strides, padding,
                              /*lhs_dilation=*/base_dilations,
                              /*rhs_dilation=*/window_dilations));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferReduceWindowShape(
absl::MakeSpan(operand_shapes),
absl::MakeSpan(init_shapes), window,
to_apply_shape));
return ReduceWindowInternal(shape, operands[0], init_values[0],
computation, window);
}
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
ReduceWindowInternal(operands, init_values, computation,
window_dimensions, window_strides, base_dilations,
window_dilations, padding));
std::vector<XlaOp> args;
args.insert(args.end(), operands.begin(), operands.end());
args.insert(args.end(), init_values.begin(), init_values.end());
return AddInstruction(std::move(instr), HloOpcode::kReduceWindow, args);
});
}
absl::StatusOr<HloInstructionProto> XlaBuilder::ReduceWindowInternal(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
std::vector<const Shape*> operand_shapes, init_shapes;
for (int i = 0; i < operands.size(); ++i) {
const auto& operand = operands[i];
const auto& init_value = init_values[i];
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
operand_shapes.push_back(operand_shape);
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
init_shapes.push_back(init_shape);
}
TF_ASSIGN_OR_RETURN(const ProgramShape& to_apply_shape,
computation.GetProgramShape());
  TF_ASSIGN_OR_RETURN(auto window,
                      ShapeInference::InferWindowFromDimensions(
                          window_dimensions, window_strides, padding,
                          /*lhs_dilation=*/base_dilations,
                          /*rhs_dilation=*/window_dilations));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferReduceWindowShape(
absl::MakeSpan(operand_shapes),
absl::MakeSpan(init_shapes), window, to_apply_shape));
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = std::move(window);
AddCalledComputation(computation, &instr);
return instr;
}
absl::StatusOr<XlaOp> XlaBuilder::ReduceWindowInternal(
const Shape& shape, XlaOp operand, XlaOp init_value,
const XlaComputation& computation, Window window) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
*instr.mutable_window() = std::move(window);
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kReduceWindow,
{operand, init_value});
}
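// Emits kBatchNormTraining over (operand, scale, offset); the result shape
// is the tuple produced by InferBatchNormTrainingShape.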
XlaOp XlaBuilder::BatchNormTraining(XlaOp operand, XlaOp scale, XlaOp offset,
float epsilon, int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* offset_shape, GetShapePtr(offset));
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferBatchNormTrainingShape(
*operand_shape, *scale_shape, *offset_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormTraining,
{operand, scale, offset});
});
}
XlaOp XlaBuilder::BatchNormInference(XlaOp operand, XlaOp scale, XlaOp offset,
XlaOp mean, XlaOp variance, float epsilon,
int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* offset_shape, GetShapePtr(offset));
TF_ASSIGN_OR_RETURN(const Shape* mean_shape, GetShapePtr(mean));
TF_ASSIGN_OR_RETURN(const Shape* variance_shape, GetShapePtr(variance));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferBatchNormInferenceShape(
*operand_shape, *scale_shape, *offset_shape,
*mean_shape, *variance_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormInference,
{operand, scale, offset, mean, variance});
});
}
XlaOp XlaBuilder::BatchNormGrad(XlaOp operand, XlaOp scale, XlaOp batch_mean,
XlaOp batch_var, XlaOp grad_output,
float epsilon, int64_t feature_index) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* scale_shape, GetShapePtr(scale));
TF_ASSIGN_OR_RETURN(const Shape* batch_mean_shape, GetShapePtr(batch_mean));
TF_ASSIGN_OR_RETURN(const Shape* batch_var_shape, GetShapePtr(batch_var));
TF_ASSIGN_OR_RETURN(const Shape* grad_output_shape,
GetShapePtr(grad_output));
TF_ASSIGN_OR_RETURN(
Shape shape, ShapeInference::InferBatchNormGradShape(
*operand_shape, *scale_shape, *batch_mean_shape,
*batch_var_shape, *grad_output_shape, feature_index));
*instr.mutable_shape() = shape.ToProto();
instr.set_epsilon(epsilon);
instr.set_feature_index(feature_index);
return AddInstruction(std::move(instr), HloOpcode::kBatchNormGrad,
{operand, scale, batch_mean, batch_var, grad_output});
});
}
XlaOp XlaBuilder::AllGather(XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
  return AllGatherImpl(operand, all_gather_dimension, shard_count,
                       replica_groups, channel_id, layout,
                       use_global_device_ids, /*async=*/false);
}
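// Builds a scalar reduction computation in a sub-builder (logical Or for
// PRED element types, Add otherwise) and lowers to an AllReduce without a
// channel id.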
XlaOp XlaBuilder::CrossReplicaSum(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* shape, GetShapePtr(operand));
const Shape* element_shape;
if (shape->IsTuple()) {
if (shape->tuple_shapes_size() == 0) {
        return Unimplemented(
            "CrossReplicaSum is not supported for zero-element tuples");
}
element_shape = &shape->tuple_shapes(0);
} else {
element_shape = shape;
}
const Shape scalar_shape =
ShapeUtil::MakeShape(element_shape->element_type(), {});
auto b = CreateSubBuilder("sum");
auto x = b->Parameter(0, scalar_shape, "x");
auto y = b->Parameter(1, scalar_shape, "y");
if (scalar_shape.element_type() == PRED) {
Or(x, y);
} else {
Add(x, y);
}
TF_ASSIGN_OR_RETURN(auto computation, b->Build());
    return AllReduce(operand, computation, replica_groups,
                     /*channel_id=*/std::nullopt);
});
}
XlaOp XlaBuilder::AllReduce(XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
  return AllReduceImpl(operand, computation, replica_groups, channel_id,
                       shape_with_layout, use_global_device_ids,
                       /*async=*/false);
}
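// Tuple operands are flattened via GetTupleElement before shape inference,
// and all tuple elements must share an element type. An explicit layout
// marks the instruction as layout-constrained.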
XlaOp XlaBuilder::ReduceScatter(
XlaOp operand, const XlaComputation& computation, int64_t scatter_dimension,
int64_t shard_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<const Shape*> operand_shapes;
std::vector<XlaOp> operands;
if (operand_shape->IsTuple()) {
if (operand_shape->tuple_shapes_size() == 0) {
return Unimplemented("0 element tuple ReduceScatter is not supported");
}
for (int i = 0; i < operand_shape->tuple_shapes_size(); ++i) {
if (operand_shape->tuple_shapes(i).element_type() !=
operand_shape->tuple_shapes(0).element_type()) {
          return Unimplemented(
              "All elements of a tuple input to ReduceScatter must have the "
              "same element type");
}
operand_shapes.push_back(&operand_shape->tuple_shapes(i));
operands.push_back(GetTupleElement(operand, i));
}
} else {
operand_shapes.push_back(operand_shape);
operands.push_back(operand);
}
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferReduceScatterShape(
operand_shapes, scatter_dimension, shard_count));
if (layout) {
*inferred_shape.mutable_layout() = *layout;
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = inferred_shape.ToProto();
AddCalledComputation(computation, &instr);
instr.add_dimensions(scatter_dimension);
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
if (use_global_device_ids.has_value()) {
instr.set_use_global_device_ids(use_global_device_ids.value());
}
TF_ASSIGN_OR_RETURN(
auto reduce_scatter,
AddInstruction(std::move(instr), HloOpcode::kReduceScatter, operands));
return reduce_scatter;
});
}
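// A layout constraint forces the tuple-based lowering; otherwise a single
// array-shaped AllToAll is emitted.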
XlaOp XlaBuilder::AllToAll(XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
if (layout.has_value()) {
return AllToAllTuple(operand, split_dimension, concat_dimension,
split_count, replica_groups, layout, channel_id);
}
return AllToAllArray(operand, split_dimension, concat_dimension, split_count,
replica_groups, channel_id);
}
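// Emits an array AllToAll. When the split and concat dimensions differ, the
// result is reshaped to expose `split_count` blocks along the split
// dimension, transposed so those blocks land on the concat dimension, and
// reshaped back to the inferred shape. Unbounded dynamic operands take the
// MhloDynamicReshape path with runtime dimension sizes.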
XlaOp XlaBuilder::AllToAllArray(
XlaOp operand, int64_t split_dimension, int64_t concat_dimension,
int64_t split_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(
const Shape all_to_all_shape,
ShapeInference::InferAllToAllShape(*operand_shape, split_dimension,
concat_dimension, split_count));
HloInstructionProto instr;
*instr.mutable_shape() = operand_shape->ToProto();
if (replica_groups.empty()) {
auto* group = instr.add_replica_groups();
for (int64_t i = 0; i < split_count; ++i) {
group->add_replica_ids(i);
}
} else {
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
}
instr.add_dimensions(split_dimension);
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
TF_ASSIGN_OR_RETURN(
XlaOp all_to_all,
AddInstruction(std::move(instr), HloOpcode::kAllToAll, {operand}));
if (split_dimension == concat_dimension) {
return all_to_all;
}
DimensionVector sizes;
const bool is_unbounded = operand_shape->is_unbounded_dynamic();
std::vector<XlaOp> dynamic_sizes;
auto GetR1DimensionSizeOrConstant = [&](XlaOp operand,
int64_t dimension) -> XlaOp {
if (operand_shape->is_unbounded_dynamic_dimension(dimension)) {
return Reshape(GetDimensionSize(operand, dimension), {1});
}
return ConstantR1<int32_t>(
this, {static_cast<int32_t>(operand_shape->dimensions(dimension))});
};
XlaOp r1_split_count =
ConstantR1<int32_t>(this, {static_cast<int32_t>(split_count)});
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (i != split_dimension) {
sizes.push_back(operand_shape->dimensions(i));
if (is_unbounded) {
dynamic_sizes.push_back(GetR1DimensionSizeOrConstant(operand, i));
}
continue;
}
sizes.push_back(split_count);
sizes.push_back(operand_shape->is_unbounded_dynamic_dimension(i)
? Shape::kUnboundedSize
: operand_shape->dimensions(i) / split_count);
if (is_unbounded) {
dynamic_sizes.push_back(r1_split_count);
dynamic_sizes.push_back(
operand_shape->is_unbounded_dynamic_dimension(i)
? Div(GetR1DimensionSizeOrConstant(operand, i), r1_split_count)
: ConstantR1<int32_t>(this,
{static_cast<int32_t>(sizes.back())}));
}
}
if (is_unbounded) {
std::vector<bool> dynamic_dimensions;
std::transform(
sizes.begin(), sizes.end(), std::back_inserter(dynamic_dimensions),
[](int64_t size) { return size == Shape::kUnboundedSize; });
TF_ASSIGN_OR_RETURN(
const Shape shape,
ShapeUtil::MakeValidatedShape(all_to_all_shape.element_type(), sizes,
dynamic_dimensions));
all_to_all =
MhloDynamicReshape(all_to_all, ConcatInDim(dynamic_sizes, 0), shape);
} else {
all_to_all = Reshape(all_to_all, sizes);
}
std::vector<int64_t> permutation;
const auto rank = operand_shape->rank();
permutation.reserve(rank + 1);
for (int64_t i = 0; i < rank; ++i) {
int64_t dim_after_reshape = i >= split_dimension ? i + 1 : i;
if (i == concat_dimension) {
permutation.push_back(split_dimension);
}
permutation.push_back(dim_after_reshape);
}
all_to_all = Transpose(all_to_all, permutation);
if (is_unbounded) {
std::vector<XlaOp> new_dimensions;
new_dimensions.reserve(operand_shape->rank());
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
new_dimensions.push_back(GetR1DimensionSizeOrConstant(operand, i));
}
new_dimensions[split_dimension] =
Div(new_dimensions[split_dimension], r1_split_count);
new_dimensions[concat_dimension] =
Mul(new_dimensions[concat_dimension], r1_split_count);
return MhloDynamicReshape(all_to_all, ConcatInDim(new_dimensions, 0),
all_to_all_shape);
}
return Reshape(all_to_all_shape, all_to_all);
});
}
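// Tuple form of AllToAll: each operand becomes one element of the result
// tuple. A provided layout must match the rank of every tuple element and
// marks the instruction as layout-constrained.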
XlaOp XlaBuilder::AllToAllTuple(
absl::Span<const XlaOp> operands,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(auto operand_shapes, this->GetOperandShapes(operands));
std::vector<const Shape*> operand_shape_ptrs;
operand_shape_ptrs.reserve(operand_shapes.size());
absl::c_transform(operand_shapes, std::back_inserter(operand_shape_ptrs),
[](const Shape& shape) { return &shape; });
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferAllToAllTupleShape(
operand_shape_ptrs));
if (layout) {
TF_RET_CHECK(shape.IsTuple() && !ShapeUtil::IsNestedTuple(shape));
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const int64_t layout_minor_to_major_size =
layout->minor_to_major().size();
if (layout_minor_to_major_size != shape.tuple_shapes(i).rank()) {
return InvalidArgument(
"Provided layout must be compatible with the operands' shape. "
"The layout is %s, but operand %d has shape %s.",
layout->ToString(), i,
ShapeUtil::HumanString(shape.tuple_shapes(i)));
}
*(shape.mutable_tuple_shapes(i)->mutable_layout()) = *layout;
}
instr.set_constrain_layout(true);
}
*instr.mutable_shape() = shape.ToProto();
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr), HloOpcode::kAllToAll, operands);
});
}
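// Decomposes the operand into `split_count` slices along `split_dimension`,
// runs a tuple AllToAll on the slices, and concatenates the received pieces
// along `concat_dimension`. Unbounded dynamic shapes are rejected.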
XlaOp XlaBuilder::AllToAllTuple(
XlaOp operand, int64_t split_dimension, int64_t concat_dimension,
int64_t split_count, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (operand_shape->is_unbounded_dynamic() ||
split_dimension == Shape::kUnboundedSize ||
concat_dimension == Shape::kUnboundedSize ||
split_count == Shape::kUnboundedSize) {
return InvalidArgument(
"AllToAllTuple does not support unbounded dynamic shapes");
}
TF_RETURN_IF_ERROR(
ShapeInference::InferAllToAllShape(*operand_shape, split_dimension,
concat_dimension, split_count)
.status());
std::vector<XlaOp> slices;
slices.reserve(split_count);
const int64_t block_size =
operand_shape->dimensions(split_dimension) / split_count;
for (int i = 0; i < split_count; i++) {
      slices.push_back(SliceInDim(operand, /*start_index=*/i * block_size,
                                  /*limit_index=*/(i + 1) * block_size,
                                  /*stride=*/1, /*dimno=*/split_dimension));
}
XlaOp all_to_all =
this->AllToAllTuple(slices, replica_groups, layout, channel_id);
std::vector<XlaOp> received;
received.reserve(split_count);
for (int i = 0; i < split_count; i++) {
received.push_back(this->GetTupleElement(all_to_all, i));
}
return this->ConcatInDim(received, concat_dimension);
});
}
XlaOp XlaBuilder::CollectiveBroadcast(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return CollectiveBroadcastImpl(operand, replica_groups, channel_id);
}
XlaOp XlaBuilder::CollectiveBroadcastImpl(
XlaOp operand, absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferCollectiveBroadcastShape({operand_shape}));
*instr.mutable_shape() = shape.ToProto();
for (const ReplicaGroup& group : replica_groups) {
*instr.add_replica_groups() = group;
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr), HloOpcode::kCollectiveBroadcast,
{operand});
});
}
XlaOp XlaBuilder::CollectivePermute(
XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
  return CollectivePermuteImpl(operand, source_target_pairs, channel_id,
                               /*async=*/false);
}
XlaOp XlaBuilder::CollectivePermuteImpl(
XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id, bool async) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(
Shape shape,
ShapeInference::InferCollectivePermuteShape({operand_shape}));
*instr.mutable_shape() = shape.ToProto();
for (const auto& pair : source_target_pairs) {
auto* proto_pair = instr.add_source_target_pairs();
proto_pair->set_source(pair.first);
proto_pair->set_target(pair.second);
}
if (channel_id.has_value()) {
instr.set_channel_id(channel_id->handle());
}
return AddInstruction(std::move(instr),
async ? HloOpcode::kCollectivePermuteStart
: HloOpcode::kCollectivePermute,
{operand});
});
}
XlaOp XlaBuilder::ReplicaId() {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
*instr.mutable_shape() = ShapeUtil::MakeShape(U32, {}).ToProto();
return AddInstruction(std::move(instr), HloOpcode::kReplicaId, {});
});
}
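// As with ReduceWindow, a dynamic windowed dimension combined with kSame
// padding lowers to a "DynamicSelectAndScatterSamePadding" custom call;
// otherwise this forwards to SelectAndScatterWithGeneralPadding.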
XlaOp XlaBuilder::SelectAndScatter(XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding, XlaOp source,
XlaOp init_value,
const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
std::vector<std::pair<int64_t, int64_t>> padding_values =
MakePadding(operand_shape->dimensions(), window_dimensions,
window_strides, padding);
    TF_ASSIGN_OR_RETURN(auto window,
                        ShapeInference::InferWindowFromDimensions(
                            window_dimensions, window_strides, padding_values,
                            /*lhs_dilation=*/{},
                            /*rhs_dilation=*/{}));
PaddingType padding_type = PADDING_INVALID;
for (int64_t i = 0; i < operand_shape->rank(); ++i) {
if (operand_shape->is_dynamic_dimension(i) &&
!window_util::IsTrivialWindowDimension(window.dimensions(i)) &&
padding == Padding::kSame) {
padding_type = PADDING_SAME;
}
}
if (padding_type == PADDING_SAME) {
TF_ASSIGN_OR_RETURN(
HloInstructionProto instr,
SelectAndScatterInternal(operand, select, window_dimensions,
window_strides, padding_values, source,
init_value, scatter));
instr.set_custom_call_target("DynamicSelectAndScatterSamePadding");
return AddInstruction(std::move(instr), HloOpcode::kCustomCall,
{operand, source, init_value});
}
return SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding_values,
source, init_value, scatter);
});
}
absl::StatusOr<HloInstructionProto> XlaBuilder::SelectAndScatterInternal(
XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, XlaOp source,
XlaOp init_value, const XlaComputation& scatter) {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* source_shape, GetShapePtr(source));
TF_ASSIGN_OR_RETURN(const Shape* init_shape, GetShapePtr(init_value));
TF_ASSIGN_OR_RETURN(const ProgramShape& select_shape,
select.GetProgramShape());
TF_ASSIGN_OR_RETURN(const ProgramShape& scatter_shape,
scatter.GetProgramShape());
  TF_ASSIGN_OR_RETURN(*instr.mutable_window(),
                      ShapeInference::InferWindowFromDimensions(
                          window_dimensions, window_strides, padding,
                          /*lhs_dilation=*/{}, /*rhs_dilation=*/{}));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferSelectAndScatterShape(
*operand_shape, select_shape, instr.window(),
*source_shape, *init_shape, scatter_shape));
*instr.mutable_shape() = shape.ToProto();
AddCalledComputation(select, &instr);
AddCalledComputation(scatter, &instr);
return instr;
}
XlaOp XlaBuilder::SelectAndScatterWithGeneralPadding(
XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, XlaOp source,
XlaOp init_value, const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(HloInstructionProto instr,
SelectAndScatterInternal(
operand, select, window_dimensions, window_strides,
padding, source, init_value, scatter));
return AddInstruction(std::move(instr), HloOpcode::kSelectAndScatter,
{operand, source, init_value});
});
}
XlaOp XlaBuilder::ReducePrecision(XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferReducePrecisionShape(
*operand_shape, exponent_bits, mantissa_bits));
return ReducePrecisionInternal(shape, operand, exponent_bits,
mantissa_bits);
});
}
absl::StatusOr<XlaOp> XlaBuilder::ReducePrecisionInternal(
const Shape& shape, XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.set_exponent_bits(exponent_bits);
instr.set_mantissa_bits(mantissa_bits);
return AddInstruction(std::move(instr), HloOpcode::kReducePrecision,
{operand});
}
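// Creates an implicit token via kAfterAll, then forwards to SendWithToken,
// which emits a device-to-device kSend/kSendDone pair.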
void XlaBuilder::Send(XlaOp operand, const ChannelHandle& handle) {
ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
return SendWithToken(operand, token, handle);
});
}
XlaOp XlaBuilder::SendWithToken(XlaOp operand, XlaOp token,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
return InvalidArgument("Send must use a device-to-device channel");
}
    XlaOp send_op = internal::XlaBuilderFriend::BuildSend(
        this, operand, token, handle, /*is_host_transfer=*/false);
    return internal::XlaBuilderFriend::BuildSendDone(
        this, send_op, handle, /*is_host_transfer=*/false);
});
}
XlaOp XlaBuilder::Recv(const Shape& shape, const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto token_instr;
*token_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
XlaOp recv = RecvWithToken(token, shape, handle);
HloInstructionProto recv_data;
*recv_data.mutable_shape() = shape.ToProto();
recv_data.set_tuple_index(0);
return AddInstruction(std::move(recv_data), HloOpcode::kGetTupleElement,
{recv});
});
}
XlaOp XlaBuilder::RecvWithToken(XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
return InvalidArgument("Recv must use a device-to-device channel");
}
    XlaOp recv_op = internal::XlaBuilderFriend::BuildRecv(
        this, token, shape, handle, /*is_host_transfer=*/false);
    return internal::XlaBuilderFriend::BuildRecvDone(
        this, recv_op, shape, handle, /*is_host_transfer=*/false);
});
}
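// Emits a host-transfer kSend/kSendDone pair over a DEVICE_TO_HOST channel.
// The operand must be an array whose shape is compatible with
// `shape_with_layout`; the token returned by kSendDone is the result.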
XlaOp XlaBuilder::SendToHost(XlaOp operand, XlaOp token,
const Shape& shape_with_layout,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape_with_layout)) {
return InvalidArgument("Shape passed to SendToHost must have a layout");
}
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
if (!ShapeUtil::Compatible(*operand_shape, shape_with_layout)) {
return InvalidArgument(
"SendToHost shape %s must be compatible with operand shape %s",
ShapeUtil::HumanStringWithLayout(shape_with_layout),
ShapeUtil::HumanStringWithLayout(*operand_shape));
}
if (!operand_shape->IsArray()) {
return InvalidArgument("SendToHost only supports array shapes, shape: %s",
ShapeUtil::HumanString(*operand_shape));
}
if (handle.type() != ChannelHandle::DEVICE_TO_HOST) {
return InvalidArgument("SendToHost must use a device-to-host channel");
}
HloInstructionProto send_instr;
*send_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape_with_layout,
ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()})
.ToProto();
send_instr.set_channel_id(handle.handle());
send_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp send,
AddInstruction(std::move(send_instr), HloOpcode::kSend,
{operand, token}));
HloInstructionProto send_done_instr;
*send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape().ToProto();
send_done_instr.set_channel_id(handle.handle());
send_done_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp send_done,
AddInstruction(std::move(send_done_instr),
HloOpcode::kSendDone, {send}));
return send_done;
});
}
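// Emits a host-transfer kRecv/kRecvDone pair over a HOST_TO_DEVICE channel;
// `shape` must be an array with a layout. The result is a (data, token)
// tuple.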
XlaOp XlaBuilder::RecvFromHost(XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("Shape passed to RecvFromHost must have a layout");
}
if (!shape.IsArray()) {
return InvalidArgument(
"RecvFromHost only supports array shapes, shape: %s",
ShapeUtil::HumanString(shape));
}
if (handle.type() != ChannelHandle::HOST_TO_DEVICE) {
return InvalidArgument("RecvFromHost must use a host-to-device channel");
}
HloInstructionProto recv_instr;
*recv_instr.mutable_shape() =
ShapeUtil::MakeTupleShape(
{shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()})
.ToProto();
recv_instr.set_channel_id(handle.handle());
recv_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp recv, AddInstruction(std::move(recv_instr),
HloOpcode::kRecv, {token}));
HloInstructionProto recv_done_instr;
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()})
.ToProto();
recv_done_instr.set_channel_id(handle.handle());
recv_done_instr.set_is_host_transfer(true);
return AddInstruction(std::move(recv_done_instr), HloOpcode::kRecvDone,
{recv});
});
}
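// Static dimension sizes fold to an S32 constant at build time; only dynamic
// dimensions emit a kGetDimensionSize instruction.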
XlaOp XlaBuilder::GetDimensionSize(XlaOp operand, int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferGetDimensionSizeShape(
*operand_shape, dimension));
if (operand_shape->is_static_dimension(dimension)) {
return ConstantR0<int32_t>(this, operand_shape->dimensions(dimension));
}
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kGetDimensionSize,
{operand});
});
}
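// Marks `dimension` static and pins its size to the dimension's bound via a
// kSetDimensionSize instruction.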
XlaOp XlaBuilder::RemoveDynamicDimension(XlaOp operand, int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
Shape shape = *operand_shape;
shape.set_dynamic_dimension(dimension, false);
XlaOp static_size =
ConstantR0<int32_t>(this, operand_shape->dimensions(dimension));
return SetDimensionSizeInternal(shape, operand, static_size, dimension);
});
}
XlaOp XlaBuilder::SetDimensionSize(XlaOp operand, XlaOp val,
int64_t dimension) {
return ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* operand_shape, GetShapePtr(operand));
TF_ASSIGN_OR_RETURN(const Shape* val_shape, GetShapePtr(val));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferSetDimensionSizeShape(
*operand_shape, *val_shape, dimension));
return SetDimensionSizeInternal(shape, operand, val, dimension);
});
}
absl::StatusOr<XlaOp> XlaBuilder::SetDimensionSizeInternal(const Shape& shape,
XlaOp operand,
XlaOp val,
int64_t dimension) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
instr.add_dimensions(dimension);
return AddInstruction(std::move(instr), HloOpcode::kSetDimensionSize,
{operand, val});
}
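// Delegates to IsConstantVisitor, which walks the operand's transitive
// dependencies and clears `is_constant` when the value depends on a
// parameter.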
absl::StatusOr<bool> XlaBuilder::IsConstant(XlaOp operand) const {
TF_RETURN_IF_ERROR(first_error_);
TF_RETURN_IF_ERROR(LookUpInstruction(operand).status());
bool is_constant = true;
absl::flat_hash_set<int64_t> visited;
  IsConstantVisitor(operand.handle(), /*depth=*/0, &visited, &is_constant);
return is_constant;
}
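// Clones the transitive closure of `root_op` into a standalone computation.
// GetDimensionSize and SetBound instructions are replaced by constants
// (dynamic dimensions become -1 when `dynamic_dimension_is_minus_one` is
// set), and GetTupleElement of a Tuple is short-circuited to the tuple's
// operand via the substitutions map.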
absl::StatusOr<XlaComputation> XlaBuilder::BuildConstantSubGraph(
XlaOp root_op, bool dynamic_dimension_is_minus_one) {
TF_ASSIGN_OR_RETURN(bool is_constant, IsConstant(root_op));
if (!is_constant) {
auto op_status = LookUpInstruction(root_op);
std::string op_string =
op_status.ok() ? op_status.value()->name() : "<unknown operation>";
return InvalidArgument(
"Operand to BuildConstantSubGraph depends on a parameter.\n\n"
" op requested for constant subgraph: %s\n\n"
"This is an internal error that typically happens when the XLA user "
"(e.g. TensorFlow) is attempting to determine a value that must be a "
"compile-time constant (e.g. an array dimension) but it is not capable "
"of being evaluated at XLA compile time.\n\n"
"Please file a usability bug with the framework being used (e.g. "
"TensorFlow).",
op_string);
}
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
LookUpInstruction(root_op));
if (VLOG_IS_ON(4)) {
VLOG(4) << "Build constant subgraph for:\n" << OpToString(root_op);
}
HloComputationProto entry;
SetProtoIdAndName(&entry, StrCat(name_, "_compute_constant"), kNameSeparator,
GetNextId());
ProgramShapeProto* program_shape = entry.mutable_program_shape();
*program_shape->mutable_result() = root->shape();
std::set<int64_t> related_ops;
absl::flat_hash_map<int64_t, int64_t> substitutions;
absl::flat_hash_set<int64_t> related_calls;
std::queue<int64_t> worklist;
worklist.push(root->id());
related_ops.insert(root->id());
while (!worklist.empty()) {
int64_t handle = worklist.front();
worklist.pop();
TF_ASSIGN_OR_RETURN(const HloInstructionProto* instr_proto,
LookUpInstructionByHandle(handle));
auto default_behavior = [&related_ops, &worklist, &related_calls,
instr_proto]() {
for (int64_t id : instr_proto->operand_ids()) {
if (related_ops.insert(id).second) {
worklist.push(id);
}
}
for (int64_t called_id : instr_proto->called_computation_ids()) {
related_calls.insert(called_id);
}
};
if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetDimensionSize) ||
InstrIsSetBound(instr_proto)) {
int32_t constant_value = -1;
HloInstructionProto const_instr;
if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetDimensionSize)) {
int64_t dimension = instr_proto->dimensions(0);
int64_t operand_handle = instr_proto->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
LookUpInstructionByHandle(operand_handle));
if (!(operand_proto->shape().is_dynamic_dimension(dimension) &&
dynamic_dimension_is_minus_one)) {
constant_value = static_cast<int32_t>(
operand_proto->shape().dimensions(dimension));
}
Literal literal = LiteralUtil::CreateR0(constant_value);
*const_instr.mutable_literal() = literal.ToProto();
*const_instr.mutable_shape() = literal.shape().ToProto();
} else {
if (instr_proto->literal().shape().element_type() == TUPLE) {
*const_instr.mutable_literal() =
instr_proto->literal().tuple_literals(0);
} else {
*const_instr.mutable_literal() = instr_proto->literal();
}
*const_instr.mutable_shape() = instr_proto->shape();
}
*const_instr.mutable_opcode() =
std::string(HloOpcodeString(HloOpcode::kConstant));
const_instr.set_id(handle);
*const_instr.mutable_name() =
GetFullName(const_instr.opcode(), kNameSeparator, const_instr.id());
      *entry.add_instructions() = const_instr;
} else if (instr_proto->opcode() ==
HloOpcodeString(HloOpcode::kGetTupleElement)) {
TF_ASSIGN_OR_RETURN(
const HloInstructionProto* maybe_tuple_instr,
LookUpInstructionByHandle(instr_proto->operand_ids(0)));
if (maybe_tuple_instr->opcode() == HloOpcodeString(HloOpcode::kTuple)) {
int64_t id = maybe_tuple_instr->operand_ids(instr_proto->tuple_index());
if (related_ops.insert(id).second) {
worklist.push(id);
}
substitutions[handle] = id;
} else {
default_behavior();
}
} else {
default_behavior();
}
}
int64_t root_id = root->id();
auto it = substitutions.find(root_id);
while (it != substitutions.end()) {
root_id = it->second;
it = substitutions.find(root_id);
}
entry.set_root_id(root_id);
for (int64_t id : related_ops) {
if (substitutions.find(id) != substitutions.end()) {
continue;
}
TF_ASSIGN_OR_RETURN(const HloInstructionProto* instr_src,
LookUpInstructionByHandle(id));
if (instr_src->opcode() == HloOpcodeString(HloOpcode::kGetDimensionSize) ||
InstrIsSetBound(instr_src)) {
continue;
}
HloInstructionProto* instr = entry.add_instructions();
*instr = *instr_src;
instr->clear_operand_ids();
for (int64_t operand_id : instr_src->operand_ids()) {
auto it = substitutions.find(operand_id);
while (it != substitutions.end()) {
operand_id = it->second;
it = substitutions.find(operand_id);
}
instr->add_operand_ids(operand_id);
}
    const std::string new_name =
        StrCat(instr->name(), ".", entry.id(), ".", instr->id());
    instr->set_name(new_name);
}
XlaComputation computation(entry.id());
HloModuleProto* module = computation.mutable_proto();
module->set_name(entry.name());
module->set_id(entry.id());
module->set_entry_computation_name(entry.name());
module->set_entry_computation_id(entry.id());
*module->mutable_host_program_shape() = *program_shape;
for (auto& e : embedded_) {
if (related_calls.find(e.second.id()) != related_calls.end()) {
*module->add_computations() = e.second;
}
}
*module->add_computations() = std::move(entry);
if (VLOG_IS_ON(4)) {
VLOG(4) << "Constant computation:\n" << module->DebugString();
}
return std::move(computation);
}
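// Creates a child builder for building embedded computations; it inherits
// die_immediately_on_error_ and records this builder as its parent.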
std::unique_ptr<XlaBuilder> XlaBuilder::CreateSubBuilder(
const std::string& computation_name) {
auto sub_builder = std::make_unique<XlaBuilder>(computation_name);
sub_builder->parent_builder_ = this;
sub_builder->die_immediately_on_error_ = this->die_immediately_on_error_;
return sub_builder;
}
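// Spatial dimensions are assigned indices 2..num_spatial_dims+1 for the
// input, kernel, and output; the batch/feature positions come from the
// kConv* dimension constants.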
ConvolutionDimensionNumbers
XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(kConvBatchDimension);
dimension_numbers.set_input_feature_dimension(kConvFeatureDimension);
dimension_numbers.set_output_batch_dimension(kConvBatchDimension);
dimension_numbers.set_output_feature_dimension(kConvFeatureDimension);
dimension_numbers.set_kernel_output_feature_dimension(
kConvKernelOutputDimension);
dimension_numbers.set_kernel_input_feature_dimension(
kConvKernelInputDimension);
for (int i = 0; i < num_spatial_dims; ++i) {
dimension_numbers.add_input_spatial_dimensions(i + 2);
dimension_numbers.add_kernel_spatial_dimensions(i + 2);
dimension_numbers.add_output_spatial_dimensions(i + 2);
}
return dimension_numbers;
}
absl::Status XlaBuilder::Validate(
const ConvolutionDimensionNumbers& dnum) {
  if (dnum.input_spatial_dimensions_size() < 2) {
    return FailedPrecondition("input spatial dimensions < 2: %d",
                              dnum.input_spatial_dimensions_size());
  }
  if (dnum.kernel_spatial_dimensions_size() < 2) {
    return FailedPrecondition("kernel spatial dimensions < 2: %d",
                              dnum.kernel_spatial_dimensions_size());
  }
  if (dnum.output_spatial_dimensions_size() < 2) {
    return FailedPrecondition("output spatial dimensions < 2: %d",
                              dnum.output_spatial_dimensions_size());
  }
if (std::set<int64_t>(
{dnum.input_batch_dimension(), dnum.input_feature_dimension(),
dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the input are not unique: (%d, %d, %d, "
"%d)",
dnum.input_batch_dimension(), dnum.input_feature_dimension(),
dnum.input_spatial_dimensions(0), dnum.input_spatial_dimensions(1));
}
if (std::set<int64_t>({dnum.kernel_output_feature_dimension(),
dnum.kernel_input_feature_dimension(),
dnum.kernel_spatial_dimensions(0),
dnum.kernel_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the weight are not unique: (%d, %d, %d, "
"%d)",
dnum.kernel_output_feature_dimension(),
dnum.kernel_input_feature_dimension(),
dnum.kernel_spatial_dimensions(0), dnum.kernel_spatial_dimensions(1));
}
if (std::set<int64_t>({dnum.output_batch_dimension(),
dnum.output_feature_dimension(),
dnum.output_spatial_dimensions(0),
dnum.output_spatial_dimensions(1)})
.size() != 4) {
return FailedPrecondition(
"dimension numbers for the output are not unique: (%d, %d, %d, "
"%d)",
dnum.output_batch_dimension(), dnum.output_feature_dimension(),
dnum.output_spatial_dimensions(0), dnum.output_spatial_dimensions(1));
}
return absl::OkStatus();
}
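// Assigns a fresh handle and name, validates that every operand belongs to
// this builder, attaches (one-shot) metadata, sharding, and frontend
// attributes, then records the instruction and caches its shape.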
absl::StatusOr<XlaOp> XlaBuilder::AddInstruction(
HloInstructionProto&& instr, HloOpcode opcode,
absl::Span<const XlaOp> operands) {
TF_RETURN_IF_ERROR(first_error_);
const int64_t handle = GetNextId();
instr.set_id(handle);
*instr.mutable_opcode() = std::string(HloOpcodeString(opcode));
if (instr.name().empty()) {
instr.set_name(instr.opcode());
}
for (const auto& operand : operands) {
if (operand.builder_ == nullptr) {
return InvalidArgument("invalid XlaOp with handle %d", operand.handle());
}
if (operand.builder_ != this) {
return InvalidArgument("Do not add XlaOp from builder %s to builder %s",
operand.builder_->name(), this->name());
}
instr.add_operand_ids(operand.handle());
}
if (one_shot_metadata_.has_value()) {
*instr.mutable_metadata() = one_shot_metadata_.value();
one_shot_metadata_.reset();
} else {
*instr.mutable_metadata() = metadata_;
}
if (sharding_) {
TF_RETURN_IF_ERROR(NormalizeAndAssignSharing(&instr, *sharding_));
}
*instr.mutable_frontend_attributes() = frontend_attributes_;
handle_to_index_[handle] = instructions_.size();
instructions_.push_back(std::move(instr));
instruction_shapes_.push_back(
std::make_unique<Shape>(instructions_.back().shape()));
XlaOp op(handle, this);
return op;
}
absl::StatusOr<XlaOp> XlaBuilder::AddOpWithShape(
HloOpcode opcode, const Shape& shape, absl::Span<const XlaOp> operands) {
HloInstructionProto instr;
*instr.mutable_shape() = shape.ToProto();
return AddInstruction(std::move(instr), opcode, operands);
}
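// Imports every computation of `computation` into this builder, remapping
// computation and instruction ids (including operand, control-predecessor,
// and called-computation references) to freshly generated ids so they do not
// collide with existing ones.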
void XlaBuilder::AddCalledComputation(const XlaComputation& computation,
HloInstructionProto* instr) {
absl::flat_hash_map<int64_t, int64_t> remapped_ids;
std::vector<HloComputationProto> imported_computations;
imported_computations.reserve(computation.proto().computations_size());
for (const HloComputationProto& e : computation.proto().computations()) {
HloComputationProto new_computation(e);
int64_t computation_id = GetNextId();
remapped_ids[new_computation.id()] = computation_id;
SetProtoIdAndName(&new_computation,
GetBaseName(new_computation.name(), kNameSeparator),
kNameSeparator, computation_id);
for (auto& instruction : *new_computation.mutable_instructions()) {
int64_t instruction_id = GetNextId();
remapped_ids[instruction.id()] = instruction_id;
SetProtoIdAndName(&instruction,
GetBaseName(instruction.name(), kNameSeparator),
kNameSeparator, instruction_id);
}
new_computation.set_root_id(remapped_ids.at(new_computation.root_id()));
imported_computations.push_back(std::move(new_computation));
}
instr->add_called_computation_ids(
remapped_ids.at(computation.proto().entry_computation_id()));
for (auto& imported_computation : imported_computations) {
for (auto& instruction : *imported_computation.mutable_instructions()) {
for (auto& operand_id : *instruction.mutable_operand_ids()) {
operand_id = remapped_ids.at(operand_id);
}
for (auto& control_predecessor_id :
*instruction.mutable_control_predecessor_ids()) {
control_predecessor_id = remapped_ids.at(control_predecessor_id);
}
for (auto& called_computation_id :
*instruction.mutable_called_computation_ids()) {
called_computation_id = remapped_ids.at(called_computation_id);
}
}
int64_t computation_id = imported_computation.id();
for (int64_t i = 0; i < imported_computation.instructions_size(); ++i) {
ImportedInstruction imported_instruction;
imported_instruction.computation_id = computation_id;
imported_instruction.instruction_index = i;
handle_to_imported_index_.insert(
{imported_computation.instructions(i).id(), imported_instruction});
}
embedded_.insert({computation_id, std::move(imported_computation)});
}
}
absl::StatusOr<const HloInstructionProto*> XlaBuilder::LookUpInstruction(
const XlaOp op) const {
TF_RETURN_IF_ERROR(first_error_);
return LookUpInstructionInternal<const HloInstructionProto*>(op);
}
absl::StatusOr<const HloInstructionProto*>
XlaBuilder::LookUpInstructionByHandle(int64_t handle) const {
return LookUpInstructionByHandleInternal<const HloInstructionProto*>(handle);
}
absl::StatusOr<HloInstructionProto*> XlaBuilder::LookUpMutableInstruction(
const XlaOp op) {
TF_RETURN_IF_ERROR(first_error_);
return LookUpInstructionInternal<HloInstructionProto*>(op);
}
absl::StatusOr<HloInstructionProto*>
XlaBuilder::LookUpMutableInstructionByHandle(int64_t handle) {
return LookUpInstructionByHandleInternal<HloInstructionProto*>(handle);
}
XlaOp Parameter(XlaBuilder* builder, int64_t parameter_number,
const Shape& shape, const std::string& name) {
std::vector<bool> empty_bools;
return Parameter(builder, parameter_number, shape, name, empty_bools);
}
XlaOp Parameter(XlaBuilder* builder, int64_t parameter_number,
const Shape& shape, const std::string& name,
const std::vector<bool>& replicated_at_leaf_buffers) {
return builder->Parameter(parameter_number, shape, name,
replicated_at_leaf_buffers);
}
XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal) {
return builder->ConstantLiteral(literal);
}
XlaOp Broadcast(const XlaOp operand,
absl::Span<const int64_t> broadcast_sizes) {
return operand.builder()->Broadcast(operand, broadcast_sizes);
}
XlaOp BroadcastInDim(const XlaOp operand,
absl::Span<const int64_t> out_dim_size,
absl::Span<const int64_t> broadcast_dimensions) {
return operand.builder()->BroadcastInDim(operand, out_dim_size,
broadcast_dimensions);
}
XlaOp MhloDynamicReshape(const XlaOp operand, const XlaOp output_shape,
const Shape& shape) {
return operand.builder()->MhloDynamicReshape(operand, output_shape, shape);
}
XlaOp MhloDynamicBroadcastInDim(const XlaOp operand,
const XlaOp output_dimensions,
absl::Span<const int64_t> broadcast_dimensions,
const Shape& output_shape) {
return operand.builder()->MhloDynamicBroadcastInDim(
operand, output_dimensions, broadcast_dimensions, output_shape);
}
XlaOp Copy(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCopy, operand);
}
XlaOp Pad(const XlaOp operand, const XlaOp padding_value,
const PaddingConfig& padding_config) {
return operand.builder()->Pad(operand, padding_value, padding_config);
}
XlaOp PadInDim(XlaOp operand, XlaOp padding_value, int64_t dimno,
int64_t pad_lo, int64_t pad_hi) {
return operand.builder()->PadInDim(operand, padding_value, dimno, pad_lo,
pad_hi);
}
XlaOp Reshape(const XlaOp operand, absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> new_sizes) {
return operand.builder()->Reshape(operand, dimensions, new_sizes);
}
XlaOp Reshape(const XlaOp operand, absl::Span<const int64_t> new_sizes) {
return operand.builder()->Reshape(operand, new_sizes);
}
XlaOp Reshape(const Shape& shape, XlaOp operand) {
return operand.builder()->Reshape(shape, operand);
}
XlaOp DynamicReshape(XlaOp operand, absl::Span<const XlaOp> dim_sizes,
absl::Span<const int64_t> new_size_bounds,
const std::vector<bool>& dims_are_dynamic) {
return operand.builder()->DynamicReshape(operand, dim_sizes, new_size_bounds,
dims_are_dynamic);
}
XlaOp ReshapeWithInferredDimension(XlaOp operand,
absl::Span<const int64_t> new_sizes,
int64_t inferred_dimension) {
return operand.builder()->Reshape(operand, new_sizes, inferred_dimension);
}
XlaOp Collapse(const XlaOp operand, absl::Span<const int64_t> dimensions) {
return operand.builder()->Collapse(operand, dimensions);
}
XlaOp Slice(const XlaOp operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return operand.builder()->Slice(operand, start_indices, limit_indices,
strides);
}
XlaOp SliceInDim(const XlaOp operand, int64_t start_index, int64_t limit_index,
int64_t stride, int64_t dimno) {
return operand.builder()->SliceInDim(operand, start_index, limit_index,
stride, dimno);
}
XlaOp DynamicSlice(const XlaOp operand, absl::Span<const XlaOp> start_indices,
absl::Span<const int64_t> slice_sizes) {
return operand.builder()->DynamicSlice(operand, start_indices, slice_sizes);
}
XlaOp DynamicUpdateSlice(const XlaOp operand, const XlaOp update,
absl::Span<const XlaOp> start_indices) {
return operand.builder()->DynamicUpdateSlice(operand, update, start_indices);
}
XlaOp ConcatInDim(XlaBuilder* builder, absl::Span<const XlaOp> operands,
int64_t dimension) {
return builder->ConcatInDim(operands, dimension);
}
XlaOp Select(const XlaOp pred, const XlaOp on_true, const XlaOp on_false) {
return pred.builder()->Select(pred, on_true, on_false);
}
XlaOp Tuple(XlaBuilder* builder, absl::Span<const XlaOp> elements) {
return builder->Tuple(elements);
}
XlaOp GetTupleElement(const XlaOp tuple_data, int64_t index) {
return tuple_data.builder()->GetTupleElement(tuple_data, index);
}
XlaOp Eq(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kEq);
}
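// Uses total-order comparison semantics for floating-point element types (so
// NaNs compare consistently); other types use their default comparison type.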
static XlaOp CompareTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection comparison_direction) {
auto b = lhs.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto operand_shape, b->GetShape(lhs));
auto operand_element_type = operand_shape.element_type();
auto compare_type =
primitive_util::IsFloatingPointType(operand_element_type)
? Comparison::Type::kFloatTotalOrder
: Comparison::DefaultComparisonType(operand_element_type);
return Compare(lhs, rhs, broadcast_dimensions, comparison_direction,
compare_type);
});
}
XlaOp EqTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kEq);
}
XlaOp Ne(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kNe);
}
XlaOp NeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kNe);
}
XlaOp Ge(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kGe);
}
XlaOp GeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kGe);
}
XlaOp Gt(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kGt);
}
XlaOp GtTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kGt);
}
XlaOp Le(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kLe);
}
XlaOp LeTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kLe);
}
XlaOp Lt(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return Compare(lhs, rhs, broadcast_dimensions, ComparisonDirection::kLt);
}
XlaOp LtTotalOrder(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return CompareTotalOrder(lhs, rhs, broadcast_dimensions,
ComparisonDirection::kLt);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection direction) {
return lhs.builder()->BinaryOp(HloOpcode::kCompare, lhs, rhs,
broadcast_dimensions, direction);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions,
ComparisonDirection direction, Comparison::Type compare_type) {
return lhs.builder()->BinaryOp(HloOpcode::kCompare, lhs, rhs,
broadcast_dimensions, direction, compare_type);
}
XlaOp Compare(const XlaOp lhs, const XlaOp rhs, ComparisonDirection direction) {
return Compare(lhs, rhs, {}, direction);
}
XlaOp Dot(const XlaOp lhs, const XlaOp rhs,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->Dot(lhs, rhs, precision_config, preferred_element_type);
}
XlaOp DotGeneral(const XlaOp lhs, const XlaOp rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DotGeneral(lhs, rhs, dimension_numbers,
precision_config, preferred_element_type);
}
XlaOp SparseDot(const XlaOp lhs, const XlaOp rhs,
absl::Span<const XlaOp> sparse_meta,
absl::Span<const SparsityDescriptor> sparsity,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->SparseDot(lhs, rhs, sparse_meta, sparsity,
dimension_numbers, precision_config,
preferred_element_type);
}
XlaOp Conv(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides, Padding padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->Conv(lhs, rhs, window_strides, padding,
feature_group_count, batch_group_count,
precision_config, preferred_element_type);
}
XlaOp ConvWithGeneralPadding(
const XlaOp lhs, const XlaOp rhs, absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvWithGeneralPadding(
lhs, rhs, window_strides, padding, feature_group_count, batch_group_count,
precision_config, preferred_element_type);
}
XlaOp ConvWithGeneralDimensions(
const XlaOp lhs, const XlaOp rhs, absl::Span<const int64_t> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding, dimension_numbers, feature_group_count,
batch_group_count, precision_config, preferred_element_type);
}
XlaOp ConvGeneral(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->ConvGeneral(
lhs, rhs, window_strides, padding, dimension_numbers, feature_group_count,
batch_group_count, precision_config, preferred_element_type);
}
XlaOp ConvGeneralDilated(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::optional<std::vector<bool>> window_reversal) {
return lhs.builder()->ConvGeneralDilated(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, preferred_element_type, window_reversal);
}
XlaOp DynamicConvInputGrad(
XlaOp input_sizes, const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DynamicConvInputGrad(
input_sizes, lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp DynamicConvKernelGrad(
XlaOp activations, XlaOp gradients,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config, PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return activations.builder()->DynamicConvKernelGrad(
activations, gradients, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp DynamicConvForward(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding,
absl::Span<const int64_t> lhs_dilation,
absl::Span<const int64_t> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64_t feature_group_count, int64_t batch_group_count,
const PrecisionConfig* precision_config,
PaddingType padding_type,
std::optional<PrimitiveType> preferred_element_type) {
return lhs.builder()->DynamicConvForward(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config, padding_type, preferred_element_type);
}
XlaOp Fft(const XlaOp operand, FftType fft_type,
absl::Span<const int64_t> fft_length) {
return operand.builder()->Fft(operand, fft_type, fft_length);
}
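// Solves systems of linear equations with triangular coefficient matrices.
// Packs the flags into a TriangularSolveOptions proto, infers the result
// shape from the operand shapes, and forwards to the builder's
// TriangularSolveInternal.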
XlaOp TriangularSolve(XlaOp a, XlaOp b, bool left_side, bool lower,
bool unit_diagonal,
TriangularSolveOptions::Transpose transpose_a) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* a_shape, builder->GetShapePtr(a));
TF_ASSIGN_OR_RETURN(const Shape* b_shape, builder->GetShapePtr(b));
TriangularSolveOptions options;
options.set_left_side(left_side);
options.set_lower(lower);
options.set_unit_diagonal(unit_diagonal);
options.set_transpose_a(transpose_a);
TF_ASSIGN_OR_RETURN(Shape shape, ShapeInference::InferTriangularSolveShape(
*a_shape, *b_shape, options));
return builder->TriangularSolveInternal(shape, a, b, std::move(options));
});
}
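// Computes the Cholesky decomposition of `a`; `lower` selects whether the
// lower or upper triangular factor is produced. Shape inference checks that
// `a` is a (batch of) square matrix.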
XlaOp Cholesky(XlaOp a, bool lower) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* a_shape, builder->GetShapePtr(a));
TF_ASSIGN_OR_RETURN(Shape shape,
ShapeInference::InferCholeskyShape(*a_shape));
return builder->CholeskyInternal(shape, a, lower);
});
}
XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
const std::string& config) {
return builder->Infeed(shape, config);
}
void Outfeed(const XlaOp operand, const Shape& shape_with_layout,
const std::string& outfeed_config) {
return operand.builder()->Outfeed(operand, shape_with_layout, outfeed_config);
}
XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
absl::Span<const XlaOp> operands) {
return builder->Call(computation, operands);
}
XlaOp CompositeCall(XlaBuilder* builder, const XlaComputation& computation,
absl::Span<const XlaOp> operands, const std::string& name,
std::optional<absl::string_view> attributes,
std::optional<int64_t> version) {
return builder->CompositeCall(computation, operands, name, attributes,
version);
}
XlaOp CustomCall(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(call_target_name, operands, shape, opaque,
                             /*operand_shapes_with_layout=*/std::nullopt,
                             has_side_effect, output_operand_aliasing, literal,
                             /*window=*/std::nullopt, /*dnums=*/std::nullopt,
                             schedule, api_version);
}
XlaOp CustomCallWithComputation(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const XlaComputation& computation,
const Shape& shape, const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(
      call_target_name, operands, computation, shape, opaque,
      /*operand_shapes_with_layout=*/std::nullopt, has_side_effect,
      output_operand_aliasing, literal, schedule, api_version);
}
XlaOp CustomCallWithLayout(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
absl::Span<const Shape> operand_shapes_with_layout,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, CustomCallSchedule schedule,
CustomCallApiVersion api_version) {
  return builder->CustomCall(
      call_target_name, operands, shape, opaque, operand_shapes_with_layout,
      has_side_effect, output_operand_aliasing, literal,
      /*window=*/std::nullopt, /*dnums=*/std::nullopt, schedule, api_version);
}
XlaOp CustomCallWithConvDnums(
XlaBuilder* builder, const std::string& call_target_name,
absl::Span<const XlaOp> operands, const Shape& shape,
absl::Span<const Shape> operand_shapes_with_layout,
const std::string& opaque, bool has_side_effect,
absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_operand_aliasing,
const Literal* literal, Window window, ConvolutionDimensionNumbers dnums,
CustomCallSchedule schedule, CustomCallApiVersion api_version) {
std::optional<absl::Span<const Shape>> maybe_operand_shapes;
if (!operand_shapes_with_layout.empty()) {
maybe_operand_shapes = operand_shapes_with_layout;
}
return builder->CustomCall(call_target_name, operands, shape, opaque,
maybe_operand_shapes, has_side_effect,
output_operand_aliasing, literal, window, dnums,
schedule, api_version);
}
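// Blocks the compiler from moving computations across this point; the operand
// is passed through unchanged.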
XlaOp OptimizationBarrier(XlaOp operand) {
return operand.builder()->OptimizationBarrier(operand);
}
XlaOp Complex(const XlaOp real, const XlaOp imag,
absl::Span<const int64_t> broadcast_dimensions) {
return real.builder()->BinaryOp(HloOpcode::kComplex, real, imag,
broadcast_dimensions);
}
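// Complex conjugate, built from existing ops: conj(z) = real(z) - i*imag(z).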
XlaOp Conj(const XlaOp operand) {
return Complex(Real(operand), Neg(Imag(operand)));
}
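// Element-wise binary ops. `broadcast_dimensions` maps each dimension of the
// lower-rank operand onto a dimension of the higher-rank operand for in-dim
// broadcasting; leave it empty for same-rank or scalar operands.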
XlaOp Add(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kAdd, lhs, rhs,
broadcast_dimensions);
}
XlaOp Sub(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kSubtract, lhs, rhs,
broadcast_dimensions);
}
XlaOp Mul(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMultiply, lhs, rhs,
broadcast_dimensions);
}
XlaOp Div(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kDivide, lhs, rhs,
broadcast_dimensions);
}
XlaOp Rem(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kRemainder, lhs, rhs,
broadcast_dimensions);
}
XlaOp Max(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMaximum, lhs, rhs,
broadcast_dimensions);
}
XlaOp Min(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kMinimum, lhs, rhs,
broadcast_dimensions);
}
XlaOp And(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kAnd, lhs, rhs,
broadcast_dimensions);
}
XlaOp Or(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kOr, lhs, rhs,
broadcast_dimensions);
}
XlaOp Xor(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kXor, lhs, rhs,
broadcast_dimensions);
}
XlaOp Not(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kNot, operand);
}
XlaOp PopulationCount(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kPopulationCount, operand);
}
XlaOp ShiftLeft(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftLeft, lhs, rhs,
broadcast_dimensions);
}
XlaOp ShiftRightArithmetic(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftRightArithmetic, lhs, rhs,
broadcast_dimensions);
}
XlaOp ShiftRightLogical(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kShiftRightLogical, lhs, rhs,
broadcast_dimensions);
}
XlaOp Reduce(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return operand.builder()->Reduce(operand, init_value, computation,
dimensions_to_reduce);
}
XlaOp Reduce(XlaBuilder* builder, absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions_to_reduce) {
return builder->Reduce(operands, init_values, computation,
dimensions_to_reduce);
}
XlaOp ReduceAll(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation) {
return operand.builder()->ReduceAll(operand, init_value, computation);
}
XlaOp ReduceWindow(const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides, Padding padding) {
return operand.builder()->ReduceWindow(operand, init_value, computation,
window_dimensions, window_strides,
padding);
}
XlaOp ReduceWindow(absl::Span<const XlaOp> operands,
absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides, Padding padding) {
CHECK(!operands.empty());
return operands[0].builder()->ReduceWindow(operands, init_values, computation,
window_dimensions, window_strides,
padding);
}
XlaOp ReduceWindowWithGeneralPadding(
const XlaOp operand, const XlaOp init_value,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
return operand.builder()->ReduceWindowWithGeneralPadding(
absl::MakeSpan(&operand, 1), absl::MakeSpan(&init_value, 1), computation,
window_dimensions, window_strides, base_dilations, window_dilations,
padding);
}
XlaOp ReduceWindowWithGeneralPadding(
absl::Span<const XlaOp> operands, absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const int64_t> base_dilations,
absl::Span<const int64_t> window_dilations,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
CHECK(!operands.empty());
return operands[0].builder()->ReduceWindowWithGeneralPadding(
operands, init_values, computation, window_dimensions, window_strides,
base_dilations, window_dilations, padding);
}
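// Cross-replica gather: concatenates each participant's operand along
// `all_gather_dimension`, so that dimension grows by a factor of
// `shard_count` in the result.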
XlaOp AllGather(const XlaOp operand, int64_t all_gather_dimension,
int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->AllGather(operand, all_gather_dimension,
shard_count, replica_groups, channel_id,
layout, use_global_device_ids);
}
XlaOp AllGatherTuple(absl::Span<const XlaOp> operands,
int64_t all_gather_dimension, int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
CHECK(!operands.empty());
return operands[0].builder()->AllGather(
operands[0].builder()->Tuple(operands), all_gather_dimension, shard_count,
replica_groups, channel_id, layout, use_global_device_ids);
}
XlaOp CrossReplicaSum(const XlaOp operand,
absl::Span<const ReplicaGroup> replica_groups) {
return operand.builder()->CrossReplicaSum(operand, replica_groups);
}
XlaOp AllReduce(const XlaOp operand, const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->AllReduce(operand, computation, replica_groups,
channel_id, shape_with_layout,
use_global_device_ids);
}
XlaOp AllReduceTuple(absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Shape>& shape_with_layout,
const std::optional<bool> use_global_device_ids) {
CHECK(!operands.empty());
return operands[0].builder()->AllReduce(
operands[0].builder()->Tuple(operands), computation, replica_groups,
channel_id, shape_with_layout, use_global_device_ids);
}
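// Combines a cross-replica reduction with a scatter: the reduced result is
// split along `scatter_dimension`, so that dimension shrinks by a factor of
// `shard_count` in each participant's output.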
XlaOp ReduceScatter(const XlaOp operand, const XlaComputation& computation,
int64_t scatter_dimension, int64_t shard_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id,
const std::optional<Layout>& layout,
const std::optional<bool> use_global_device_ids) {
return operand.builder()->ReduceScatter(
operand, computation, scatter_dimension, shard_count, replica_groups,
channel_id, layout, use_global_device_ids);
}
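// Splits the operand into `split_count` pieces along `split_dimension`,
// exchanges the pieces among the participants, and concatenates what is
// received along `concat_dimension`.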
XlaOp AllToAll(const XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->AllToAll(operand, split_dimension, concat_dimension,
split_count, replica_groups, layout,
channel_id);
}
XlaOp AllToAllTuple(absl::Span<const XlaOp> operands,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
CHECK(!operands.empty());
return operands[0].builder()->AllToAllTuple(operands, replica_groups, layout,
channel_id);
}
XlaOp AllToAllTuple(const XlaOp operand, int64_t split_dimension,
int64_t concat_dimension, int64_t split_count,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<Layout>& layout,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->AllToAllTuple(operand, split_dimension,
concat_dimension, split_count,
replica_groups, layout, channel_id);
}
XlaOp CollectiveBroadcast(const XlaOp operand,
absl::Span<const ReplicaGroup> replica_groups,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->CollectiveBroadcast(operand, replica_groups,
channel_id);
}
XlaOp CollectivePermute(
const XlaOp operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<ChannelHandle>& channel_id) {
return operand.builder()->CollectivePermute(operand, source_target_pairs,
channel_id);
}
XlaOp ReplicaId(XlaBuilder* builder) { return builder->ReplicaId(); }
XlaOp SelectAndScatter(const XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
Padding padding, const XlaOp source,
const XlaOp init_value, const XlaComputation& scatter) {
return operand.builder()->SelectAndScatter(operand, select, window_dimensions,
window_strides, padding, source,
init_value, scatter);
}
XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp operand, const XlaComputation& select,
absl::Span<const int64_t> window_dimensions,
absl::Span<const int64_t> window_strides,
absl::Span<const std::pair<int64_t, int64_t>> padding, const XlaOp source,
const XlaOp init_value, const XlaComputation& scatter) {
return operand.builder()->SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter);
}
XlaOp Abs(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kAbs, operand);
}
XlaOp Atan2(const XlaOp y, const XlaOp x,
absl::Span<const int64_t> broadcast_dimensions) {
return y.builder()->BinaryOp(HloOpcode::kAtan2, y, x, broadcast_dimensions);
}
XlaOp Exp(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kExp, operand);
}
XlaOp Expm1(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kExpm1, operand);
}
XlaOp Floor(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kFloor, operand);
}
XlaOp Ceil(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCeil, operand);
}
XlaOp Round(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRoundNearestAfz, operand);
}
XlaOp RoundNearestEven(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRoundNearestEven, operand);
}
XlaOp Log(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLog, operand);
}
XlaOp Log1p(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLog1p, operand);
}
XlaOp Erf(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kErf, operand);
}
XlaOp Logistic(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kLogistic, operand);
}
XlaOp Sign(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSign, operand);
}
XlaOp Clz(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kClz, operand);
}
XlaOp Cos(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCos, operand);
}
XlaOp Sin(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSin, operand);
}
XlaOp Tan(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kTan, operand);
}
XlaOp Tanh(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kTanh, operand);
}
XlaOp Real(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kReal, operand);
}
XlaOp Imag(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kImag, operand);
}
XlaOp Sqrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kSqrt, operand);
}
XlaOp Cbrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kCbrt, operand);
}
XlaOp Rsqrt(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kRsqrt, operand);
}
XlaOp Pow(const XlaOp lhs, const XlaOp rhs,
absl::Span<const int64_t> broadcast_dimensions) {
return lhs.builder()->BinaryOp(HloOpcode::kPower, lhs, rhs,
broadcast_dimensions);
}
XlaOp IsFinite(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kIsFinite, operand);
}
XlaOp ConvertElementType(const XlaOp operand, PrimitiveType new_element_type) {
return operand.builder()->ConvertElementType(operand, new_element_type);
}
XlaOp BitcastConvertType(const XlaOp operand, PrimitiveType new_element_type) {
return operand.builder()->BitcastConvertType(operand, new_element_type);
}
XlaOp StochasticConvertType(const XlaOp operand, const XlaOp random,
PrimitiveType new_element_type) {
return operand.builder()->StochasticConvertType(operand, random,
new_element_type);
}
XlaOp Neg(const XlaOp operand) {
return operand.builder()->UnaryOp(HloOpcode::kNegate, operand);
}
XlaOp Transpose(const XlaOp operand, absl::Span<const int64_t> permutation) {
return operand.builder()->Transpose(operand, permutation);
}
XlaOp Rev(const XlaOp operand, absl::Span<const int64_t> dimensions) {
return operand.builder()->Rev(operand, dimensions);
}
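// Note: `operands` must be non-empty; the builder is taken from operands[0].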
XlaOp Sort(absl::Span<const XlaOp> operands, const XlaComputation& comparator,
int64_t dimension, bool is_stable) {
return operands[0].builder()->Sort(operands, comparator, dimension,
is_stable);
}
XlaOp TopK(XlaOp operand, int64_t k, bool largest) {
return operand.builder()->TopK(operand, k, largest);
}
XlaOp Clamp(const XlaOp min, const XlaOp operand, const XlaOp max) {
return min.builder()->Clamp(min, operand, max);
}
XlaOp Map(XlaBuilder* builder, absl::Span<const XlaOp> operands,
const XlaComputation& computation,
absl::Span<const int64_t> dimensions,
absl::Span<const XlaOp> static_operands) {
return builder->Map(operands, computation, dimensions, static_operands);
}
XlaOp RngNormal(const XlaOp mu, const XlaOp sigma, const Shape& shape) {
return mu.builder()->RngNormal(mu, sigma, shape);
}
XlaOp RngUniform(const XlaOp a, const XlaOp b, const Shape& shape) {
return a.builder()->RngUniform(a, b, shape);
}
XlaOp RngBitGenerator(RandomAlgorithm algorithm, const XlaOp initial_state,
const Shape& shape) {
return initial_state.builder()->RngBitGenerator(algorithm, initial_state,
shape);
}
XlaOp While(const XlaComputation& condition, const XlaComputation& body,
const XlaOp init) {
return init.builder()->While(condition, body, init);
}
XlaOp Conditional(const XlaOp predicate, const XlaOp true_operand,
const XlaComputation& true_computation,
const XlaOp false_operand,
const XlaComputation& false_computation) {
return predicate.builder()->Conditional(predicate, true_operand,
true_computation, false_operand,
false_computation);
}
XlaOp Conditional(const XlaOp branch_index,
absl::Span<const XlaComputation* const> branch_computations,
absl::Span<const XlaOp> branch_operands) {
return branch_index.builder()->Conditional(branch_index, branch_computations,
branch_operands);
}
XlaOp ReducePrecision(const XlaOp operand, const int exponent_bits,
const int mantissa_bits) {
return operand.builder()->ReducePrecision(operand, exponent_bits,
mantissa_bits);
}
XlaOp Gather(const XlaOp input, const XlaOp start_indices,
const GatherDimensionNumbers& dimension_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
return input.builder()->Gather(input, start_indices, dimension_numbers,
slice_sizes, indices_are_sorted);
}
XlaOp Scatter(const XlaOp input, const XlaOp scatter_indices,
const XlaOp updates, const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return input.builder()->Scatter(input, scatter_indices, updates,
update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
}
XlaOp Scatter(absl::Span<const XlaOp> inputs, XlaOp scatter_indices,
absl::Span<const XlaOp> updates,
const XlaComputation& update_computation,
const ScatterDimensionNumbers& dimension_numbers,
bool indices_are_sorted, bool unique_indices) {
return scatter_indices.builder()->Scatter(
inputs, scatter_indices, updates, update_computation, dimension_numbers,
indices_are_sorted, unique_indices);
}
void Send(const XlaOp operand, const ChannelHandle& handle) {
return operand.builder()->Send(operand, handle);
}
XlaOp Recv(XlaBuilder* builder, const Shape& shape,
const ChannelHandle& handle) {
return builder->Recv(shape, handle);
}
XlaOp SendWithToken(const XlaOp operand, const XlaOp token,
const ChannelHandle& handle) {
return operand.builder()->SendWithToken(operand, token, handle);
}
XlaOp RecvWithToken(const XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return token.builder()->RecvWithToken(token, shape, handle);
}
XlaOp SendToHost(const XlaOp operand, const XlaOp token,
const Shape& shape_with_layout, const ChannelHandle& handle) {
return operand.builder()->SendToHost(operand, token, shape_with_layout,
handle);
}
XlaOp RecvFromHost(const XlaOp token, const Shape& shape,
const ChannelHandle& handle) {
return token.builder()->RecvFromHost(token, shape, handle);
}
XlaOp InfeedWithToken(const XlaOp token, const Shape& shape,
const std::string& config) {
return token.builder()->InfeedWithToken(token, shape, config);
}
XlaOp OutfeedWithToken(const XlaOp operand, const XlaOp token,
const Shape& shape_with_layout,
const std::string& outfeed_config) {
return operand.builder()->OutfeedWithToken(operand, token, shape_with_layout,
outfeed_config);
}
XlaOp CreateToken(XlaBuilder* builder) { return builder->CreateToken(); }
XlaOp AfterAll(XlaBuilder* builder, absl::Span<const XlaOp> tokens) {
return builder->AfterAll(tokens);
}
XlaOp BatchNormTraining(const XlaOp operand, const XlaOp scale,
const XlaOp offset, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormTraining(operand, scale, offset, epsilon,
feature_index);
}
XlaOp BatchNormInference(const XlaOp operand, const XlaOp scale,
const XlaOp offset, const XlaOp mean,
const XlaOp variance, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormInference(
operand, scale, offset, mean, variance, epsilon, feature_index);
}
XlaOp BatchNormGrad(const XlaOp operand, const XlaOp scale,
const XlaOp batch_mean, const XlaOp batch_var,
const XlaOp grad_output, float epsilon,
int64_t feature_index) {
return operand.builder()->BatchNormGrad(operand, scale, batch_mean, batch_var,
grad_output, epsilon, feature_index);
}
XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64_t size) {
return builder->Iota(type, size);
}
XlaOp Iota(XlaBuilder* builder, const Shape& shape, int64_t iota_dimension) {
return builder->Iota(shape, iota_dimension);
}
XlaOp GetDimensionSize(const XlaOp operand, int64_t dimension) {
return operand.builder()->GetDimensionSize(operand, dimension);
}
XlaOp SetDimensionSize(const XlaOp operand, const XlaOp val,
int64_t dimension) {
return operand.builder()->SetDimensionSize(operand, val, dimension);
}
XlaOp RemoveDynamicDimension(const XlaOp operand, int64_t dimension) {
return operand.builder()->RemoveDynamicDimension(operand, dimension);
}
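// Derives the sharding used while an op is manually partitioned. For a tiled
// (OTHER) sharding and a non-negative `single_dim`, the partitioning of that
// dimension is moved to a new trailing tile dimension marked MANUAL, the
// original dimension's tile count is set to 1, and the device assignment is
// rearranged accordingly; otherwise the result is fully MANUAL.
// Illustrative example (assumed values): tile dims {2, 4} with single_dim=0
// become {1, 4, 2} with MANUAL appended to last_tile_dims.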
OpSharding GetManualSharding(const OpSharding& original, int64_t single_dim) {
OpSharding manual;
if (single_dim < 0 || original.type() != OpSharding::OTHER) {
manual.set_type(OpSharding::MANUAL);
return manual;
}
manual.set_type(OpSharding::OTHER);
std::vector<int64_t> new_tile_shape(
original.tile_assignment_dimensions().begin(),
original.tile_assignment_dimensions().end());
new_tile_shape.push_back(new_tile_shape[single_dim]);
new_tile_shape[single_dim] = 1;
Array<int64_t> new_tile(new_tile_shape);
new_tile.Each([&](absl::Span<const int64_t> indices, int64_t* v) {
int64_t src_index = 0;
for (int64_t i = 0; i < indices.size() - 1; ++i) {
if (i > 0) {
src_index *= new_tile_shape[i];
}
int64_t index = indices[i];
if (i == single_dim) {
index = indices.back();
}
src_index += index;
}
*v = original.tile_assignment_devices(src_index);
});
for (int64_t dim : new_tile_shape) {
manual.add_tile_assignment_dimensions(dim);
}
for (int64_t device : new_tile) {
manual.add_tile_assignment_devices(device);
}
if (original.replicate_on_last_tile_dim()) {
manual.add_last_tile_dims(OpSharding::REPLICATED);
}
for (int64_t type : original.last_tile_dims()) {
manual.add_last_tile_dims(static_cast<OpSharding::Type>(type));
}
manual.add_last_tile_dims(OpSharding::MANUAL);
return manual;
}
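// Lowers a full-shape SPMD value to its per-shard shape. The input is first
// annotated with `manual_sharding` via a "Sharding" custom call, then an
// "SPMDFullToShardShape" custom call produces the shard shape, with each
// partitioned dimension i resized to ceil(dim_i / partitions_i). If
// `single_dim` is non-negative, only that dimension is converted.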
absl::StatusOr<XlaOp> ConvertSpmdFullToShardShape(
XlaBuilder* builder, XlaOp input, int single_dim,
const OpSharding& manual_sharding,
absl::Span<const int64_t> unspecified_dims) {
TF_ASSIGN_OR_RETURN(const Shape input_shape, builder->GetShape(input));
Shape output_shape = input_shape;
const int64_t rank = output_shape.rank();
if (manual_sharding.type() == OpSharding::OTHER) {
for (int64_t i = 0; i < rank; ++i) {
if (single_dim >= 0 && i != single_dim) {
continue;
}
const int64_t partitions_i =
manual_sharding.tile_assignment_dimensions(i);
if (partitions_i == 1) continue;
const int64_t dim_size =
CeilOfRatio(output_shape.dimensions(i), partitions_i);
output_shape.set_dimensions(i, dim_size);
}
}
XlaOp input_annotation;
{
XlaScopedShardingAssignment assign_sharding(builder, manual_sharding);
input_annotation = CustomCall(
builder, "Sharding", {input}, input_shape,
sharding_op_util::EncodeAttributes(unspecified_dims));
}
{
OpSharding manual = GetManualSharding(manual_sharding, single_dim);
XlaScopedShardingAssignment assign_sharding(builder, manual);
    return CustomCall(builder,
                      /*call_target_name=*/"SPMDFullToShardShape",
                      {input_annotation}, output_shape,
                      sharding_op_util::EncodeAttributes(unspecified_dims));
}
}
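// The inverse of ConvertSpmdFullToShardShape: annotates the per-shard input
// with the manual sharding and emits an "SPMDShardToFullShape" custom call
// that restores the caller-provided full `output_shape`.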
absl::StatusOr<XlaOp> ConvertSpmdShardToFullShape(
XlaBuilder* builder, XlaOp input, const Shape& output_shape, int single_dim,
const OpSharding& manual_sharding,
absl::Span<const int64_t> unspecified_dims) {
TF_ASSIGN_OR_RETURN(const Shape input_shape, builder->GetShape(input));
XlaOp input_annotation;
{
OpSharding manual = GetManualSharding(manual_sharding, single_dim);
XlaScopedShardingAssignment assign_sharding(builder, manual);
input_annotation = CustomCall(
builder, "Sharding", {input}, input_shape,
sharding_op_util::EncodeAttributes(unspecified_dims));
}
{
XlaScopedShardingAssignment assign_sharding(builder, manual_sharding);
    return CustomCall(builder,
                      /*call_target_name=*/"SPMDShardToFullShape",
                      {input_annotation}, output_shape,
                      sharding_op_util::EncodeAttributes(unspecified_dims));
}
}
} | #include "xla/hlo/builder/xla_builder.h"
#include <algorithm>
#include <array>
#include <complex>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/sharding_builder.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::_;
using ::testing::HasSubstr;
using ::testing::Test;
using ::tsl::testing::StatusIs;
HloInstruction* GetRoot(HloModule& module) {
return module.entry_computation()->root_instruction();
}
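// Builds the computation held by `b` and round-trips it through its proto
// representation into an HloModule, so tests can pattern-match the emitted
// HLO.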
absl::StatusOr<std::unique_ptr<HloModule>> BuildHloModule(XlaBuilder& b) {
  TF_ASSIGN_OR_RETURN(XlaComputation computation,
                      b.Build(/*remove_dynamic_dimensions=*/false));
const HloModuleProto& proto = computation.proto();
TF_ASSIGN_OR_RETURN(const auto& config,
HloModule::CreateModuleConfigFromProto(
proto, GetDebugOptionsFromFlags()));
return HloModule::CreateFromProto(proto, config);
}
absl::StatusOr<std::unique_ptr<HloModule>> BuildHloModule(XlaBuilder& b,
XlaOp root) {
  TF_ASSIGN_OR_RETURN(XlaComputation computation,
                      b.Build(root, /*remove_dynamic_dimensions=*/false));
const HloModuleProto& proto = computation.proto();
TF_ASSIGN_OR_RETURN(const auto& config,
HloModule::CreateModuleConfigFromProto(
proto, GetDebugOptionsFromFlags()));
return HloModule::CreateFromProto(proto, config);
}
std::string TestName() {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
TEST(XlaBuilderTest, OnePlusTwo) {
XlaBuilder b(TestName());
Add(ConstantR0<float>(&b, 1.0), ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, UnaryOperatorsBuildExpectedHLO) {
auto test_unary_operator = [&](std::function<XlaOp(XlaOp)> op,
auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<int32_t>(&b, 1));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_unary_operator([](XlaOp x) { return -x; },
GmockMatch(m::Negate(m::Constant())));
test_unary_operator([](XlaOp x) { return ~x; },
GmockMatch(m::Not(m::Constant())));
}
TEST(XlaBuilderTest, BinaryOperatorsBuildExpectedHLO) {
auto test_binary_operator = [&](std::function<XlaOp(XlaOp, XlaOp)> op,
auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_binary_operator([](XlaOp x, XlaOp y) { return x + y; },
GmockMatch(m::Add(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x - y; },
GmockMatch(m::Subtract(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x * y; },
GmockMatch(m::Multiply(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x / y; },
GmockMatch(m::Divide(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x & y; },
GmockMatch(m::And(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x | y; },
GmockMatch(m::Or(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x ^ y; },
GmockMatch(m::Xor(m::Constant(), m::Constant())));
test_binary_operator([](XlaOp x, XlaOp y) { return x << y; },
GmockMatch(m::ShiftLeft(m::Constant(), m::Constant())));
test_binary_operator(
[](XlaOp x, XlaOp y) { return x >> y; },
GmockMatch(m::ShiftRightArithmetic(m::Constant(), m::Constant())));
auto test_unsigned_binary_operator =
[&](std::function<XlaOp(XlaOp, XlaOp)> op, auto matches_pattern) {
XlaBuilder b(TestName());
op(ConstantR0<uint32_t>(&b, 1), ConstantR0<uint32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), matches_pattern);
};
test_unsigned_binary_operator(
[](XlaOp x, XlaOp y) { return x >> y; },
GmockMatch(m::ShiftRightLogical(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, VariadicAnd) {
XlaBuilder b(TestName());
const Shape s = ShapeUtil::MakeShape(PRED, {});
And(Parameter(&b, 0, s, "p0"), Parameter(&b, 1, s, "p1"),
Parameter(&b, 2, s, "p2"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
::testing::AnyOf(
GmockMatch(m::And(m::Parameter(0),
m::And(m::Parameter(1), m::Parameter(2)))),
GmockMatch(m::And(m::And(m::Parameter(0), m::Parameter(1)),
m::Parameter(2)))));
}
TEST(XlaBuilderTest, VariadicOr) {
XlaBuilder b(TestName());
const Shape s = ShapeUtil::MakeShape(PRED, {});
Or(Parameter(&b, 0, s, "p0"), Parameter(&b, 1, s, "p1"),
Parameter(&b, 2, s, "p2"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
::testing::AnyOf(
GmockMatch(m::Or(m::Parameter(0),
m::Or(m::Parameter(1), m::Parameter(2)))),
GmockMatch(m::Or(m::Or(m::Parameter(0), m::Parameter(1)),
m::Parameter(2)))));
}
TEST(XlaBuilderTest, ShiftRightOperatorOnNonIntegerProducesError) {
XlaBuilder b(TestName());
ConstantR0<float>(&b, 1) >> ConstantR0<float>(&b, 2);
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr("Argument to >> operator does not have an integral type"));
}
TEST(XlaBuilderTest, ParamPlusConstantHasScalarBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
Add(x, ConstantR0<float>(&b, 1.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(), m::Broadcast(m::Constant()))));
}
TEST(XlaBuilderTest, ParamPlusConstantHasScalarBroadcastReversed) {
XlaBuilder b(TestName());
const XlaOp x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {3, 5}), "x");
Add(ConstantR0<float>(&b, 1.0), x);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Broadcast(m::Constant()), m::Parameter())));
}
TEST(XlaBuilderTest, ParamPlusParamHasBroadcast) {
XlaBuilder b(TestName());
const auto& x_shape = ShapeUtil::MakeShape(S32, {2, 4, 6});
const auto& y_shape = ShapeUtil::MakeShape(S32, {2, 4});
auto x = Parameter(&b, 0, x_shape, "x");
auto y = Parameter(&b, 1, y_shape, "y");
auto add = Add(x, y, {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto add_shape, b.GetShape(add));
EXPECT_TRUE(ShapeUtil::Equal(add_shape, x_shape));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(
GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))));
}
TEST(XlaBuilderTest, XPlusX) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {1, 3, 5, 7}), "x");
Add(x, x);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0), m::Parameter(0))));
}
TEST(XlaBuilderTest, TestBinaryOpImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2,2]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
{1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, TestBinaryOpImplicitBroadcastBounded) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[<=2, <=2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[<=2, <=2]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
{1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, ShapeInferenceError) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(U32, {2, 4, 6}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(U32, {2, 4}), "y");
Add(x, y);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("Shapes must be equal rank"));
}
TEST(XlaBuilderTest, DynamicDimensionReshapeToR0) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "dyn_dim");
auto dx = SetDimensionSize(x, y, 0);
Reshape(dx, {});
auto statusor = BuildHloModule(b);
ASSERT_TRUE(statusor.ok());
}
TEST(XlaBuilderTest, ParameterAlreadyRegistered) {
XlaBuilder b_call("add");
Parameter(&b_call, 0, ShapeUtil::MakeShape(PRED, {}), "x");
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "x");
auto y = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "y");
Add(x, y);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("parameter 0 already registered"));
}
TEST(XlaBuilderTest, Call) {
XlaBuilder b_call("the_only_to_apply");
auto p0 = Parameter(&b_call, 0, ShapeUtil::MakeShape(F32, {}), "p0");
auto p1 = Parameter(&b_call, 1, ShapeUtil::MakeShape(F32, {}), "p1");
Add(p0, p1);
TF_ASSERT_OK_AND_ASSIGN(const auto call, b_call.Build());
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
auto one = ConstantR0<float>(&b, 1);
auto two = ConstantR0<float>(&b, 2);
Add(Call(&b, call, {x, y}), Call(&b, call, {one, two}));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Call(m::Parameter(), m::Parameter()),
m::Call(m::Constant(), m::Constant()))));
}
TEST(XlaBuilderTest, CompositeCall) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
}
TEST(XlaBuilderTest, CompositeCallFrontendAttributesStayLocal) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/1);
Add(operands[0], operands[1]);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_TRUE(GetRoot(*module)->frontend_attributes().map().empty());
}
TEST(XlaBuilderTest, CompositeCallMissingName) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
CompositeCall(&b, computation, absl::MakeSpan(operands), "",
"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("A composite call op must have frontend attributes "
"with key composite.name whose value is non-empty"));
}
TEST(XlaBuilderTest, CompositeCallMissingAttribute) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
CompositeCall(&b, computation, absl::MakeSpan(operands), "foo.bar",
"", 1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr(
"A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty"));
}
TEST(XlaBuilderTest, CompositeCallNonNegativeVersion) {
XlaBuilder b(TestName());
FrontendAttributes frontend_attributes = b.frontend_attributes();
frontend_attributes.mutable_map()->insert({"foo", "bar"});
b.SetFrontendAttributes(frontend_attributes);
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
  CompositeCall(&b, computation, absl::MakeSpan(operands),
                /*name=*/"foo.bar",
                /*attributes=*/"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
                /*version=*/-1);
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("A composite call op must have frontend attributes "
"with a composite.version whose value is a "
"non-negative integer but got: -1"));
}
TEST(XlaBuilderTest, CompositeCallOptionalVersionAndAttribute) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
CompositeCall(&b, computation, absl::MakeSpan(operands), "foo.bar");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ASSERT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
ASSERT_TRUE(GetRoot(*module)->frontend_attributes().map().contains(
"composite.attributes"));
EXPECT_EQ(
GetRoot(*module)->frontend_attributes().map().at("composite.attributes"),
"{}");
EXPECT_EQ(
GetRoot(*module)->frontend_attributes().map().at("composite.version"),
"0");
}
TEST(XlaBuilderTest, CompositeCallWithExtraFrontendAttributes) {
XlaBuilder b(TestName());
FrontendAttributes frontend_attributes = b.frontend_attributes();
frontend_attributes.mutable_map()->insert({"foo", "bar"});
b.SetFrontendAttributes(frontend_attributes);
const Shape shape = ShapeUtil::MakeShape(F32, {});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, shape, "arg0"), Parameter(&bsum, 1, shape, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation computation, bsum.Build());
std::vector<XlaOp> operands = {Parameter(&b, 0, shape, "arg0"),
Parameter(&b, 1, shape, "arg1")};
CompositeCall(&b, computation, absl::MakeSpan(operands),
"foo.bar",
"{n = 1 : i32, tensor = dense<1> : tensor<i32>}",
1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Call(m::Parameter(), m::Parameter())));
ASSERT_TRUE(GetRoot(*module)->frontend_attributes().map().contains("foo"));
EXPECT_EQ(GetRoot(*module)->frontend_attributes().map().at("foo"), "bar");
}
TEST(XlaBuilderTest, BinopHasDegenerateBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {1, 2, 3}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {1, 2, 1}), "y");
Add(x, y);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Parameter(0),
m::Broadcast(m::Reshape(m::Parameter(1))))));
}
TEST(XlaBuilderTest, BinopHasInDimAndDegenerateBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {2, 1, 4}), "y");
Add(x, y, {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Broadcast(m::Parameter(0)),
m::Broadcast(m::Reshape(m::Parameter(1))))));
}
TEST(XlaBuilderTest, BroadcastInDim) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3}), "x");
  BroadcastInDim(x, {2, 4, 3},
                 /*broadcast_dimensions=*/{0, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Broadcast()));
}
TEST(XlaBuilderTest, BroadcastInDimWithDegeneratedDim) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 1, 4}), "x");
  BroadcastInDim(x, {2, 3, 4},
                 /*broadcast_dimensions=*/{0, 1, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Broadcast(m::Reshape(m::Broadcast()))));
}
TEST(XlaBuilderTest, BroadcastInDimWithNegativeSize) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 1, 4}), "x");
  BroadcastInDim(x, {-3, 3, 4},
                 /*broadcast_dimensions=*/{0, 1, 2});
auto statusor = BuildHloModule(b);
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("invalid shape"));
}
TEST(XlaBuilderTest, OperandFromWrongBuilder) {
XlaBuilder b1("b1");
auto p0 = Parameter(&b1, 0, ShapeUtil::MakeShape(F32, {}), "p0");
XlaBuilder builder("main");
auto p = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "p");
Add(p, p0);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(
statusor.status().message(),
HasSubstr(
"built by builder 'b1', but is trying to use it in builder 'main'"));
}
TEST(XlaBuilderTest, ReshapeDefaultOrder) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
Reshape(x, {6, 35});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Reshape(m::Parameter())));
}
TEST(XlaBuilderTest, ReshapeHasTranspose) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 3, 5, 7}), "x");
  Reshape(x, /*dimensions=*/{3, 2, 1, 0}, /*new_sizes=*/{6, 35});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Reshape(m::Transpose(m::Parameter()))));
}
TEST(XlaBuilderTest, Transpose) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
Transpose(x, {1, 0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Transpose(m::Parameter())));
}
TEST(XlaBuilderTest, AllGatherR1) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4}), "x");
  AllGather(x, /*all_gather_dimension=*/0, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {16})));
}
TEST(XlaBuilderTest, AllGatherR2) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
  AllGather(x, /*all_gather_dimension=*/1, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 64})));
}
TEST(XlaBuilderTest, AllGatherWithTuple) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4}), "x");
auto x2 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {16, 4}), "x2");
  AllGather(Tuple(&b, {x, x2}), /*all_gather_dimension=*/0,
            /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllGather);
EXPECT_TRUE(ShapeUtil::Equal(
root->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {16}),
ShapeUtil::MakeShape(F32, {64, 4})})));
}
TEST(XlaBuilderTest, AllGatherTuple) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {128, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {128, 8}), "p1");
  AllGatherTuple({p0, p1}, /*all_gather_dimension=*/1, /*shard_count=*/4);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto tuple_shape =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {128, 16}),
ShapeUtil::MakeShape(F32, {128, 32})});
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllGather)
.WithShapeEqualTo(&tuple_shape)));
}
TEST(XlaBuilderTest, ReduceScatter) {
XlaBuilder b(TestName());
XlaComputation to_apply;
{
auto sub_builder = b.CreateSubBuilder("add");
auto arg0 =
Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32), "x");
auto arg1 =
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
  ReduceScatter(x, to_apply, /*scatter_dimension=*/1, /*shard_count=*/2,
                /*replica_groups=*/{group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReduceScatter);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 8})));
}
TEST(XlaBuilderTest, ReduceScatterWithTuple) {
XlaBuilder b(TestName());
XlaComputation to_apply;
{
auto sub_builder = b.CreateSubBuilder("add");
auto arg0 =
Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32), "x");
auto arg1 =
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
auto x2 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {16, 4}), "x2");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
  ReduceScatter(Tuple(&b, {x, x2}), to_apply, /*scatter_dimension=*/1,
                /*shard_count=*/2,
                /*replica_groups=*/{group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReduceScatter);
EXPECT_TRUE(ShapeUtil::Equal(
root->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4, 8}),
ShapeUtil::MakeShape(F32, {16, 2})})));
}
TEST(XlaBuilderTest, AllToAll) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16}), "x");
  AllToAll(x, /*split_dimension=*/1, /*concat_dimension=*/0,
           /*split_count=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
EXPECT_EQ(root->operand(0)->operand(0)->operand(0)->opcode(),
HloOpcode::kAllToAll);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {8, 8})));
}
TEST(XlaBuilderTest, AllToAllSpecial) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {4, 16, 8}), "x");
  AllToAll(x, /*split_dimension=*/0, /*concat_dimension=*/0,
           /*split_count=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_EQ(root->opcode(), HloOpcode::kAllToAll);
EXPECT_TRUE(
ShapeUtil::Equal(root->shape(), ShapeUtil::MakeShape(F32, {4, 16, 8})));
}
TEST(XlaBuilderTest, AllToAllTuple) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {2, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {2, 4}), "p1");
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
AllToAllTuple({p0, p1}, {replica_group}, LayoutUtil::MakeAscendingLayout(2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto expected_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 4},
                                          /*minor_to_major=*/{0, 1});
auto tuple_shape =
ShapeUtil::MakeTupleShape({expected_shape, expected_shape});
auto is_replica_group_pred = [](const HloInstruction* instr) {
return instr->replica_groups().size() == 1 &&
absl::c_equal(instr->replica_groups()[0].replica_ids(),
std::vector<int64_t>{0, 1});
};
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllToAll)
.WithShapeEqualTo(&tuple_shape)
.WithPredicate(is_replica_group_pred)));
}
TEST(XlaBuilderTest, AllReduceTuple) {
XlaBuilder b(TestName());
auto shape0 = ShapeUtil::MakeShape(F32, {});
auto shape1 = ShapeUtil::MakeShape(F32, {1, 2});
auto p0 = Parameter(&b, 0, shape0, "p0");
auto p1 = Parameter(&b, 1, shape1, "p1");
XlaBuilder bsum(TestName());
auto f32Scalar = ShapeUtil::MakeShape(F32, {});
Add(Parameter(&bsum, 0, f32Scalar, "x"), Parameter(&bsum, 1, f32Scalar, "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
AllReduceTuple({p0, p1}, sum);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto tuple_shape = ShapeUtil::MakeTupleShape({shape0, shape1});
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAllReduce)
.WithShapeEqualTo(&tuple_shape)));
}
TEST(XlaBuilderTest, CollectiveBroadcast) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
CollectiveBroadcast(x, {replica_group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kCollectiveBroadcast);
}
TEST(XlaBuilderTest, CollectivePermute) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
CollectivePermute(x, {{0, 1}, {1, 2}, {2, 3}});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kCollectivePermute);
}
TEST(XlaBuilderTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto x =
Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}, {false, true}), "x");
GetDimensionSize(x, 1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kGetDimensionSize);
}
TEST(XlaBuilderTest, GetDimensionSizeConstant) {
XlaBuilder b(TestName());
auto x =
Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}, {false, true}), "x");
GetDimensionSize(x, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_EQ(GetRoot(*module)->opcode(), HloOpcode::kConstant);
}
TEST(XlaBuilderTest, ReportError) {
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {5, 7}), "x");
Add(b.ReportError(InvalidArgument("a test error")), x);
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("a test error"));
}
TEST(XlaBuilderTest, ReportErrorOrReturnHandlesNonErrors) {
XlaBuilder b(TestName());
absl::StatusOr<XlaOp> op(ConstantR0<float>(&b, 1.0));
Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Add(m::Constant(), m::Constant())));
}
TEST(XlaBuilderTest, ReportErrorOrReturnHandlesErrors) {
XlaBuilder b(TestName());
absl::StatusOr<XlaOp> op(InvalidArgument("a test error"));
Add(b.ReportErrorOrReturn(op), ConstantR0<float>(&b, 2.0));
auto statusor = b.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("a test error"));
}
TEST(XlaBuilderTest, BuildWithSpecificRoot) {
XlaBuilder b(TestName());
const XlaOp constant = ConstantR0<float>(&b, 1.0);
Add(constant, ConstantR0<float>(&b, 2.0));
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, constant));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Constant()));
}
TEST(XlaBuilderTest, BuildWithSpecificRootAndMultipleParameters) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {42, 123});
const XlaOp x = Parameter(&b, 0, shape, "x");
const XlaOp y = Parameter(&b, 1, shape, "y");
const XlaOp z = Parameter(&b, 2, shape, "z");
Add(x, Sub(y, z));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, x));
EXPECT_THAT(GetRoot(*module), GmockMatch(m::Parameter()));
EXPECT_EQ(module->entry_computation()->num_parameters(), 3);
EXPECT_EQ(module->entry_computation()->instruction_count(), 5);
}
TEST(XlaBuilderTest, BuildWithSpecificRootWithWrongBuilder) {
XlaBuilder b(TestName());
XlaBuilder other_b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {42, 123});
Parameter(&b, 0, shape, "param");
const XlaOp other_param = Parameter(&other_b, 0, shape, "other_param");
absl::Status status = b.Build(other_param).status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(
status.message(),
::testing::HasSubstr("root operation is not in this computation"));
}
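// Building the same computation twice must serialize to byte-identical
// protos, i.e. instruction and computation IDs are assigned deterministically.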
TEST(XlaBuilderTest, ProtoMatches) {
std::vector<XlaComputation> computations;
const int n = 2;
computations.reserve(n);
for (int i = 0; i < n; ++i) {
XlaBuilder b_call("the_only_to_apply");
auto p0 = Parameter(&b_call, 0, ShapeUtil::MakeShape(F32, {}), "p0");
auto p1 = Parameter(&b_call, 1, ShapeUtil::MakeShape(F32, {}), "p1");
Add(p0, Add(p1, p0));
TF_ASSERT_OK_AND_ASSIGN(const auto call, b_call.Build());
XlaBuilder b(TestName());
auto x = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {}), "x");
auto y = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {}), "y");
auto one = ConstantR0<float>(&b, 1);
auto two = ConstantR0<float>(&b, 2);
Add(Call(&b, call, {x, y}), Call(&b, call, {one, two}));
computations.push_back(b.Build().value());
}
auto c0_string = computations[0].proto().SerializeAsString();
auto c1_string = computations[1].proto().SerializeAsString();
EXPECT_EQ(c0_string, c1_string);
}
TEST(XlaBuilderTest, DynamicParameter) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {6}, {true})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
Parameter(&b, 1, ShapeUtil::MakeShape(U32, {}), "p1");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, p0));
const Shape& param_shape = module->entry_computation()
->parameter_instruction(0)
->shape()
.tuple_shapes(1);
EXPECT_TRUE(param_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, SetDimensionSize) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, set_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(root_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, RemoveDynamicDimension) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
auto remove_dim_size = RemoveDynamicDimension(set_dim_size, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, remove_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_FALSE(root_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, RemoveDynamicDimensionMultiDims) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10, 10}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
auto set_dim_size = SetDimensionSize(p0, p1, 0);
set_dim_size = SetDimensionSize(set_dim_size, p1, 1);
auto remove_dim_size = RemoveDynamicDimension(set_dim_size, 0);
remove_dim_size = RemoveDynamicDimension(remove_dim_size, 1);
TF_ASSERT_OK_AND_ASSIGN(const auto module,
BuildHloModule(b, remove_dim_size));
const Shape& root_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_FALSE(root_shape.is_dynamic_dimension(0));
EXPECT_FALSE(root_shape.is_dynamic_dimension(1));
}
TEST(XlaBuilderTest, DynamicUnary) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Neg(gte);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, DynamicBinary) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}, {true}),
ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(0));
}
TEST(XlaBuilderTest, DynamicBinaryHasBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(F32, {5}, {true}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1, {0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
  BroadcastInDim(gte, /*out_dim_size=*/{3, 5, 4},
                 /*broadcast_dimensions=*/{1, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {false, true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicBinaryHasDegenerateBroadcast) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {10}, {true}),
ShapeUtil::MakeShape(F32, {1, 15}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Add(gte0, gte1, {0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicSelectOnlyPredDynamic) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {10}, {true}),
ShapeUtil::MakeShape(F32, {10}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(gte0, gte1, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true}))
<< result_shape;
}
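// A Select with a scalar predicate but tuple-shaped operands cannot be a
// kSelect HLO; the builder lowers it to a Conditional whose branches simply
// return their operand.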
TEST(XlaBuilderTest, SelectIntoConditional) {
XlaBuilder b(TestName());
const Shape selector_shape = ShapeUtil::MakeShape(PRED, {});
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(F32, {})});
const XlaOp p0 = Parameter(&b, 0, selector_shape, "p0");
const XlaOp p1 = Parameter(&b, 1, tuple_param_shape, "p1");
const XlaOp p2 = Parameter(&b, 2, tuple_param_shape, "p2");
Select(p0, p1, p2);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Conditional(m::Parameter(0), m::Parameter(1),
m::Parameter(2))));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->branch_computation(0)
->root_instruction(),
GmockMatch(m::Parameter(0)));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->branch_computation(1)
->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST(XlaBuilderTest, DynamicPad) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pad_val = ConstantR0<float>(&b, -1);
auto gte = GetTupleElement(p0, 0);
PaddingConfig padding_config;
for (int i = 0; i < 2; i++) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(0);
dimension->set_interior_padding(0);
}
Pad(gte, pad_val, padding_config);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicConvolution) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {1, 2, 2, 128}, {true, false, false, false}),
ShapeUtil::MakeShape(F32, {2, 2, 128, 8}, {false, false, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto input = GetTupleElement(p0, 0);
auto filter = GetTupleElement(p0, 1);
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
  ConvWithGeneralDimensions(input, filter, {1, 1}, Padding::kValid, dnums,
                            /*feature_group_count=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(),
{true, false, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicDot) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 3, 4}, {true, true, false}),
ShapeUtil::MakeShape(F32, {2, 4, 5}, {true, false, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto lhs = GetTupleElement(p0, 0);
auto rhs = GetTupleElement(p0, 1);
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
DotGeneral(lhs, rhs, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReduce) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5, 4, 3}, {false, true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0);
auto gte = GetTupleElement(p0, 0);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
Reduce(gte, init, sum, {0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {true, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReduceWindow) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0.f);
auto gte = GetTupleElement(p0, 0);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
  ReduceWindow(gte, init, sum, /*window_dimensions=*/{1, 2, 4},
               /*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
VLOG(2) << module->entry_computation()->root_instruction()->ToString()
<< "\n";
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, VariadicDynamicReduceWindow) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto p1 = Parameter(&b, 1, tuple_param_shape, "p1");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p1, 0);
std::vector<XlaOp> input_operands = {gte0, gte1};
XlaBuilder bsum(TestName());
auto p2 = Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x0");
auto p3 = Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "x1");
auto p4 = Parameter(&bsum, 2, ShapeUtil::MakeShape(F32, {}), "y0");
auto p5 = Parameter(&bsum, 3, ShapeUtil::MakeShape(F32, {}), "y1");
std::vector<XlaOp> output_operands = {Add(p2, p4), Add(p3, p5)};
Tuple(&bsum, absl::MakeSpan(output_operands));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
auto init = ConstantR0<float>(&b, 0.f);
  ReduceWindow(input_operands, {init, init}, sum,
               /*window_dimensions=*/{1, 2, 4},
               /*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
VLOG(2) << module->entry_computation()->root_instruction()->ToString()
<< "\n";
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.tuple_shapes(0).dynamic_dimensions(),
{true, false, false}))
<< result_shape.tuple_shapes(0);
EXPECT_TRUE(ContainersEqual(result_shape.tuple_shapes(1).dynamic_dimensions(),
{true, false, false}))
<< result_shape.tuple_shapes(1);
}
TEST(XlaBuilderTest, DynamicSelectAndScatter) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 4, 8}, {true, false, false}),
ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto init = ConstantR0<float>(&b, 0.f);
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto sum, bsum.Build());
XlaBuilder bge(TestName());
Ge(Parameter(&bge, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bge, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const auto ge, bge.Build());
auto gte0 = GetTupleElement(p0, 0);
auto source = GetTupleElement(p0, 1);
SelectAndScatter(gte0, ge, {1, 2, 4}, {1, 2, 4}, Padding::kValid, source,
init, sum);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicReshape) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
{false, false, true, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Reshape(gte, {6, 4, 5, 2, 3});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(1));
EXPECT_TRUE(result_shape.is_dynamic_dimension(2));
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(),
{false, true, true, false, false}))
<< result_shape;
}
TEST(XlaBuilderTest, DynamicSelect) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pred = Parameter(&b, 1, ShapeUtil::MakeShape(PRED, {}), "pred");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(pred, gte0, gte1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(result_shape.is_dynamic_dimension(1));
EXPECT_FALSE(result_shape.is_dynamic_dimension(2));
EXPECT_TRUE(
ContainersEqual(result_shape.dynamic_dimensions(), {false, true, false}))
<< result_shape;
}
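// Despite the name, this build is expected to succeed: the operands differ
// only in which dimension is dynamic, which shape inference apparently treats
// as compatible.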
TEST(XlaBuilderTest, DynamicSelectNotCompatible) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, true, false}),
ShapeUtil::MakeShape(F32, {4, 5, 6}, {false, false, true}),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto pred = Parameter(&b, 1, ShapeUtil::MakeShape(PRED, {}), "pred");
auto gte0 = GetTupleElement(p0, 0);
auto gte1 = GetTupleElement(p0, 1);
Select(pred, gte0, gte1);
absl::Status status = BuildHloModule(b).status();
ASSERT_IS_OK(status);
}
TEST(XlaBuilderTest, DynamicTranspose) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 5}, {true, false}),
ShapeUtil::MakeShape(U32, {})});
auto p0 = Parameter(&b, 0, tuple_param_shape, "p0");
auto gte = GetTupleElement(p0, 0);
Transpose(gte, {1, 0});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
EXPECT_TRUE(ContainersEqual(result_shape.dynamic_dimensions(), {false, true}))
<< result_shape;
}
TEST(XlaBuilderTest, DotWithPreferredElementType) {
XlaBuilder b(TestName());
const Shape p0_shape = ShapeUtil::MakeShape(U8, {2, 3});
const Shape p1_shape = ShapeUtil::MakeShape(U16, {3, 2});
auto p0 = Parameter(&b, 0, p0_shape, "p0");
auto p1 = Parameter(&b, 1, p1_shape, "p1");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_rhs_contracting_dimensions(0);
  DotGeneral(p0, p1, dnums, /*precision_config=*/nullptr,
             /*preferred_element_type=*/U32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
ASSERT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(U32, {2, 2}), result_shape));
}
TEST(XlaBuilderTest, FftWithFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::FFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithIFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[5, <=10]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[5, <=10]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::IFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[10, <=5]"));
const std::vector<int64_t> fft_length = {5};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[10, <=3]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::RFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, FftWithIRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[10, <=3]"));
const std::vector<int64_t> fft_length = {5};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[10, <=5]"));
  Fft(Parameter(&b, 0, operand, "operand"), FftType::IRFFT, fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
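// 2:4 structured sparsity on the lhs contracting dimension: the lhs stores
// only the kept 16 of the logical 32 values, and `meta` encodes which
// positions they occupy.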
TEST(XlaBuilderTest, SparseDot) {
XlaBuilder b(TestName());
auto lhs = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {10, 16}), "lhs");
auto rhs = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {32, 20}), "rhs");
auto meta = Parameter(&b, 2, ShapeUtil::MakeShape(U16, {10, 2}), "meta");
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(1);
dnums.add_rhs_contracting_dimensions(0);
SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_n(2);
sparsity_descriptor.set_m(4);
sparsity_descriptor.set_index(0);
sparsity_descriptor.set_dimension(1);
std::vector<SparsityDescriptor> sparsity = {sparsity_descriptor};
std::vector<XlaOp> sparse_meta = {meta};
SparseDot(lhs, rhs, sparse_meta, sparsity, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[10, 20]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, ConvolutionWithPreferredElementType) {
XlaBuilder b(TestName());
const Shape p0_shape = ShapeUtil::MakeShape(S16, {1, 2, 2, 128});
const Shape p1_shape = ShapeUtil::MakeShape(S8, {2, 2, 128, 8});
auto p0 = Parameter(&b, 0, p0_shape, "p0");
auto p1 = Parameter(&b, 1, p1_shape, "p1");
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
  ConvWithGeneralDimensions(p0, p1, {1, 1}, Padding::kValid, dnums,
                            /*feature_group_count=*/1, /*batch_group_count=*/1,
                            /*precision_config=*/nullptr,
                            /*preferred_element_type=*/S32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const Shape& result_shape =
module->entry_computation()->root_instruction()->shape();
ASSERT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(S32, {1, 1, 1, 8}), result_shape));
}
TEST(XlaBuilderTest, AfterAllWithNonTokenOperands) {
XlaBuilder b(TestName());
AfterAll(&b, {CreateToken(&b), ConstantR0<float>(&b, 1.0)});
absl::Status status = b.Build().status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(status.message(),
::testing::HasSubstr("All operands to AfterAll must be tokens"));
}
TEST(XlaBuilderTest, AfterAllWithNoInputs) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("token[]"));
AfterAll(&b, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, CheckInputOutputAlias) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.SetUpAlias({0}, 1, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloInputOutputAliasConfig& config = module->input_output_alias_config();
EXPECT_TRUE(config.ParameterHasAlias(0, {}));
EXPECT_TRUE(config.ParameterHasAlias(1, {}));
auto alias_p0 = config.GetAliasedOutput(0, {});
ASSERT_TRUE(alias_p0.has_value());
EXPECT_EQ(*alias_p0, ShapeIndex({1}));
auto alias_p1 = config.GetAliasedOutput(1, {});
ASSERT_TRUE(alias_p1.has_value());
EXPECT_EQ(*alias_p1, ShapeIndex({0}));
}
TEST(XlaBuilderTest, CheckBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.AddBufferDonor(0, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloBufferDonorConfig& config = module->buffer_donor_config();
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
}
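// 1.401298e-45f is the smallest positive f32 denormal. On x86 the test
// temporarily enables denormals-are-zero to check that building the constant
// still works when the FPU flushes denormals.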
TEST(XlaBuilderTest, ConstantLiteral) {
XlaBuilder b(TestName());
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
int old_csr = _mm_getcsr();
_mm_setcsr(old_csr | _MM_DENORMALS_ZERO_ON);
#endif
ConstantR1<float>(&b, {0.0f, 1.401298e-45f});
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
_mm_setcsr(old_csr);
#endif
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
ASSERT_THAT(root, GmockMatch(m::Constant()));
}
TEST(XlaBuilderTest, InvalidInputOutputAliasBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.AddBufferDonor(0, {});
auto statusor = BuildHloModule(b, root);
EXPECT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("is already aliased with one output, thus it cannot be "
"added as a buffer donor for any output."));
}
TEST(XlaBuilderTest, ValidInputOutputAliasBufferDonor) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(F32, {8, 4}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(F32, {8, 4}), "p1");
auto add = Add(p0, p1);
auto sub = Sub(p0, p1);
auto root = Tuple(&b, {add, sub});
b.SetUpAlias({1}, 0, {});
b.AddBufferDonor(1, {});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b, root));
const HloInputOutputAliasConfig& io_alias_config =
module->input_output_alias_config();
const HloBufferDonorConfig& buffer_donor_config =
module->buffer_donor_config();
EXPECT_TRUE(io_alias_config.ParameterHasAlias(0, {}));
EXPECT_FALSE(io_alias_config.ParameterHasAlias(1, {}));
EXPECT_FALSE(buffer_donor_config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(buffer_donor_config.ParameterIsBufferDonor(1, {}));
auto alias_p0 = io_alias_config.GetAliasedOutput(0, {});
ASSERT_TRUE(alias_p0.has_value());
EXPECT_EQ(*alias_p0, ShapeIndex({1}));
}
void ExpectAttributesMatch(const FrontendAttributes& attr,
const FrontendAttributes& ref) {
EXPECT_EQ(ref.map_size(), attr.map_size());
for (auto reference : ref.map()) {
auto other = attr.map().find(reference.first);
EXPECT_NE(other, attr.map().end());
EXPECT_EQ(other->second, reference.second);
}
}
void ExpectInstructionsAttributesMatch(
const HloModule& module, const std::vector<FrontendAttributes>& expected) {
ASSERT_EQ(module.computation_count(), 1);
auto expected_it = expected.begin();
for (auto inst : module.entry_computation()->instructions()) {
ASSERT_NE(expected_it, expected.end());
ExpectAttributesMatch(inst->frontend_attributes(), *expected_it);
expected_it++;
}
EXPECT_EQ(expected_it, expected.end());
}
TEST(XlaBuilderTest, SimpleSetFrontendAttributes) {
XlaBuilder b(TestName());
FrontendAttributes attributes;
ConstantR0(&b, 0);
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
std::vector<FrontendAttributes> expected{FrontendAttributes(), attributes,
FrontendAttributes()};
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, ComplexSetFrontendAttributes) {
XlaBuilder b(TestName());
ConstantR0(&b, 0);
std::vector<FrontendAttributes> expected{FrontendAttributes()};
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_b"] = "b";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_b"] = "b";
(*attributes.mutable_map())["attr_c"] = "c";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, AddFrontendAttribute) {
XlaBuilder b(TestName());
ConstantR0(&b, 0);
std::vector<FrontendAttributes> expected{FrontendAttributes()};
{
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
b.SetFrontendAttributes(attributes);
ConstantR0(&b, 0);
expected.push_back(attributes);
}
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_c", "c"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
(*attributes.mutable_map())["attr_c"] = "c";
expected.push_back(attributes);
}
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_a", "a2"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a2";
expected.push_back(attributes);
}
{
auto op = ConstantR0(&b, 0);
(void)op;
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_a"] = "a";
expected.push_back(attributes);
}
b.ClearFrontendAttributes();
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
{
auto op = ConstantR0(&b, 0);
EXPECT_IS_OK(b.SetInstructionFrontendAttribute(op, "attr_d", "d"));
FrontendAttributes attributes;
(*attributes.mutable_map())["attr_d"] = "d";
expected.push_back(attributes);
}
ConstantR0(&b, 0);
expected.push_back(FrontendAttributes());
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
ExpectInstructionsAttributesMatch(*module, expected);
}
TEST(XlaBuilderTest, SetAndGetSharding) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {1024});
OpSharding op_sharding_1 = sharding_builder::Replicate();
OpSharding op_sharding_2 = sharding_builder::Tile1D(shape, 4);
TF_ASSERT_OK_AND_ASSIGN(HloSharding hlo_sharding_1,
HloSharding::FromProto(op_sharding_1));
TF_ASSERT_OK_AND_ASSIGN(HloSharding hlo_sharding_2,
HloSharding::FromProto(op_sharding_2));
b.SetSharding(op_sharding_1);
XlaOp p0 = Parameter(&b, 0, shape, "p0");
TF_ASSERT_OK_AND_ASSIGN(auto p0_sharding, b.GetOpSharding(p0));
EXPECT_TRUE(p0_sharding.has_value());
EXPECT_EQ(HloSharding::FromProto(p0_sharding.value()).value(),
hlo_sharding_1);
EXPECT_TRUE(b.SetInstructionSharding(p0, std::nullopt).ok());
TF_ASSERT_OK_AND_ASSIGN(p0_sharding, b.GetOpSharding(p0));
EXPECT_FALSE(p0_sharding.has_value());
EXPECT_TRUE(b.SetInstructionSharding(p0, op_sharding_2).ok());
TF_ASSERT_OK_AND_ASSIGN(p0_sharding, b.GetOpSharding(p0));
EXPECT_TRUE(p0_sharding.has_value());
EXPECT_EQ(HloSharding::FromProto(p0_sharding.value()).value(),
hlo_sharding_2);
EXPECT_EQ(HloSharding::FromProto(b.sharding().value()).value(),
hlo_sharding_1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_TRUE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_EQ(module->entry_computation()->parameter_instruction(0)->sharding(),
hlo_sharding_2);
}
TEST(XlaBuilderTest, ComparisonType) {
XlaBuilder b(TestName());
(void)Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
ASSERT_THAT(root, GmockMatch(m::Compare(m::Constant(), m::Constant())));
EXPECT_EQ(Comparison::Type::kSigned,
DynCast<HloCompareInstruction>(root)->type());
}
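// Pointers returned for an instruction proto must stay valid while further
// instructions are added to the builder.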
TEST(XlaBuilderTest, StableLookUpInstructionByHandle) {
XlaBuilder b(TestName());
internal::XlaBuilderFriend builder_friend;
const XlaOp le = Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
HloInstructionProto* first_op = builder_friend.GetInstruction(le);
for (int i = 0; i < 100; ++i) {
(void)Le(ConstantR0<int32_t>(&b, 1), ConstantR0<int32_t>(&b, 2));
}
HloInstructionProto* first_op_now = builder_friend.GetInstruction(le);
EXPECT_EQ(first_op, first_op_now);
}
TEST(XlaBuilderTest, ComplexAbsConstant) {
XlaBuilder b(TestName());
const XlaOp out =
Abs(ConstantR0<std::complex<float>>(&b, std::complex<float>{-1, -1}));
ValueInference value_inference(&b);
absl::StatusOr<OptionalLiteral> analyzed =
value_inference.AnalyzeConstant(out, kUpperBound);
EXPECT_IS_OK(analyzed.status());
EXPECT_EQ(analyzed->GetValue().value().shape().element_type(),
PrimitiveType::F32);
}
TEST(XlaBuilderTest, OutfeedDummyTupleSharding) {
XlaBuilder b(TestName());
const XlaOp value = ConstantR1<int32_t>(&b, {0});
const Shape shape =
      ShapeUtil::MakeShapeWithDenseLayout(S32, {1}, /*minor_to_major=*/{0});
  Outfeed(value, shape, /*outfeed_config=*/"");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_FALSE(module->entry_computation()->root_instruction()->has_sharding());
}
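// With a sharding set, the outfeed gets a two-element tuple sharding (data
// plus token), and the token operand/element are assigned to device 0.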
TEST(XlaBuilderTest, OutfeedTokenSharding) {
XlaBuilder b(TestName());
const XlaOp value = ConstantR1<int32_t>(&b, {0});
const Shape shape =
      ShapeUtil::MakeShapeWithDenseLayout(S32, {1}, /*minor_to_major=*/{0});
b.SetSharding(sharding_builder::Replicate());
  Outfeed(value, shape, /*outfeed_config=*/"");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
auto it = std::find_if(module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end(),
HloPredicateIsOp<HloOpcode::kOutfeed>);
EXPECT_NE(it, module->entry_computation()->instructions().end());
auto* outfeed = *it;
EXPECT_TRUE(outfeed->has_sharding());
EXPECT_TRUE(outfeed->sharding().IsTuple());
EXPECT_EQ(outfeed->sharding().tuple_elements().size(), 2);
EXPECT_TRUE(outfeed->operand(1)->has_sharding());
EXPECT_EQ(outfeed->sharding().tuple_elements().back(),
HloSharding::FromProto(sharding_builder::AssignDevice(0)).value());
EXPECT_EQ(outfeed->operand(1)->sharding(),
HloSharding::FromProto(sharding_builder::AssignDevice(0)).value());
}
TEST(XlaBuilderTest, NormalizeTupleSharding) {
XlaBuilder b(TestName());
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {6})});
b.SetSharding(sharding_builder::Replicate());
Parameter(&b, 0, tuple_param_shape, "p0");
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
EXPECT_TRUE(root->has_sharding());
EXPECT_TRUE(root->sharding().IsTuple());
EXPECT_EQ(GetRoot(*module)->sharding().tuple_elements().size(), 2);
}
TEST(XlaBuilderTest, InvalidSharding) {
XlaBuilder b(TestName());
const Shape shape2d = ShapeUtil::MakeShape(F32, {6, 8});
const Shape shape1d = ShapeUtil::MakeShape(F32, {5});
b.SetSharding(sharding_builder::Tile1D(shape1d, 4));
Parameter(&b, 0, shape2d, "p0");
auto statusor = b.Build();
EXPECT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(),
HasSubstr("Number of tile assignment dimensions (excluding "
"subgroups) is different than the input rank"));
}
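// TopK returns a (values, indices) tuple; both elements keep the leading
// operand dimensions and have k as their last dimension.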
TEST(XlaBuilderTest, TopKDimensions) {
XlaBuilder b(TestName());
  const int64_t k = 1;
  const bool largest = true;
TopK(Parameter(&b, 0, ShapeUtil::MakeShape(F32, {6, 8}), "p0"), k, largest);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
const HloInstruction* root = GetRoot(*module);
  EXPECT_EQ(root->opcode(), HloOpcode::kTopK);
EXPECT_TRUE(root->shape().IsTuple());
EXPECT_EQ(root->shape().tuple_shapes_size(), 2);
EXPECT_EQ(root->shape().tuple_shapes(0).rank(), 2);
EXPECT_EQ(root->shape().tuple_shapes(1).rank(), 2);
EXPECT_EQ(root->shape().tuple_shapes(0).dimensions(0), 6);
EXPECT_EQ(root->shape().tuple_shapes(0).dimensions(1), k);
EXPECT_EQ(root->shape().tuple_shapes(1).dimensions(0), 6);
EXPECT_EQ(root->shape().tuple_shapes(1).dimensions(1), k);
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimExportSuccess) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[1, 2, 3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, 2, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
MhloDynamicBroadcastInDimNonBroadcastDimSizeGreaterThanOne) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 2, 3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, 2, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimDynamicResultSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[1, 2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, 2, ?]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_broadcast_in_dim"));
EXPECT_THAT(module->ToString(), HasSubstr("broadcast_dimensions=[1,2]"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
MhloDynamicBroadcastInDimInvalidOutputDimensionsElementType) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("f32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("output_dimensions must be an integer type f32[3]")));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimInvalidOutputDimensionsRank) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions,
ParseShape("s32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("output_dimensions must be rank 1 but got rank 2")));
}
TEST(XlaBuilderTest, MhloDynamicBroadcastInDimIncompatibleBroadcastSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_dimensions, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("f32[2, 3, 3]"));
MhloDynamicBroadcastInDim(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_dimensions, "output_dimensions"),
      /*broadcast_dimensions=*/{1, 2}, output_shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("size of operand dimension 0 (2) is not compatible "
"with size of result dimension 1 (3)")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeExportSuccess) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(module->ToString(), HasSubstr("mhlo.dynamic_reshape"));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, MhloDynamicReshapeIncompatibleElementType) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("s32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Element type of operand f32[?,15] and "
"output s32[?,15] must match")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeElementCountMismatch) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[4, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("MhloDynamicReshape has mismatched "
"element counts: from=45 (f32[3,15]) "
"to=60 (f32[4,15])")));
}
TEST(XlaBuilderTest, MhloDynamicReshapeRankMismatch) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output_shape, ParseShape("s32[3]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 15]"));
MhloDynamicReshape(
Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, output_shape, "output_shape"),
shape);
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("output_shape dimension size=3 (s32[3]) and rank "
"of shape=2 (f32[?,15]) must match")));
}
struct UnaryOpTestCase {
std::string operand;
std::string expected;
std::function<XlaOp(XlaOp)> unary_op;
};
struct BinaryOpTestCase {
std::string lhs;
std::string rhs;
absl::Span<const int64_t> broadcast_dimensions;
std::string expected;
std::function<XlaOp(XlaOp, XlaOp, absl::Span<const int64_t>)> binary_op;
std::optional<std::string_view> error_message;
};
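// Note: -9223372036854775808 is Shape::kUnboundedSize (int64_t min), the
// sentinel printed for an unbounded dynamic dimension.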
constexpr absl::string_view kBroadcastDimensionMismatch =
"Broadcast dimension 0 mismatch: 2 != -9223372036854775808; f32[2] and "
"f32[?,10].";
std::array<const int64_t, 0> empty_array = {};
std::array<const int64_t, 1> zero_array = {0};
class XlaBuilderUnboundedUnaryOpTest
: public ::testing::TestWithParam<UnaryOpTestCase> {};
class XlaBuilderUnboundedBinaryOpTest
: public ::testing::TestWithParam<BinaryOpTestCase> {};
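// Drivers for the unbounded-shape test cases: a case either builds and must
// match `expected`, or must fail with `error_message`.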
TEST_P(XlaBuilderUnboundedUnaryOpTest, UnboundedUnaryOpTest) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape(GetParam().operand));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape(GetParam().expected));
GetParam().unary_op(Parameter(&b, 0, operand, "operand"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST_P(XlaBuilderUnboundedBinaryOpTest, UnboundedBinaryOpTest) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape(GetParam().lhs));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape(GetParam().rhs));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape(GetParam().expected));
GetParam().binary_op(Parameter(&b, 0, lhs, "lhs"),
Parameter(&b, 1, rhs, "rhs"),
GetParam().broadcast_dimensions);
if (const auto result = BuildHloModule(b); result.ok()) {
ASSERT_NE(*result, nullptr);
EXPECT_THAT(GetRoot(**result),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
} else {
ASSERT_TRUE(GetParam().error_message.has_value());
EXPECT_THAT(result, StatusIs(_, HasSubstr(*GetParam().error_message)));
}
}
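// A scalar operand broadcasts to the other operand's shape, so the unbounded
// dimension passes through unchanged.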
TEST(XlaBuilderTest, UnboundedAddScalarBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAddDegenerateBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/{0, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAddUnsupportedImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Add(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/zero_array);
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr(kBroadcastDimensionMismatch)));
}
TEST(XlaBuilderTest, UnboundedAllGather) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  AllGather(Parameter(&b, 0, operand, "operand"), /*all_gather_dimension=*/0,
            /*shard_count=*/2,
            /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllReduce) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
  AllReduce(Parameter(&b, 0, operand, "operand"), computation,
            /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
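// Array AllToAll divides the split dimension by split_count and multiplies
// the concat dimension by it; unbounded (?) dimensions stay unbounded. The
// next four tests cover the split/concat/equal/fully-dynamic combinations
// (e.g. f32[?, 15] -> f32[?, 45] below).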
TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitDimension) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 45]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
           /*split_dimension=*/0,
           /*concat_dimension=*/1,
           /*split_count=*/3,
           /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllDynamicConcatDimension) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 5]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
           /*split_dimension=*/1,
           /*concat_dimension=*/0,
           /*split_count=*/3,
           /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllDynamicSplitAndConcatDimensionEqual) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 15]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
           /*split_dimension=*/0,
           /*concat_dimension=*/0,
           /*split_count=*/3,
           /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllFullyDynamic) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?]"));
AllToAll(Parameter(&b, 0, operand, "operand"),
           /*split_dimension=*/0,
           /*concat_dimension=*/1,
           /*split_count=*/3,
           /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedAllToAllTupleVariadicUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple({Parameter(&b, 0, operand, "operand0"),
Parameter(&b, 1, operand, "operand1")},
                    /*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"AllToAllTuple does not support unbounded dynamic shapes")));
}
TEST(XlaBuilderTest, UnboundedAllToAllTupleUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple(Parameter(&b, 0, operand, "operand"),
                    /*split_dimension=*/0,
                    /*concat_dimension=*/1,
                    /*split_count=*/3,
                    /*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"AllToAllTuple does not support unbounded dynamic shapes")));
}
TEST(XlaBuilderTest, BoundedAllToAllTupleUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}"));
b.ReportErrorOrReturn(
AllToAllTuple(Parameter(&b, 0, operand, "operand"),
                    /*split_dimension=*/0,
                    /*concat_dimension=*/1,
                    /*split_count=*/3,
                    /*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("AllToAll does not support bounded dynamic shapes")));
}
TEST(XlaBuilderTest, BoundedAllToAllUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, <=15]{1,0}"));
  b.ReportErrorOrReturn(
      AllToAll(Parameter(&b, 0, operand, "operand"),
               /*split_dimension=*/0,
               /*concat_dimension=*/1,
               /*split_count=*/3,
               /*replica_groups=*/{}));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr("AllToAll does not support bounded dynamic shapes")));
}
TEST(XlaBuilderTest, UnboundedAnd) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
And(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormGrad) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape mean, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape variance, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_scale, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_offset, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape grad_output, ParseShape("f32[5, ?, 7]"));
const Shape expected =
ShapeUtil::MakeTupleShape({grad_operand, grad_scale, grad_offset});
BatchNormGrad(
Parameter(&b, 0, operand, "operand"), Parameter(&b, 1, scale, "scale"),
Parameter(&b, 2, mean, "mean"), Parameter(&b, 3, variance, "variance"),
      Parameter(&b, 4, grad_output, "grad_output"), /*epsilon=*/1.0,
      /*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormInference) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape offset, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape mean, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape variance, ParseShape("f32[5]"));
BatchNormInference(
Parameter(&b, 0, operand, "operand"), Parameter(&b, 1, scale, "scale"),
Parameter(&b, 2, offset, "offset"), Parameter(&b, 3, mean, "mean"),
      Parameter(&b, 4, variance, "variance"), /*epsilon=*/1.0,
      /*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBatchNormTraining) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape output, ParseShape("f32[?, ?, 7]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scale, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape offset, ParseShape("f32[5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape batch_mean, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape batch_var, ParseShape("f32[?]"));
const Shape expected =
ShapeUtil::MakeTupleShape({output, batch_mean, batch_var});
BatchNormTraining(Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, scale, "scale"),
                    Parameter(&b, 2, offset, "offset"), /*epsilon=*/1.0,
                    /*feature_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
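// Bitcast-converting f32 to the narrower f16 appends a trailing dimension of
// size 2 (sizeof(f32) / sizeof(f16)); the unbounded dimension is preserved.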
TEST(XlaBuilderTest, UnboundedBitcastConvert) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f16[?, 10, 2]"));
BitcastConvertType(Parameter(&b, 0, operand, "operand"), PrimitiveType::F16);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBroadcastUnsupportedOperand) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=3, ?]"));
  Broadcast(Parameter(&b, 0, operand, "operand"), /*broadcast_sizes=*/{1});
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("is_unbounded_dynamic")));
}
TEST(XlaBuilderTest, UnboundedBroadcastUnsupportedBroadcastSize) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1]"));
  Broadcast(Parameter(&b, 0, operand, "operand"),
            /*broadcast_sizes=*/{Shape::kUnboundedSize});
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_, HasSubstr("Non-broadcast dimensions must not be dynamic.")));
}
TEST(XlaBuilderTest, UnboundedBroadcastInDim) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[<=2, 3, 4]"));
  BroadcastInDim(Parameter(&b, 0, operand, "operand"),
                 /*out_dim_size=*/{2, 3, 4}, /*broadcast_dimensions=*/{0, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedBroadcastInDimUnsupported) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[<=3, ?]"));
  BroadcastInDim(Parameter(&b, 0, operand, "operand"),
                 /*out_dim_size=*/{2, 3, Shape::kUnboundedSize},
                 /*broadcast_dimensions=*/{0, 2});
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("BroadcastInDim output must shape be "
"static or bounded dynamic")));
}
TEST(XlaBuilderTest, UnboundedCall) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
Call(&b, computation,
{Parameter(&b, 0, operand, "arg0"), Parameter(&b, 1, operand, "arg1")});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCholesky) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape a, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  Cholesky(Parameter(&b, 0, a, "a"), /*lower=*/true);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClamp) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinMaxImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarOperandMaxImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedClampScalarMinOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
UnboundedClampUnsupportedDegenerateOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
Clamp(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Unimplemented implicit broadcast.")));
}
TEST(XlaBuilderTest, UnboundedCollectiveBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  CollectiveBroadcast(Parameter(&b, 0, operand, "operand"),
                      /*replica_groups=*/{});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCollectivePermute) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  CollectivePermute(Parameter(&b, 0, operand, "operand"),
                    /*source_target_pairs=*/{std::make_pair(0, 1)});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedCompare) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("pred[?, ?, 2, 2, <=2, <=2, ?]"));
  Compare(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
          /*direction=*/{});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConcatenate) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand1,
ParseShape("f32[3, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand2,
ParseShape("f32[?, 4, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand3,
ParseShape("f32[?, ?, 2, 2, <=2, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[3, 4, ?, 2, <=2, <=2, ?]"));
ConcatInDim(&b,
{Parameter(&b, 0, operand1, "operand1"),
Parameter(&b, 1, operand2, "operand2"),
Parameter(&b, 2, operand3, "operand3")},
              /*dimension=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConvert) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("s32[?]"));
ConvertElementType(Parameter(&b, 0, operand, "operand"), PrimitiveType::S32);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedConvolution) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 2, ?, 128]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 2, <=128, 8]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 1, ?, 8]"));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
  ConvWithGeneralDimensions(Parameter(&b, 0, lhs, "lhs"),
                            Parameter(&b, 1, rhs, "rhs"),
                            /*window_strides=*/{1, 1}, Padding::kValid, dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDot) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Dot(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDotGeneral) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("f32[?, <=3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[2, 4, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, <=3, 5]"));
DotDimensionNumbers dnums;
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
dnums.add_lhs_batch_dimensions(0);
dnums.add_rhs_batch_dimensions(0);
DotGeneral(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"), dnums);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDynamicSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices, ParseShape("s32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, 2]"));
DynamicSlice(Parameter(&b, 0, operand, "operand"),
{
Parameter(&b, 1, start_indices, "start_indices0"),
Parameter(&b, 2, start_indices, "start_indices1"),
},
               /*slice_sizes=*/{2, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedDynamicUpdateSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape update, ParseShape("f32[?, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices, ParseShape("s32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
DynamicUpdateSlice(Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, update, "update"),
{Parameter(&b, 2, start_indices, "start_indices0"),
Parameter(&b, 3, start_indices, "start_indices1")});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::FFT,
fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithIFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c64[2, <=5, ?]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::IFFT,
fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f64[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("c128[2, <=5, 6]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::RFFT,
fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedFftWithIRFFT) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("c128[2, <=5, ?]"));
const std::vector<int64_t> fft_length = {5, 10};
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f64[2, <=5, 10]"));
Fft(Parameter(&b, 0, operand, "operand"), FftType::IRFFT,
fft_length);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedGather) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[3, 4, 2]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape start_indices,
ParseShape("s32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, 2, 2]"));
GatherDimensionNumbers dimension_numbers;
dimension_numbers.add_offset_dims(2);
dimension_numbers.add_offset_dims(3);
dimension_numbers.add_collapsed_slice_dims(0);
dimension_numbers.add_start_index_map(1);
dimension_numbers.add_start_index_map(0);
dimension_numbers.set_index_vector_dim(2);
Gather(Parameter(&b, 0, operand, "operand"),
Parameter(&b, 1, start_indices, "start_indices"), dimension_numbers,
         /*slice_sizes=*/{1, 2, 2});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedGetTupleElement) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
GetTupleElement(Tuple(&b, {Parameter(&b, 0, operand, "operand")}), 0);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedInfeed) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Infeed(&b, shape, "");
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedInfeedWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("(f32[?, 10], token[])"));
InfeedWithToken(CreateToken(&b), shape, "");
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<xla::HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedMap) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand0, ParseShape("f32[2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape operand1, ParseShape("f32[?, 3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2, ?, ?]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
Map(&b,
{Parameter(&b, 0, operand0, "operand0"),
Parameter(&b, 1, operand1, "operand1")},
      computation, /*dimensions=*/{0, 1, 2}, /*static_operands=*/{});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOptimizationBarrier) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
OptimizationBarrier(Parameter(&b, 0, operand, "operand"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOr) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
  Or(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
     /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedOutfeed) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
  Outfeed(Parameter(&b, 0, operand, "operand"), shape_with_layout,
          /*outfeed_config=*/"");
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedOutfeedWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
  OutfeedWithToken(Parameter(&b, 0, operand, "operand"), CreateToken(&b),
                   shape_with_layout, /*outfeed_config=*/"");
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedPad) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 21]"));
PaddingConfig padding_config;
for (int i = 0; i < 2; i++) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(1);
dimension->set_edge_padding_high(1);
dimension->set_interior_padding(1);
}
  Pad(Parameter(&b, 0, operand, "operand"),
      /*padding_value=*/ConstantR0<float>(&b, 0), padding_config);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRecv) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
Recv(&b, shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedRecvFromHost) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::HOST_TO_DEVICE);
RecvFromHost(CreateToken(&b), shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedRecvWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
RecvWithToken(CreateToken(&b), shape, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedReduce) {
XlaBuilder b(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {7}, {false});
const Shape expected = ShapeUtil::MakeTupleShape({shape, shape, shape});
TF_ASSERT_OK_AND_ASSIGN(const Shape input0, ParseShape("f32[7, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape input1, ParseShape("f32[?, 5]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape input2, ParseShape("f32[7, ?]"));
const Shape scalar_f32 = ShapeUtil::MakeShape(F32, {});
const XlaOp init = Parameter(&b, 3, scalar_f32, "init");
XlaBuilder bsum(TestName());
std::vector<XlaOp> output_operands = {
Add(Parameter(&bsum, 0, scalar_f32, "arg0"),
Parameter(&bsum, 1, scalar_f32, "arg1")),
Add(Parameter(&bsum, 2, scalar_f32, "arg2"),
Parameter(&bsum, 3, scalar_f32, "arg3")),
Add(Parameter(&bsum, 4, scalar_f32, "arg4"),
Parameter(&bsum, 5, scalar_f32, "arg5"))};
Tuple(&bsum, absl::MakeSpan(output_operands));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation sum, bsum.Build());
Reduce(
&b,
{Parameter(&b, 0, input0, "input0"), Parameter(&b, 1, input1, "input1"),
Parameter(&b, 2, input2, "input2")},
{init, init, init}, sum, {1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReducePrecision) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
  ReducePrecision(Parameter(&b, 0, operand, "operand"), /*exponent_bits=*/2,
                  /*mantissa_bits=*/2);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReduceScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, operand, "arg0"),
Parameter(sub_builder.get(), 1, operand, "arg1"));
TF_ASSERT_OK_AND_ASSIGN(computation, sub_builder->Build());
}
ReplicaGroup replica_group;
replica_group.add_replica_ids(0);
replica_group.add_replica_ids(1);
  ReduceScatter(Parameter(&b, 0, operand, "operand"), computation,
                /*scatter_dimension=*/0, /*shard_count=*/2,
                /*replica_groups=*/{replica_group});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReduceWindow) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape input, ParseShape("f32[?, 4, 8]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 3, 5]"));
XlaBuilder bsum(TestName());
Add(Parameter(&bsum, 0, ShapeUtil::MakeShape(F32, {}), "x"),
Parameter(&bsum, 1, ShapeUtil::MakeShape(F32, {}), "y"));
TF_ASSERT_OK_AND_ASSIGN(const XlaComputation sum, bsum.Build());
  ReduceWindow(Parameter(&b, 0, input, "input"), ConstantR0<float>(&b, 0.f),
               sum, /*window_dimensions=*/{1, 2, 4},
               /*window_strides=*/{1, 1, 1}, Padding::kValid);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReshape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[2,3]"));
  Reshape(Parameter(&b, 0, operand, "operand"), /*dimensions=*/{0},
          /*new_sizes=*/{2, 3});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedReshapeUnsupportedOutputShape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[6]"));
  Reshape(Parameter(&b, 0, operand, "operand"), /*dimensions=*/{0},
          /*new_sizes=*/{Shape::kUnboundedSize, Shape::kUnboundedSize});
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"Reshaping with unbounded result shape is not supported.")));
}
TEST(XlaBuilderTest, UnboundedReshapeUnsupportedInferredShape) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?]"));
Reshape(operand, Parameter(&b, 0, operand, "operand"));
EXPECT_THAT(
BuildHloModule(b),
StatusIs(_,
HasSubstr(
"Reshaping with unbounded result shape is not supported.")));
}
TEST(XlaBuilderTest, UnboundedReverse) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Rev(Parameter(&b, 0, operand, "operand"), {0, 1});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngBitGenerator) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape initial_state, ParseShape("u32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("u32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("(u32[?, 10], u32[?, 10])"));
RngBitGenerator(RandomAlgorithm::RNG_DEFAULT,
Parameter(&b, 0, initial_state, "initial_state"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngNormal) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
RngNormal(Parameter(&b, 0, ShapeUtil::MakeScalarShape(F32), "mu"),
Parameter(&b, 1, ShapeUtil::MakeScalarShape(F32), "sigma"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedRngUniform) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
RngUniform(Parameter(&b, 0, ShapeUtil::MakeScalarShape(F32), "a"),
Parameter(&b, 1, ShapeUtil::MakeScalarShape(F32), "b"), shape);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape input, ParseShape("f32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape scatter_indices,
ParseShape("s32[?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape updates, ParseShape("f32[?, ?, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, ?, ?]"));
XlaComputation update_computation;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(update_computation, sub_builder->Build());
}
ScatterDimensionNumbers dimension_numbers;
dimension_numbers.add_update_window_dims(2);
dimension_numbers.add_update_window_dims(3);
dimension_numbers.add_inserted_window_dims(0);
dimension_numbers.add_scatter_dims_to_operand_dims(1);
dimension_numbers.add_scatter_dims_to_operand_dims(0);
dimension_numbers.set_index_vector_dim(2);
Scatter(Parameter(&b, 0, input, "input"),
Parameter(&b, 1, scatter_indices, "scatter_indices"),
Parameter(&b, 2, updates, "updates"), update_computation,
          dimension_numbers, /*indices_are_sorted=*/false,
          /*unique_indices=*/false);
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelect) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("pred[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("f32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs,
ParseShape("f32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[1, 1, 2, 2, <=2, <=2, ?]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPred) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarOnTrueOnFalseImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPredOnFalseImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSelectScalarPredOnTrueImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest,
UnboundedSelectUnsupportedDegenerateOperandImplicitBroadcast) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs, ParseShape("pred[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs, ParseShape("f32[1]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape ehs, ParseShape("f32[?, 10]"));
Select(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
Parameter(&b, 2, ehs, "ehs"));
EXPECT_THAT(BuildHloModule(b),
StatusIs(_, HasSubstr("Unimplemented implicit broadcast.")));
}
TEST(XlaBuilderTest, UnboundedSelectAndScatter) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape source, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape init_value, ParseShape("f32[]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation select;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
Compare(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"),
ComparisonDirection::kGe);
TF_ASSERT_OK_AND_ASSIGN(select, sub_builder->Build());
}
XlaComputation scatter;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(scatter, sub_builder->Build());
}
SelectAndScatter(Parameter(&b, 0, operand, "operand"), select,
                   /*window_dimensions=*/std::array<int64_t, 2>({3, 1}),
                   /*window_strides=*/std::array<int64_t, 2>({2, 1}),
Padding::kValid, Parameter(&b, 1, source, "source"),
Parameter(&b, 2, init_value, "init_value"), scatter);
TF_ASSERT_OK_AND_ASSIGN(auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSend) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
Send(Parameter(&b, 0, operand, "operand"), handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSendToHost) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape shape_with_layout,
ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_HOST);
  SendToHost(Parameter(&b, 0, operand, "operand"), CreateToken(&b),
             shape_with_layout, handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSendWithToken) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
ChannelHandle handle;
handle.set_handle(1);
handle.set_type(ChannelHandle::DEVICE_TO_DEVICE);
  SendWithToken(Parameter(&b, 0, operand, "operand"), CreateToken(&b), handle);
EXPECT_IS_OK(BuildHloModule(b));
}
TEST(XlaBuilderTest, UnboundedSlice) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[1, <=3, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[1, <=2, 3]"));
  Slice(Parameter(&b, 0, operand, "operand"), /*start_indices=*/{0, 1, 2},
        /*limit_indices=*/{1, 3, 5}, /*strides=*/{1, 1, 1});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedSort) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?, 10]"));
XlaComputation comparator;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
Compare(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"),
ComparisonDirection::kLt);
TF_ASSERT_OK_AND_ASSIGN(comparator, sub_builder->Build());
}
  Sort({Parameter(&b, 0, operand, "operand")}, comparator, /*dimension=*/0,
       /*is_stable=*/true);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTranspose) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand,
ParseShape("f32[1, ?, 2, ?, <=2]{4,3,2,1,0}"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("f32[<=2, 1, ?, 2, ?]{0,2,3,4,1}"));
  Transpose(Parameter(&b, 0, operand, "operand"),
            /*permutation=*/{4, 0, 3, 2, 1});
TF_ASSERT_OK_AND_ASSIGN(const auto module, BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTriangularSolve) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape a_shape, ParseShape("f32[?, 10]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape b_shape, ParseShape("f32[10, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[10, ?]"));
  TriangularSolve(Parameter(&b, 0, a_shape, "a"),
                  Parameter(&b, 1, b_shape, "b"), /*left_side=*/true,
                  /*lower=*/true, /*unit_diagonal=*/false,
                  /*transpose_a=*/TriangularSolveOptions::TRANSPOSE);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedTuple) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape operand, ParseShape("f32[?, 10]"));
const Shape expected = ShapeUtil::MakeTupleShape({operand});
Tuple(&b, {Parameter(&b, 0, operand, "operand")});
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedWhile) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape init, ParseShape("f32[?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected, ParseShape("f32[?]"));
XlaComputation add;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(Parameter(sub_builder.get(), 0, ShapeUtil::MakeScalarShape(F32),
"arg0"),
Parameter(sub_builder.get(), 1, ShapeUtil::MakeScalarShape(F32),
"arg1"));
TF_ASSERT_OK_AND_ASSIGN(add, sub_builder->Build());
}
XlaComputation condition;
{
const std::unique_ptr<XlaBuilder> sub_builder =
b.CreateSubBuilder("compare");
Ge(ConstantR0<float>(sub_builder.get(), 10.0f),
Reduce(Parameter(sub_builder.get(), 0, init, "prev"),
ConstantR0<float>(sub_builder.get(), 0.0f), add,
              /*dimensions_to_reduce=*/{0}));
TF_ASSERT_OK_AND_ASSIGN(condition, sub_builder->Build());
}
XlaComputation body;
{
const std::unique_ptr<XlaBuilder> sub_builder = b.CreateSubBuilder("add");
Add(ConstantR1<float>(sub_builder.get(), {1.0f}),
Parameter(sub_builder.get(), 0, init, "prev"),
        /*broadcast_dimensions=*/{0});
TF_ASSERT_OK_AND_ASSIGN(body, sub_builder->Build());
}
While(condition, body, Parameter(&b, 0, init, "init"));
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
TEST(XlaBuilderTest, UnboundedXor) {
XlaBuilder b(TestName());
TF_ASSERT_OK_AND_ASSIGN(const Shape lhs,
ParseShape("s32[1, ?, 2, ?, <=2, ?, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape rhs,
ParseShape("s32[?, 1, ?, 2, ?, <=2, ?]"));
TF_ASSERT_OK_AND_ASSIGN(const Shape expected,
ParseShape("s32[?, ?, 2, 2, <=2, <=2, ?]"));
  Xor(Parameter(&b, 0, lhs, "lhs"), Parameter(&b, 1, rhs, "rhs"),
      /*broadcast_dimensions=*/empty_array);
TF_ASSERT_OK_AND_ASSIGN(const std::unique_ptr<HloModule> module,
BuildHloModule(b));
EXPECT_THAT(GetRoot(*module),
GmockMatch(m::Op().WithShapeEqualTo(&expected)));
}
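// The parameterized suites below sweep single ops over unbounded dynamic
// shapes. Each UnaryOpTestCase lists the operand shape, the expected result
// shape, and the op under test; each BinaryOpTestCase additionally carries
// the broadcast dimensions passed to the op.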
INSTANTIATE_TEST_SUITE_P(UnboundedDynamism, XlaBuilderUnboundedUnaryOpTest,
::testing::ValuesIn<UnaryOpTestCase>(
{{"f32[?]", "f32[?]", &Abs},
{"f32[?]", "f32[?]", &Cbrt},
{"f32[?]", "f32[?]", &Ceil},
{"u32[?]", "u32[?]", &Clz},
{"f32[?]", "f32[?]", &Cos},
{"f32[?]", "f32[?]", &Erf},
{"f32[?]", "f32[?]", &Exp},
{"f32[?]", "f32[?]", &Expm1},
{"f32[?]", "f32[?]", &Floor},
{"f32[?]", "f32[?]", &Imag},
{"f32[?]", "pred[?]", &IsFinite},
{"f32[?]", "f32[?]", &Log},
{"f32[?]", "f32[?]", &Log1p},
{"f32[?]", "f32[?]", &Logistic},
{"f32[?]", "f32[?]", &Neg},
{"s32[?]", "s32[?]", &Not},
{"u32[?]", "u32[?]", &PopulationCount},
{"f32[?]", "f32[?]", &Real},
{"f32[?]", "f32[?]", &Round},
{"f32[?]", "f32[?]", &RoundNearestEven},
{"f32[?]", "f32[?]", &Rsqrt},
{"f32[?]", "f32[?]", &Sign},
{"f32[?]", "f32[?]", &Sin},
{"f32[?]", "f32[?]", &Sqrt},
{"f32[?]", "f32[?]", &Tanh}}));
INSTANTIATE_TEST_SUITE_P(
UnboundedDynamism, XlaBuilderUnboundedBinaryOpTest,
    ::testing::ValuesIn<BinaryOpTestCase>({
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Add},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Add},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Atan2},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "c64[?, ?, 2, 2, <=2, <=2, ?]", &Complex},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "c64[?, 10]", &Complex},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Div},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Div},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Max},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Max},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Min},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Min},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Mul},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Mul},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "pred[?, 10]", &Ne},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Pow},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Pow},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Rem},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Rem},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &ShiftLeft},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &ShiftLeft},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &ShiftRightArithmetic},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &ShiftRightArithmetic},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &ShiftRightLogical},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &ShiftRightLogical},
        {"f32[1, ?, 2, ?, <=2, ?, ?]", "f32[?, 1, ?, 2, ?, <=2, ?]",
         /*broadcast_dimensions=*/empty_array,
         "f32[?, ?, 2, 2, <=2, <=2, ?]", &Sub},
        {"f32[?, 10]", "f32[1]", /*broadcast_dimensions=*/zero_array,
         "f32[?, 10]", &Sub},
    }));
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/xla_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/xla_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
746af95a-344b-481c-9b17-a794ea3ed4f6 | cpp | tensorflow/tensorflow | padding | tensorflow/lite/delegates/gpu/common/tasks/padding.cc | tensorflow/lite/delegates/gpu/cl/kernels/padding_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/padding.h"
#include <string>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
std::string GetPaddingCode(const OperationDef& op_def,
const PadAttributes& attr, GPUOperation* op) {
op->AddSrcTensor("src_tensor", op_def.src_tensors[0]);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
op->args_.AddInt("prepended_x", attr.prepended.w);
op->args_.AddInt("prepended_y", attr.prepended.h);
op->args_.AddInt("prepended_z", attr.prepended.c);
op->args_.AddInt("prepended_w", attr.prepended.b);
const std::string dst_batch =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
const std::string channels[] = {".x", ".y", ".z", ".w"};
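  // REFLECT padding mirrors an out-of-range coordinate back into [0, size);
  // e.g. with size 4, reflect_coord(-2, 4) == 2 and reflect_coord(5, 4) == 1.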
if (attr.type == PaddingContentType::REFLECT) {
c += "int reflect_coord(int x, int size) {\n";
c += " int t = abs(x) - size + 1;\n";
c += " return size - 1 - abs(t);\n";
c += "}\n\n";
}
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " args.src_tensor::type result = (" +
ToCLDataType(op_def.src_tensors[0].GetDataType(), 4) +
")(" + std::to_string(attr.constant_values) + ");\n";
c += " int s_x = X - args.prepended_x;\n";
c += " int s_y = Y - args.prepended_y;\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = " + dst_batch + " - args.prepended_w;\n";
c += " args.src_tensor.SetBatchRef(s_b);\n";
}
if (attr.type == PaddingContentType::REFLECT) {
c += " s_x = reflect_coord(s_x, args.src_tensor.Width());\n";
c += " s_y = reflect_coord(s_y, args.src_tensor.Height());\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = reflect_coord(s_b, args.src_tensor.Batch());\n";
}
if (attr.prepended.c == 0 && attr.appended.c == 0) {
c += " result = args.src_tensor.Read(s_x, s_y, Z);\n";
} else {
c += " int start_channel = Z * 4;\n";
for (int i = 0; i < 4; ++i) {
const auto& s = channels[i];
c += " {\n";
c += " int channel = start_channel + " + std::to_string(i) + ";\n";
c += " int s_z = channel - args.prepended_z;\n";
c += " s_z = clamp(reflect_coord(s_z, args.src_tensor.Channels()), "
"0, "
"args.src_tensor.Channels() - "
"1);\n";
c += " args.src_tensor.ReadPerChannel(result" + s +
", s_x, s_y, s_z);\n";
c += " }\n";
}
}
} else {
c += " bool inside_x = s_x >= 0 && s_x < args.src_tensor.Width();\n";
c += " bool inside_y = s_y >= 0 && s_y < args.src_tensor.Height();\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " inside_y = inside_y && (s_b >= 0 && s_b < "
"args.src_tensor.Batch());\n";
}
c += " if (inside_x && inside_y) {\n";
if (attr.prepended.c == 0 && attr.appended.c == 0) {
c += " result = args.src_tensor.Read(s_x, s_y, Z);\n";
} else if (attr.prepended.c % 4 == 0 &&
attr.prepended.b == attr.appended.b) {
c += " int s_z = Z - args.prepended_z / 4;\n";
c += " if (s_z >= 0 && s_z < args.src_tensor.Slices()) {\n";
c += " result = args.src_tensor.Read(s_x, s_y, s_z);\n";
c += " }\n";
} else {
c += " int start_channel = Z * 4;\n";
for (int i = 0; i < 4; ++i) {
const auto& s = channels[i];
c += " {\n";
c += " int channel = start_channel + " + std::to_string(i) + ";\n";
c += " int s_z = channel - args.prepended_z;\n";
c += " if (s_z >= 0 && s_z < args.src_tensor.Channels()) {\n";
c += " args.src_tensor.ReadPerChannel(result" + s +
", s_x, s_y, s_z);\n";
c += " }\n";
c += " }\n";
}
}
c += " }\n";
}
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += "}\n";
return c;
}
}  // namespace
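// Assembles the padding GPUOperation with a one-work-item-per-output grid.
// A minimal usage sketch (PadAttributes field names come from the generator
// above; the BHWC constructor and its values are assumed for illustration):
//   PadAttributes attr;
//   attr.type = PaddingContentType::REFLECT;
//   attr.prepended = BHWC(0, 1, 1, 0);  // one padded row/column in front
//   attr.appended = BHWC(0, 1, 1, 0);   // and one behind
//   GPUOperation op = CreatePadding(op_def, attr);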
GPUOperation CreatePadding(const OperationDef& definition,
const PadAttributes& attr) {
GPUOperation op(definition);
op.code_ = GetPaddingCode(definition, attr, &op);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}  // namespace gpu
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/padding_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, PaddingAppendWidth) {
auto status = PaddingAppendWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendWidthConstValues) {
auto status = PaddingAppendWidthConstValuesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependWidth) {
auto status = PaddingPrependWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendHeight) {
auto status = PaddingAppendHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependHeight) {
auto status = PaddingPrependHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendChannels) {
auto status = PaddingAppendChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependChannels) {
auto status = PaddingPrependChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependChannelsX4) {
auto status = PaddingPrependChannelsX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingComplex) {
auto status = PaddingComplexTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingReflectWidth) {
auto status = PaddingReflectWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingReflectChannels) {
auto status = PaddingReflectChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}  // namespace
}  // namespace cl
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/padding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/padding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4f270fd-bf69-449f-b2f8-d9f027296279 | cpp | tensorflow/tensorflow | value_inference | third_party/xla/xla/hlo/builder/value_inference.cc | third_party/xla/xla/tests/value_inference_test.cc | #include "xla/hlo/builder/value_inference.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
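// Builds a literal with every leaf element set to `pred`, mirroring the
// (possibly tuple) structure of `reference_shape`; a token shape degrades to
// a scalar pred.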
Literal CreatePredLiteral(bool pred, const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
const auto& reference_shape_tuple_shapes = reference_shape.tuple_shapes();
sub_literals.reserve(reference_shape_tuple_shapes.size());
for (const Shape& shape : reference_shape_tuple_shapes) {
sub_literals.emplace_back(CreatePredLiteral(pred, shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateR0(pred);
}
Literal literal = LiteralUtil::CreateR0(pred);
Literal literal_broadcast =
literal.Broadcast(ShapeUtil::ChangeElementType(reference_shape, PRED), {})
.value();
return literal_broadcast;
}
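// Like CreatePredLiteral, but fills the literal with the S64 `value`; token
// shapes become token literals.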
Literal CreateS64Literal(int64_t value, const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
const auto& reference_shape_tuple_shapes = reference_shape.tuple_shapes();
sub_literals.reserve(reference_shape_tuple_shapes.size());
for (const Shape& shape : reference_shape_tuple_shapes) {
sub_literals.emplace_back(CreateS64Literal(value, shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateToken();
}
Literal literal = LiteralUtil::CreateR0<int64_t>(value);
return literal
.Broadcast(ShapeUtil::ChangeElementType(reference_shape, S64), {})
.value();
}
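// Produces an arbitrary but well-formed literal (all ones) for positions
// whose contents do not matter to the caller.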
Literal CreateGarbageLiteral(const Shape& reference_shape) {
if (reference_shape.IsTuple()) {
std::vector<Literal> sub_literals;
for (const Shape& shape : reference_shape.tuple_shapes()) {
sub_literals.emplace_back(CreateGarbageLiteral(shape));
}
return Literal::MoveIntoTuple(absl::MakeSpan(sub_literals));
}
PrimitiveType element_type = reference_shape.element_type();
if (element_type == TOKEN) {
return LiteralUtil::CreateToken();
}
Literal literal = LiteralUtil::One(element_type);
return literal.Broadcast(reference_shape, {}).value();
}
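// Evaluates a single HloInstructionProto by rebuilding it, its operands (as
// constants), and an optional called computation inside a throwaway
// HloModule. Builder-style usage sketch, based on the members below:
//   TF_ASSIGN_OR_RETURN(Literal result,
//                       HloProtoEvaluator(evaluator, proto)
//                           .WithOperands(operand_literals)
//                           .WithPrimitiveType(S64)
//                           .Evaluate());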
struct HloProtoEvaluator {
explicit HloProtoEvaluator(HloEvaluator& evaluator, HloInstructionProto inst)
: evaluator(evaluator),
inst(std::move(inst)),
module("EmptyModuleForEvaluation", HloModuleConfig()) {}
HloProtoEvaluator& WithComputation(
std::unique_ptr<HloComputation> new_computation) {
computation = new_computation.get();
computation->ClearUniqueIdInternal();
for (HloInstruction* inst : computation->instructions()) {
inst->ClearUniqueIdInternal();
}
module.AddEmbeddedComputation(std::move(new_computation));
return *this;
}
HloProtoEvaluator& WithPrimitiveType(PrimitiveType new_primitive_type) {
primitive_type = new_primitive_type;
return *this;
}
HloProtoEvaluator& WithOpCode(HloOpcode new_opcode) {
opcode = new_opcode;
return *this;
}
HloProtoEvaluator& WithOperands(absl::Span<Literal> operands) {
this->operands = operands;
return *this;
}
HloProtoEvaluator& WithSubshape(ShapeIndex shape_index) {
this->shape_index = std::move(shape_index);
return *this;
}
absl::StatusOr<Literal> Evaluate() {
HloComputation::Builder builder("EmptyComputation");
absl::flat_hash_map<int64_t, HloInstruction*> operand_map;
for (int64_t i = 0; i < inst.operand_ids_size(); ++i) {
int64_t operand_handle = inst.operand_ids(i);
std::unique_ptr<HloInstruction> operand =
HloInstruction::CreateConstant(operands[i].Clone());
operand_map[operand_handle] = operand.get();
builder.AddInstruction(std::move(operand));
}
if (primitive_type.has_value()) {
*inst.mutable_shape() = ShapeUtil::ChangeElementType(
Shape(inst.shape()), primitive_type.value())
.ToProto();
}
if (opcode.has_value()) {
*inst.mutable_opcode() = std::string(HloOpcodeString(opcode.value()));
}
absl::flat_hash_map<int64_t, HloComputation*> computation_map;
if (inst.called_computation_ids_size() != 0) {
TF_RET_CHECK(inst.called_computation_ids_size() == 1 &&
computation != nullptr)
<< inst.DebugString();
computation_map[inst.called_computation_ids(0)] = computation;
}
TF_ASSIGN_OR_RETURN(
auto new_instruction,
HloInstruction::CreateFromProto(inst, operand_map, computation_map));
new_instruction->ClearUniqueIdInternal();
builder.AddInstruction(std::move(new_instruction));
auto computation = builder.Build();
module.AddEntryComputation(std::move(computation));
if (shape_index.empty()) {
return evaluator.Evaluate(module.entry_computation()->root_instruction());
} else {
TF_ASSIGN_OR_RETURN(
auto result,
evaluator.Evaluate(module.entry_computation()->root_instruction()));
return result.SubLiteral(this->shape_index);
}
}
HloEvaluator& evaluator;
HloInstructionProto inst;
HloModule module;
absl::Span<Literal> operands;
ShapeIndex shape_index = {};
HloComputation* computation = nullptr;
std::optional<PrimitiveType> primitive_type = std::nullopt;
std::optional<HloOpcode> opcode = std::nullopt;
};
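// What a postorder-DFS node is asked to infer about a handle: its exact
// constant value, an upper or lower bound on it, or whether the value
// (respectively the bound) depends on dynamic dimensions.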
enum PostorderDFSNodeType {
kConstantValue = 0,
kConstantUpperBound,
kConstantLowerBound,
kValueIsDynamic,
kBoundIsDynamic,
};
std::string PostorderDFSNodeTypeToString(PostorderDFSNodeType type) {
switch (type) {
case kConstantValue:
return "kConstantValue";
case kConstantUpperBound:
return "kConstantUpperBound";
case kConstantLowerBound:
return "kConstantLowerBound";
case kValueIsDynamic:
return "kValueIsDynamic";
case kBoundIsDynamic:
return "kBoundIsDynamic";
}
}
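// Tracks which tuple element is being queried (shape_index) and, for
// parameters inside called computations, the chain of caller operand handles
// used to resolve them back to the callers' arguments.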
struct InferenceContext {
explicit InferenceContext(ShapeIndex shape_index,
std::vector<int64_t> caller_operand_handles)
: shape_index(std::move(shape_index)),
caller_operand_handles(std::move(caller_operand_handles)) {}
ShapeIndex shape_index;
std::vector<int64_t> caller_operand_handles;
};
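// One DFS edge: the dependency's handle, what to infer about it, the context
// to infer it in, and a free-form annotation for debugging.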
struct PostorderDFSDep {
explicit PostorderDFSDep(int64_t handle, PostorderDFSNodeType type,
InferenceContext context, std::string annotation)
: handle(handle),
type(type),
context(std::move(context)),
annotation(std::move(annotation)) {}
int64_t handle;
PostorderDFSNodeType type;
InferenceContext context;
std::string annotation;
};
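// Visit functions consume the evaluated dependency literals and produce this
// node's literal; the arity-specific forms below are adapted to the generic
// span-based form by PostorderDFSNode::AddVisit.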
using Visit = std::function<absl::StatusOr<Literal>(absl::Span<Literal>)>;
using Visit0D = std::function<absl::StatusOr<Literal>()>;
using Visit1D = std::function<absl::StatusOr<Literal>(Literal)>;
using Visit2D = std::function<absl::StatusOr<Literal>(Literal, Literal)>;
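// A unit of DFS work: the dependencies to evaluate first and the visit
// function that combines their results. [[nodiscard]] guards against
// accidentally dropping a configured node.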
struct [[nodiscard]] PostorderDFSNode {
PostorderDFSNode& AddDependency(int64_t handle, PostorderDFSNodeType type,
InferenceContext context,
std::string annotation = "") {
dependencies.emplace_back(handle, type, std::move(context),
std::move(annotation));
return *this;
}
PostorderDFSNode& AddVisit(const Visit& visit) {
this->visit = visit;
return *this;
}
PostorderDFSNode& AddVisit(const Visit0D& visit) {
this->visit = [visit](absl::Span<Literal> literals) { return visit(); };
return *this;
}
PostorderDFSNode& AddVisit(const Visit1D& visit) {
this->visit = [visit](absl::Span<Literal> literals) {
return visit(std::move(literals[0]));
};
return *this;
}
PostorderDFSNode& AddVisit(const Visit2D& visit) {
this->visit = [visit](absl::Span<Literal> literals) {
return visit(std::move(literals[0]), std::move(literals[1]));
};
return *this;
}
std::vector<PostorderDFSDep> dependencies;
Visit visit;
};
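// Caller-provided lookups mapping builder handles to instruction and
// computation protos.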
using HandleToInstruction =
std::function<absl::StatusOr<const HloInstructionProto*>(int64_t)>;
using HandleToComputation = std::function<const HloComputationProto*(int64_t)>;
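// Walks the instruction graph iteratively in postorder, dispatching each
// (handle, type, context) query to one of the Analyze* methods below and
// memoizing the resulting literals in `evaluated`.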
struct PostorderDFSVisitor {
PostorderDFSVisitor(HloEvaluator& evaluator,
HandleToInstruction handle_to_instruction,
HandleToComputation handle_to_computation)
: evaluator(evaluator),
handle_to_instruction(handle_to_instruction),
handle_to_computation(handle_to_computation) {}
absl::StatusOr<PostorderDFSNode> AnalyzeUpperBound(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeLowerBound(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeIsDynamic(int64_t handle,
PostorderDFSNodeType type,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeConstant(int64_t handle,
InferenceContext context);
absl::StatusOr<PostorderDFSNode> AnalyzeConstantValueFallback(
int64_t handle, PostorderDFSNodeType type, InferenceContext context);
absl::StatusOr<Literal> PostOrderDFSVisit(int64_t handle,
PostorderDFSNodeType type);
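  // True if the op produces integral values, possibly through a kConvert
  // from an integral operand.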
bool IsValueEffectiveInteger(int64_t handle) {
const HloInstructionProto* instr = handle_to_instruction(handle).value();
if (primitive_util::IsIntegralType(instr->shape().element_type())) {
return true;
}
HloOpcode opcode = StringToHloOpcode(instr->opcode()).value();
if (opcode != HloOpcode::kConvert) {
return false;
}
const HloInstructionProto* parent =
handle_to_instruction(instr->operand_ids(0)).value();
if (primitive_util::IsIntegralType(parent->shape().element_type())) {
return true;
}
return false;
}
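  // Heuristic cutoff: give up on analyses whose result or operands exceed
  // kLargeShapeElementLimit elements (dimension-size ops are exempt from the
  // operand check since they stay cheap regardless of operand size).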
bool IsInstructionOverLimit(const HloInstructionProto* proto,
const InferenceContext& context) {
auto subshape = std::make_unique<Shape>(
ShapeUtil::GetSubshape(Shape(proto->shape()), context.shape_index));
if (subshape->IsArray() &&
ShapeUtil::ElementsIn(*subshape) > kLargeShapeElementLimit) {
return true;
}
HloOpcode opcode = StringToHloOpcode(proto->opcode()).value();
for (int64_t operand_id : proto->operand_ids()) {
const HloInstructionProto* operand =
handle_to_instruction(operand_id).value();
auto operand_shape = std::make_unique<Shape>(operand->shape());
if (operand_shape->IsArray() &&
ShapeUtil::ElementsIn(*operand_shape) > kLargeShapeElementLimit &&
opcode != HloOpcode::kGetDimensionSize &&
opcode != HloOpcode::kSetDimensionSize) {
return true;
}
}
return false;
}
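  // Memoization key: query results depend on the instruction handle, the
  // context (sub-shape and caller stack), and the query type.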
struct CacheKey {
CacheKey(int64_t handle, InferenceContext context,
PostorderDFSNodeType type)
: handle(handle), context(context), type(type) {}
int64_t handle;
InferenceContext context;
PostorderDFSNodeType type;
template <typename H>
friend H AbslHashValue(H h, const CacheKey& key) {
h = H::combine(std::move(h), key.handle);
h = H::combine(std::move(h), key.context.shape_index.ToString());
h = H::combine(std::move(h),
VectorString(key.context.caller_operand_handles));
h = H::combine(std::move(h), key.type);
return h;
}
friend bool operator==(const CacheKey& lhs, const CacheKey& rhs) {
return lhs.handle == rhs.handle &&
lhs.context.shape_index == rhs.context.shape_index &&
lhs.context.caller_operand_handles ==
rhs.context.caller_operand_handles &&
lhs.type == rhs.type;
}
};
HloEvaluator& evaluator;
absl::flat_hash_map<CacheKey, Literal> evaluated;
HandleToInstruction handle_to_instruction;
HandleToComputation handle_to_computation;
static constexpr int64_t kLargeShapeElementLimit = 1000 * 1000;
};
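// Conservative "give up" result: garbage values for constant/bound queries,
// all-true (everything dynamic) for dynamism queries.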
PostorderDFSNode CreateAllDynamicResult(const Shape& shape,
const PostorderDFSNodeType& type) {
return PostorderDFSNode().AddVisit(
[shape, type](absl::Span<Literal>) -> Literal {
if (type == PostorderDFSNodeType::kConstantValue ||
type == PostorderDFSNodeType::kConstantUpperBound ||
type == PostorderDFSNodeType::kConstantLowerBound) {
return CreateGarbageLiteral(shape);
} else {
return CreatePredLiteral(true, shape);
}
});
}
}
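// Fallback shared by the constant-value and bound analyses for opcodes
// without dedicated handling: structural ops (kCall, kConditional,
// kGetTupleElement, kTuple) are forwarded through, side-effecting or
// unresolvable ops produce an all-dynamic result, a few opcodes the callers
// are expected to special-case are rejected, and everything else is
// evaluated directly on its operands' analysis results.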
absl::StatusOr<PostorderDFSNode>
PostorderDFSVisitor::AnalyzeConstantValueFallback(int64_t handle,
PostorderDFSNodeType type,
InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
InferenceContext dep_context = context;
dep_context.shape_index = {};
result.AddDependency(operand_id, type, dep_context);
}
switch (opcode) {
case HloOpcode::kRng:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kCustomCall:
case HloOpcode::kWhile:
case HloOpcode::kSend:
case HloOpcode::kRecv:
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
case HloOpcode::kParameter: {
if (opcode == HloOpcode::kParameter &&
!context.caller_operand_handles.empty()) {
int64_t caller_operand = context.caller_operand_handles.back();
context.caller_operand_handles.pop_back();
return result.AddDependency(caller_operand, type, context)
.AddVisit([](Literal literal) { return literal; });
}
return CreateAllDynamicResult(subshape, type);
}
case HloOpcode::kSubtract:
case HloOpcode::kCos:
case HloOpcode::kSin:
case HloOpcode::kTan:
case HloOpcode::kNegate:
case HloOpcode::kAbs:
case HloOpcode::kDivide:
case HloOpcode::kGetDimensionSize: {
return InvalidArgument(
"AnalyzeConstantValueFallback can't handle opcode: %s",
root->opcode());
}
case HloOpcode::kCall: {
auto node = PostorderDFSNode();
auto* call_proto = root;
if (call_proto->operand_ids_size() != 1) {
return CreateAllDynamicResult(subshape, type);
}
int64_t called_root =
handle_to_computation(call_proto->called_computation_ids(0))
->root_id();
InferenceContext call_context = context;
call_context.caller_operand_handles.push_back(call_proto->operand_ids(0));
node.AddDependency(called_root, PostorderDFSNodeType::kConstantValue,
call_context, "callee's root instruction");
return node.AddVisit([](Literal operand) -> absl::StatusOr<Literal> {
return std::move(operand);
});
}
case HloOpcode::kConditional: {
auto node = PostorderDFSNode();
auto* conditional_proto = root;
InferenceContext predicate_context = context;
predicate_context.shape_index = {};
node.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kConstantValue,
predicate_context)
.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic,
predicate_context);
const int64_t branch_size =
conditional_proto->called_computation_ids_size();
for (int64_t i = 0; i < branch_size; ++i) {
int64_t branch_root =
handle_to_computation(conditional_proto->called_computation_ids(i))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
conditional_proto->operand_ids(i + 1));
node.AddDependency(branch_root, PostorderDFSNodeType::kConstantValue,
branch_context);
}
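      // Operands seen by the visit below: [0] = predicate value,
      // [1] = predicate dynamism, [2 + i] = branch i's value.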
return node.AddVisit(
[](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
int64_t pred_is_dynamic = operands[1].Get<bool>({});
if (pred_is_dynamic) {
return std::move(operands[2]);
} else {
int64_t branch_index = 0;
if (operands[0].shape().element_type() == PRED) {
if (operands[0].Get<bool>({})) {
branch_index = 0;
} else {
branch_index = 1;
}
} else {
branch_index = operands[0].GetIntegralAsS64({}).value();
}
              const int64_t branch_value_index = 2 + branch_index;
              return std::move(operands[branch_value_index]);
}
});
}
case HloOpcode::kGetTupleElement: {
int64_t operand_handle = root->operand_ids(0);
context.shape_index.push_front(root->tuple_index());
return PostorderDFSNode()
.AddDependency(operand_handle, type, context)
.AddVisit([](Literal operand) { return operand; });
}
case HloOpcode::kReduce:
case HloOpcode::kSort:
case HloOpcode::kScatter:
case HloOpcode::kReduceWindow: {
const HloComputationProto* computation_proto =
handle_to_computation(root->called_computation_ids(0));
return result.AddVisit(
[root, computation_proto, context,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(
auto computation,
HloComputation::CreateFromProto(*computation_proto, {}));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithComputation(std::move(computation))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
default: {
if (opcode == HloOpcode::kTuple && !context.shape_index.empty()) {
int64_t tuple_operand_index = context.shape_index.front();
InferenceContext tuple_operand_context = context;
tuple_operand_context.shape_index.pop_front();
return PostorderDFSNode()
.AddDependency(root->operand_ids(tuple_operand_index), type,
tuple_operand_context)
.AddVisit([](Literal operand) { return operand; });
}
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
}
}
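// Builds the DFS node computing an upper bound for `handle`. For example,
// the upper bound of kGetDimensionSize is the static dimension size, and
// kSubtract/kDivide pair the first operand's upper bound with the second
// operand's lower bound.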
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeUpperBound(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantUpperBound);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
      TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
                          handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[operand_proto, dimension]() -> absl::StatusOr<Literal> {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
});
}
case HloOpcode::kAbs: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal lower_bound,
Literal upper_bound) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(auto lower_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, lower_bound));
TF_ASSIGN_OR_RETURN(auto upper_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, upper_bound));
return evaluator.EvaluateElementwiseBinaryOp(
HloOpcode::kMaximum, lower_bound_abs, upper_bound_abs);
});
}
case HloOpcode::kSort: {
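      // Sorting permutes but never changes values, so each output's upper
      // bound is the maximum element of the corresponding operand.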
auto dfs = PostorderDFSNode();
InferenceContext dep_context = context;
dep_context.shape_index = {};
if (!context.shape_index.empty()) {
dfs.AddDependency(root->operand_ids(context.shape_index[0]),
PostorderDFSNodeType::kConstantUpperBound,
dep_context);
} else {
for (int64_t i = 0; i < root->operand_ids_size(); ++i) {
dfs.AddDependency(root->operand_ids(i),
PostorderDFSNodeType::kConstantUpperBound,
dep_context);
}
}
return dfs.AddVisit(
[root,
context](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
std::vector<Literal> results;
results.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
auto max = LiteralUtil::MaxElement(operands[i]);
results.emplace_back(
max.Broadcast(operands[i].shape(), {}).value());
}
if (ShapeUtil::GetSubshape(Shape(root->shape()),
context.shape_index)
.IsTuple()) {
return LiteralUtil::MakeTupleOwned(std::move(results));
} else {
return std::move(results[0]);
}
});
}
case HloOpcode::kNegate: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddVisit([this](Literal lower_bound) -> absl::StatusOr<Literal> {
return evaluator.EvaluateElementwiseUnaryOp(HloOpcode::kNegate,
lower_bound);
});
}
case HloOpcode::kSubtract:
case HloOpcode::kDivide: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddVisit([root, opcode, this](
Literal upper_bound,
Literal lower_bound) -> absl::StatusOr<Literal> {
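            // Integer division by a divisor whose lower bound is 0 would
            // make the quotient's upper bound unbounded; substitute 1 for
            // zero lower bounds so the evaluated bound stays finite.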
if (opcode == HloOpcode::kDivide &&
this->IsValueEffectiveInteger(root->operand_ids(1))) {
auto zero = LiteralUtil::Zero(lower_bound.shape().element_type());
zero = zero.Broadcast(lower_bound.shape(), {}).value();
TF_ASSIGN_OR_RETURN(
auto lower_bound_is_zero,
evaluator.EvaluateElementwiseCompareOp(
ComparisonDirection::kEq, lower_bound, zero));
auto one = LiteralUtil::One(lower_bound.shape().element_type());
one = one.Broadcast(lower_bound.shape(), {}).value();
TF_ASSIGN_OR_RETURN(
lower_bound, evaluator.EvaluateElementwiseTernaryOp(
HloOpcode::kSelect, lower_bound_is_zero, one,
lower_bound));
}
std::vector<Literal> new_operands;
new_operands.emplace_back(std::move(upper_bound));
new_operands.emplace_back(std::move(lower_bound));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(absl::MakeSpan(new_operands))
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
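        // SetBound carries its bound in the instruction literal; for tuple
        // literals, element 0 holds the bound values.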
return PostorderDFSNode().AddVisit([root]() -> absl::StatusOr<Literal> {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(root->literal().tuple_literals(0));
} else {
return Literal::CreateFromProto(root->literal());
}
});
} else if (root->custom_call_target() == "Sharding") {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([](Literal operand) { return operand; });
}
return InvalidArgument(
"Upper-bound inferencing on custom call %s is not supported",
root->DebugString());
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantUpperBound, context);
}
}
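// Mirror of AnalyzeUpperBound: the lower bound of a dynamic dimension size
// is 0, kNegate uses the operand's upper bound, and kSubtract/kDivide pair
// the first operand's lower bound with the second operand's upper bound.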
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeLowerBound(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantLowerBound);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[dimension, operand_proto]() -> absl::StatusOr<Literal> {
if (operand_proto->shape().is_dynamic_dimension(dimension)) {
return LiteralUtil::CreateR0<int32_t>(0);
} else {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
}
});
}
case HloOpcode::kAbs: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal lower_bound,
Literal upper_bound) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(auto lower_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, lower_bound));
TF_ASSIGN_OR_RETURN(auto upper_bound_abs,
evaluator.EvaluateElementwiseUnaryOp(
HloOpcode::kAbs, upper_bound));
return evaluator.EvaluateElementwiseBinaryOp(
HloOpcode::kMinimum, lower_bound_abs, upper_bound_abs);
});
}
case HloOpcode::kNegate: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit([this](Literal upper_bound) -> absl::StatusOr<Literal> {
return evaluator.EvaluateElementwiseUnaryOp(HloOpcode::kNegate,
upper_bound);
});
}
case HloOpcode::kSubtract:
case HloOpcode::kDivide: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantUpperBound, context)
.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantLowerBound, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantLowerBound, context);
}
}
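// Builds the DFS node computing the constant value of `handle` where it is
// resolvable; unresolvable cases (dynamic dimensions, unknown custom calls)
// yield garbage literals that the corresponding dynamism mask marks invalid.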
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeConstant(
int64_t handle, InferenceContext context) {
TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
handle_to_instruction(handle));
HloOpcode opcode = StringToHloOpcode(root->opcode()).value();
Shape subshape =
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index);
if (IsInstructionOverLimit(root, context)) {
return CreateAllDynamicResult(subshape,
PostorderDFSNodeType::kConstantValue);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[operand_proto, dimension, root]() -> absl::StatusOr<Literal> {
if (operand_proto->shape().is_dynamic_dimension(dimension)) {
return CreateGarbageLiteral(Shape(root->shape()));
} else {
return LiteralUtil::CreateR0<int32_t>(
operand_proto->shape().dimensions(dimension));
}
});
}
case HloOpcode::kSubtract:
case HloOpcode::kCos:
case HloOpcode::kSin:
case HloOpcode::kNegate:
case HloOpcode::kAbs:
case HloOpcode::kDivide: {
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
result.AddDependency(operand_id, PostorderDFSNodeType::kConstantValue,
context);
}
return result.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
return PostorderDFSNode().AddVisit([root]() -> absl::StatusOr<Literal> {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(root->literal().tuple_literals(0));
} else {
return Literal::CreateFromProto(root->literal());
}
});
} else if (root->custom_call_target() == "Sharding") {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantValue, context)
.AddVisit([](Literal operand) { return operand; });
} else {
return PostorderDFSNode().AddVisit(
[root, context](absl::Span<Literal>) {
return CreateGarbageLiteral(ShapeUtil::GetSubshape(
Shape(root->shape()), context.shape_index));
});
}
}
case HloOpcode::kSort: {
PostorderDFSNode result;
InferenceContext dep_context = context;
dep_context.shape_index = {};
for (auto operand_id : root->operand_ids()) {
result.AddDependency(operand_id, PostorderDFSNodeType::kConstantValue,
dep_context);
}
const HloComputationProto* computation_proto =
handle_to_computation(root->called_computation_ids(0));
return result.AddVisit(
[root, context, computation_proto,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
TF_ASSIGN_OR_RETURN(
auto computation,
HloComputation::CreateFromProto(*computation_proto, {}));
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithComputation(std::move(computation))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
default:
return AnalyzeConstantValueFallback(
handle, PostorderDFSNodeType::kConstantValue, context);
}
}
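// Builds the DFS node computing dynamism for `handle`: a PRED literal of the
// queried sub-shape in which an element is true iff its value (or its bound,
// for kBoundIsDynamic) depends on dynamic inputs.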
absl::StatusOr<PostorderDFSNode> PostorderDFSVisitor::AnalyzeIsDynamic(
int64_t handle, PostorderDFSNodeType type, InferenceContext context) {
  TF_ASSIGN_OR_RETURN(const HloInstructionProto* root,
                      handle_to_instruction(handle));
  TF_RET_CHECK(root);
  VLOG(1) << "Analyzing IsDynamic on " << root->DebugString();
  if (IsInstructionOverLimit(root, context)) {
    return CreateAllDynamicResult(
        ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index),
        type);
  }
  TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(root->opcode()));
PostorderDFSNode result;
for (auto operand_id : root->operand_ids()) {
InferenceContext dep_context = context;
dep_context.shape_index = {};
result.AddDependency(operand_id, type, dep_context);
}
switch (opcode) {
case HloOpcode::kGetDimensionSize: {
int64_t dimension = root->dimensions(0);
int64_t operand_handle = root->operand_ids(0);
TF_ASSIGN_OR_RETURN(const HloInstructionProto* operand_proto,
handle_to_instruction(operand_handle));
return PostorderDFSNode().AddVisit(
[operand_proto, dimension, type]() -> absl::StatusOr<Literal> {
if (type == PostorderDFSNodeType::kBoundIsDynamic) {
return LiteralUtil::CreateR0<bool>(false);
}
return LiteralUtil::CreateR0<bool>(
operand_proto->shape().is_dynamic_dimension(dimension));
});
}
case HloOpcode::kSort: {
auto dfs = PostorderDFSNode();
InferenceContext dep_context = context;
dep_context.shape_index = {};
for (int64_t i = 0; i < root->operand_ids_size(); ++i) {
dfs.AddDependency(root->operand_ids(i), type, dep_context);
}
return dfs.AddVisit([root, context, type](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
bool all_operands_values_static = true;
for (int64_t i = 0; i < operands.size(); ++i) {
all_operands_values_static &= operands[i].IsAll(0);
}
if (type == PostorderDFSNodeType::kValueIsDynamic) {
return CreatePredLiteral(!all_operands_values_static,
ShapeUtil::GetSubshape(Shape(root->shape()),
context.shape_index));
}
CHECK(type == PostorderDFSNodeType::kBoundIsDynamic);
if (!context.shape_index.empty()) {
int64_t index = context.shape_index[0];
bool all_values_static = operands[index].IsAll(0);
return CreatePredLiteral(!all_values_static, operands[index].shape());
}
std::vector<Literal> results;
results.reserve(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
bool all_values_static = operands[i].IsAll(0);
results.emplace_back(
CreatePredLiteral(!all_values_static, operands[i].shape()));
}
if (!ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index)
.IsTuple()) {
return std::move(results[0]);
}
return LiteralUtil::MakeTupleOwned(std::move(results));
});
}
case HloOpcode::kSetDimensionSize:
return result.AddVisit([root, type](absl::Span<Literal> operands) {
bool any_dynamic_operand = absl::c_any_of(
operands, [](Literal& operand) { return !operand.IsAll(0); });
return CreatePredLiteral(
type == PostorderDFSNodeType::kValueIsDynamic &&
any_dynamic_operand,
ShapeUtil::MakeStaticShape(Shape(root->shape())));
});
case HloOpcode::kDynamicSlice: {
return result.AddVisit([root](absl::Span<Literal> operands) {
bool any_dynamic_operand = absl::c_any_of(
operands, [](Literal& operand) { return !operand.IsAll(0); });
return CreatePredLiteral(any_dynamic_operand, Shape(root->shape()));
});
}
case HloOpcode::kAbs:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCos:
case HloOpcode::kClz:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kConvert:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh: {
return result.AddVisit([](Literal operand) { return operand; });
}
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kCompare:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical: {
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.WithOpCode(HloOpcode::kOr)
.Evaluate();
});
}
case HloOpcode::kTuple:
case HloOpcode::kTranspose:
case HloOpcode::kSlice:
case HloOpcode::kBroadcast:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kPad: {
if (opcode == HloOpcode::kTuple && !context.shape_index.empty()) {
int64_t tuple_operand_index = context.shape_index.front();
InferenceContext tuple_operand_context = context;
tuple_operand_context.shape_index.pop_front();
return PostorderDFSNode()
.AddDependency(root->operand_ids(tuple_operand_index), type,
tuple_operand_context)
.AddVisit([](Literal operand) { return operand; });
}
return result.AddVisit([root, this](absl::Span<Literal> operands) {
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.Evaluate();
});
}
case HloOpcode::kCall: {
auto node = PostorderDFSNode();
auto* call_proto = root;
if (call_proto->operand_ids_size() != 1) {
return CreateAllDynamicResult(
Shape(handle_to_instruction(handle).value()->shape()), type);
}
int64_t call_root =
handle_to_computation(call_proto->called_computation_ids(0))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
call_proto->operand_ids(0));
node.AddDependency(call_root, PostorderDFSNodeType::kValueIsDynamic,
branch_context, "callee's root instruction");
      return node.AddVisit(
          [](Literal operand) -> absl::StatusOr<Literal> { return operand; });
}
case HloOpcode::kConditional: {
auto node = PostorderDFSNode();
auto* conditional_proto = root;
InferenceContext predicate_context = context;
predicate_context.shape_index = {};
node.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kConstantValue,
predicate_context)
.AddDependency(conditional_proto->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic,
predicate_context);
const int64_t branch_size =
conditional_proto->called_computation_ids_size();
for (int64_t i = 0; i < branch_size; ++i) {
int64_t branch_root =
handle_to_computation(conditional_proto->called_computation_ids(i))
->root_id();
InferenceContext branch_context = context;
branch_context.caller_operand_handles.push_back(
conditional_proto->operand_ids(i + 1));
node.AddDependency(branch_root, PostorderDFSNodeType::kConstantValue,
branch_context,
absl::StrFormat("branch %lld's value", i))
.AddDependency(branch_root, PostorderDFSNodeType::kValueIsDynamic,
branch_context,
absl::StrFormat("branch %lld's dynamism", i));
}
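      // Operands seen by the visit below: [0] = predicate value,
      // [1] = predicate dynamism, then per branch i:
      // [2 + 2*i] = branch value, [2 + 2*i + 1] = branch dynamism.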
return node.AddVisit([root, branch_size,
context](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
int64_t pred_is_dynamic = operands[1].Get<bool>({});
auto result = CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
if (pred_is_dynamic) {
VLOG(1) << "predict is dynamic value" << result.ToString();
result.MutableEachCell<bool>(
[&](absl::Span<const int64_t> indices, bool value) {
std::string branch_value = operands[2].GetAsString(indices, {});
for (int64_t i = 0; i < branch_size; ++i) {
const int64_t branch_value_index = 2 + 2 * i;
const int64_t branch_dynamism_index = 2 + 2 * i + 1;
auto branch_is_dynamic =
operands[branch_dynamism_index].Get<bool>(indices);
if (branch_is_dynamic) {
return true;
}
if (branch_value !=
operands[branch_value_index].GetAsString(indices, {})) {
return true;
}
}
return false;
});
return result;
} else {
VLOG(1) << "predict is constant value";
int64_t branch_index = 0;
if (operands[0].shape().element_type() == PRED) {
if (operands[0].Get<bool>({})) {
branch_index = 0;
} else {
branch_index = 1;
}
} else {
branch_index = operands[0].GetIntegralAsS64({}).value();
}
const int64_t branch_dynamism_index = 2 + 2 * branch_index + 1;
return std::move(operands[branch_dynamism_index]);
}
});
}
case HloOpcode::kGetTupleElement: {
int64_t operand_handle = root->operand_ids(0);
context.shape_index.push_front(root->tuple_index());
return PostorderDFSNode()
.AddDependency(operand_handle, type, context)
.AddVisit([](Literal operand) { return operand; });
}
case HloOpcode::kReduce: {
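      // The dynamism of a reduce is the OR over its inputs' dynamism:
      // evaluate the reduce in PRED with its computation replaced by one
      // that ORs every lhs/rhs element together (fanned out across tuple
      // elements for variadic reduces).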
return result.AddVisit(
[root, context, this](absl::Span<Literal> operands) {
Shape root_shape = Shape(root->shape());
Shape scalar_shape = ShapeUtil::MakeScalarShape(xla::PRED);
std::unique_ptr<HloComputation> reduce_or;
if (root_shape.IsTuple()) {
HloComputation::Builder b("reduce_or");
auto accum = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<bool>(false)));
for (int i = 0; i < root_shape.tuple_shapes_size(); ++i) {
auto lhs = b.AddInstruction(
HloInstruction::CreateParameter(i, scalar_shape, "lhs"));
auto rhs = b.AddInstruction(HloInstruction::CreateParameter(
i + root_shape.tuple_shapes_size(), scalar_shape, "rhs"));
accum = b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, accum, lhs));
accum = b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, accum, rhs));
}
std::vector<HloInstruction*> results(
root_shape.tuple_shapes_size(), accum);
b.AddInstruction(HloInstruction::CreateTuple(results));
reduce_or = b.Build();
} else {
HloComputation::Builder b("reduce_or");
auto lhs = b.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto rhs = b.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
b.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kOr, lhs, rhs));
reduce_or = b.Build();
}
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(operands)
.WithPrimitiveType(PRED)
.WithComputation(std::move(reduce_or))
.WithSubshape(context.shape_index)
.Evaluate();
});
}
case HloOpcode::kConstant:
case HloOpcode::kIota: {
return result.AddVisit(
[root]() { return CreatePredLiteral(false, Shape(root->shape())); });
}
case HloOpcode::kParameter: {
      if (!context.caller_operand_handles.empty()) {
int64_t caller_operand = context.caller_operand_handles.back();
context.caller_operand_handles.pop_back();
return result.AddDependency(caller_operand, type, context)
.AddVisit([](Literal literal) { return literal; });
}
return result.AddVisit([root, context]() {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
}
case HloOpcode::kSelect: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kConstantValue, context)
.AddDependency(root->operand_ids(0),
PostorderDFSNodeType::kValueIsDynamic, context)
.AddDependency(root->operand_ids(1), type, context)
.AddDependency(root->operand_ids(2), type, context)
.AddVisit([root](absl::Span<Literal> operands)
-> absl::StatusOr<Literal> {
OptionalLiteral optional_selector_literal(std::move(operands[0]),
std::move(operands[1]));
Literal lhs = std::move(operands[2]);
Literal rhs = std::move(operands[3]);
auto result = CreatePredLiteral(true, Shape(root->shape()));
result.MutableEachCell<bool>(
[&](absl::Span<const int64_t> indices, bool value) {
std::optional<bool> optional_selector =
optional_selector_literal.Get<bool>(indices);
bool lhs_value = lhs.Get<bool>(indices);
bool rhs_value = rhs.Get<bool>(indices);
if (optional_selector.has_value()) {
if (*optional_selector) {
return lhs_value;
} else {
return rhs_value;
}
} else {
return true;
}
});
return result;
});
}
case HloOpcode::kGather: {
return PostorderDFSNode()
.AddDependency(root->operand_ids(0), type, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kConstantValue, context)
.AddDependency(root->operand_ids(1),
PostorderDFSNodeType::kValueIsDynamic, context)
.AddVisit(
[root,
this](absl::Span<Literal> operands) -> absl::StatusOr<Literal> {
OptionalLiteral optional_selector_literal(
std::move(operands[1]), std::move(operands[2]));
if (!optional_selector_literal.AllValid()) {
return CreatePredLiteral(true, Shape(root->shape()));
}
std::vector<Literal> new_operands;
new_operands.emplace_back(std::move(operands[0]));
new_operands.emplace_back(
optional_selector_literal.GetValue()->Clone());
return std::make_unique<HloProtoEvaluator>(evaluator, *root)
->WithOperands(absl::MakeSpan(new_operands))
.WithPrimitiveType(PRED)
.Evaluate();
});
}
case HloOpcode::kCustomCall: {
if (root->custom_call_target() == "SetBound") {
return PostorderDFSNode().AddVisit([type,
root]() -> absl::StatusOr<Literal> {
if (type == PostorderDFSNodeType::kBoundIsDynamic) {
return CreatePredLiteral(false, Shape(root->shape()));
} else {
if (root->literal().shape().element_type() == TUPLE) {
return Literal::CreateFromProto(
root->literal().tuple_literals(1));
} else if (type == PostorderDFSNodeType::kValueIsDynamic) {
return CreatePredLiteral(true, Shape(root->shape()));
} else {
return Literal::CreateFromProto(root->literal());
}
}
});
} else if (root->custom_call_target() == "Sharding") {
return result.AddVisit([](Literal operand) { return operand; });
} else {
return InvalidArgument(
"Dynamic inferencing on custom call %s is not supported",
root->DebugString());
}
break;
}
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kWhile: {
return PostorderDFSNode().AddVisit([root, context]()
-> absl::StatusOr<Literal> {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
break;
}
default:
return PostorderDFSNode().AddVisit([root, context]()
-> absl::StatusOr<Literal> {
return CreatePredLiteral(
true,
ShapeUtil::GetSubshape(Shape(root->shape()), context.shape_index));
});
}
}
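// Iterative postorder DFS driver. Each work item is seen twice: on the first
// pass (kUnvisited) its Analyze* node is built and its dependencies are
// pushed onto the stack; on the second pass (kVisiting) the dependencies'
// cached literals are gathered and the node's visit function is run.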
absl::StatusOr<Literal> PostorderDFSVisitor::PostOrderDFSVisit(
int64_t handle, PostorderDFSNodeType type) {
enum VisitState {
kUnvisited = 0,
kVisiting,
kVisited,
};
int64_t unique_id = 0;
struct WorkItem {
explicit WorkItem(int64_t handle, InferenceContext context,
PostorderDFSNodeType type, VisitState state, int64_t id)
: handle(handle),
context(std::move(context)),
type(type),
state(state),
id(id) {}
int64_t handle;
InferenceContext context;
PostorderDFSNodeType type;
VisitState state;
Visit visit;
int64_t id;
std::vector<CacheKey> dependencies;
CacheKey GetCacheKey() { return CacheKey(handle, context, type); }
};
std::vector<WorkItem> stack;
WorkItem root(handle, InferenceContext({}, {}), type, kUnvisited,
unique_id++);
stack.push_back(root);
while (!stack.empty()) {
WorkItem& item = stack.back();
VLOG(1) << "stack top shape index: " << item.context.shape_index.ToString();
if (VLOG_IS_ON(1)) {
TF_RETURN_IF_ERROR(handle_to_instruction(item.handle).status());
VLOG(1) << "stack top "
<< handle_to_instruction(item.handle).value()->DebugString();
}
if (item.state == kVisiting) {
VLOG(1) << "visiting";
std::vector<Literal> literals;
literals.reserve(item.dependencies.size());
for (CacheKey& dep_key : item.dependencies) {
TF_RET_CHECK(evaluated.contains(dep_key));
literals.emplace_back(evaluated.at(dep_key).Clone());
}
VLOG(1) << "Start visiting with dependency type: "
<< PostorderDFSNodeTypeToString(item.type);
TF_ASSIGN_OR_RETURN(auto literal, item.visit(absl::MakeSpan(literals)));
VLOG(1) << "End visiting: " << literal.ToString();
evaluated[item.GetCacheKey()] = std::move(literal);
stack.pop_back();
continue;
}
VLOG(1) << "unvisited";
if (evaluated.contains(item.GetCacheKey())) {
stack.pop_back();
continue;
}
item.state = kVisiting;
PostorderDFSNode node;
switch (item.type) {
case PostorderDFSNodeType::kConstantValue: {
VLOG(1) << "constant value";
TF_ASSIGN_OR_RETURN(node, AnalyzeConstant(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kConstantLowerBound: {
VLOG(1) << "constant lower bound";
TF_ASSIGN_OR_RETURN(node, AnalyzeLowerBound(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kConstantUpperBound: {
VLOG(1) << "constant upper bound";
TF_ASSIGN_OR_RETURN(node, AnalyzeUpperBound(item.handle, item.context));
break;
}
case PostorderDFSNodeType::kBoundIsDynamic:
case PostorderDFSNodeType::kValueIsDynamic: {
VLOG(1) << "value is dynamic";
TF_ASSIGN_OR_RETURN(
node, AnalyzeIsDynamic(item.handle, item.type, item.context));
break;
}
}
item.visit = node.visit;
const int64_t current_item_id = stack.size() - 1;
for (const PostorderDFSDep& dep : node.dependencies) {
TF_ASSIGN_OR_RETURN(auto dependency_inst,
handle_to_instruction(dep.handle));
VLOG(1) << "dependency " << dep.annotation
<< "::" << dependency_inst->DebugString() << "index"
<< dep.context.shape_index << " stack size:" << stack.size();
stack.emplace_back(dep.handle, dep.context, dep.type, kUnvisited,
unique_id++);
stack[current_item_id].dependencies.push_back(stack.back().GetCacheKey());
}
}
VLOG(1) << "done" << evaluated[root.GetCacheKey()].ToString();
return evaluated[root.GetCacheKey()].Clone();
}
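// Public entry point: returns a PRED literal marking which elements of `op`
// depend on dynamic inputs.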
absl::StatusOr<Literal> ValueInference::AnalyzeIsDynamic(XlaOp op) {
PostorderDFSVisitor visitor(
evaluator_,
[&](int64_t handle) {
return builder_->LookUpInstructionByHandle(handle);
},
[&](int64_t handle) { return &(builder_->embedded_[handle]); });
  return visitor.PostOrderDFSVisit(op.handle(),
                                   PostorderDFSNodeType::kValueIsDynamic);
}
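// Lightweight CSE for kGetDimensionSize: returns the handle of an equivalent
// op seen earlier (same operand and dimension), or nullopt if this is the
// first occurrence.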
absl::StatusOr<std::optional<int64_t>> ValueInference::CseOpHandle(
int64_t handle) {
TF_ASSIGN_OR_RETURN(auto inst, builder_->LookUpInstructionByHandle(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(inst->opcode()));
if (opcode != HloOpcode::kGetDimensionSize) {
return {std::nullopt};
}
int64_t hash = absl::HashOf(inst->operand_ids(0), inst->dimensions(0));
auto lookup = cse_map_.find(hash);
if (lookup == cse_map_.end()) {
cse_map_[hash] = handle;
return {std::nullopt};
}
TF_ASSIGN_OR_RETURN(auto equivalent_op,
builder_->LookUpInstructionByHandle(lookup->second));
if (equivalent_op->opcode() != inst->opcode() ||
equivalent_op->operand_ids(0) != inst->operand_ids(0) ||
equivalent_op->dimensions(0) != inst->dimensions(0)) {
return {std::nullopt};
}
int64_t cse = lookup->second;
if (handle != cse) {
return {cse};
}
return {std::nullopt};
}
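// Symbolically simplifies scalar S64 expressions over instruction handles,
// e.g. rewriting a + (b - a) into b, so that equivalent dimension-size
// expressions map to the same handle. Produces -1 entries for values it
// cannot simplify.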
absl::StatusOr<Literal> ValueInference::SimplifyOp(int64_t handle) {
TF_ASSIGN_OR_RETURN(auto cse_handle, CseOpHandle(handle));
if (cse_handle) {
return SimplifyOp(*cse_handle);
}
TF_ASSIGN_OR_RETURN(auto* inst, builder_->LookUpInstructionByHandle(handle));
TF_ASSIGN_OR_RETURN(HloOpcode opcode, StringToHloOpcode(inst->opcode()));
std::vector<Literal> operands;
auto output_shape = std::make_unique<const Shape>(inst->shape());
switch (opcode) {
case HloOpcode::kSlice:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kBroadcast: {
for (auto operand_id : inst->operand_ids()) {
TF_ASSIGN_OR_RETURN(auto literal, SimplifyOp(operand_id));
operands.emplace_back(std::move(literal));
}
return std::make_unique<HloProtoEvaluator>(evaluator_, *inst)
->WithOperands(absl::MakeSpan(operands))
.WithPrimitiveType(S64)
.Evaluate();
}
case HloOpcode::kConvert: {
auto operand =
builder_->LookUpInstructionByHandle(inst->operand_ids(0)).value();
if (Shape::Equal()(*output_shape, Shape(operand->shape()))) {
return SimplifyOp(inst->operand_ids(0));
} else {
return CreateS64Literal(-1, *output_shape);
}
}
case HloOpcode::kAdd: {
if (output_shape->rank() == 0) {
TF_ASSIGN_OR_RETURN(auto lhs, SimplifyOp(inst->operand_ids(0)));
TF_ASSIGN_OR_RETURN(auto rhs, SimplifyOp(inst->operand_ids(1)));
int64_t lhs_handle = lhs.Get<int64_t>({});
int64_t rhs_handle = rhs.Get<int64_t>({});
if (lhs_handle == -1 || rhs_handle == -1) {
return CreateS64Literal(-1, *output_shape);
}
std::function<std::optional<int64_t>(int64_t, int64_t)>
can_be_optimized;
can_be_optimized = [this, &can_be_optimized](
int64_t lhs,
int64_t rhs) -> std::optional<int64_t> {
auto rhs_inst = builder_->LookUpInstructionByHandle(rhs).value();
HloOpcode rhs_opcode = StringToHloOpcode(rhs_inst->opcode()).value();
if (rhs_opcode == HloOpcode::kSubtract) {
auto sub_lhs_handle =
SimplifyOp(rhs_inst->operand_ids(0)).value().Get<int64_t>({});
auto sub_rhs_handle =
SimplifyOp(rhs_inst->operand_ids(1)).value().Get<int64_t>({});
if (sub_rhs_handle == lhs) {
return sub_lhs_handle;
}
}
auto lhs_inst = builder_->LookUpInstructionByHandle(lhs).value();
HloOpcode lhs_opcode = StringToHloOpcode(lhs_inst->opcode()).value();
if (lhs_opcode == HloOpcode::kAdd) {
auto add_lhs_handle =
SimplifyOp(lhs_inst->operand_ids(0)).value().Get<int64_t>({});
auto add_rhs_handle =
SimplifyOp(lhs_inst->operand_ids(1)).value().Get<int64_t>({});
if (auto optimized = can_be_optimized(add_lhs_handle, rhs)) {
return Add(XlaOp(add_rhs_handle, builder_),
XlaOp(optimized.value(), builder_))
.handle();
}
if (auto optimized = can_be_optimized(add_rhs_handle, rhs)) {
return Add(XlaOp(add_lhs_handle, builder_),
XlaOp(optimized.value(), builder_))
.handle();
}
}
return std::nullopt;
};
if (auto optimized = can_be_optimized(lhs_handle, rhs_handle)) {
return LiteralUtil::CreateR0<int64_t>(optimized.value());
}
if (auto optimized = can_be_optimized(rhs_handle, lhs_handle)) {
return LiteralUtil::CreateR0<int64_t>(optimized.value());
}
XlaOp new_sum =
Add(XlaOp(lhs_handle, builder_), XlaOp(rhs_handle, builder_));
return LiteralUtil::CreateR0<int64_t>(new_sum.handle());
} else {
return CreateS64Literal(-1, *output_shape);
}
}
default: {
if (ShapeUtil::IsScalar(*output_shape)) {
return LiteralUtil::CreateR0<int64_t>(handle);
} else {
return CreateS64Literal(-1, *output_shape);
}
}
}
}
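// Runs the requested analysis (kValue, kUpperBound, or kLowerBound) on `op`,
// pairing the inferred literal with its dynamism mask. A fully-dynamic mask
// short-circuits the value computation and returns a garbage literal.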
absl::StatusOr<OptionalLiteral> ValueInference::AnalyzeConstant(
XlaOp op, ValueInferenceMode mode) {
TF_RETURN_IF_ERROR(builder_->LookUpInstructionByHandle(op.handle()).status());
PostorderDFSVisitor visitor(
evaluator_,
[&](int64_t handle) {
return builder_->LookUpInstructionByHandle(handle);
},
[&](int64_t handle) { return &(builder_->embedded_[handle]); });
TF_ASSIGN_OR_RETURN(Shape op_shape, builder_->GetShape(op));
int64_t handle = op.handle();
  if (ShapeUtil::IsScalar(op_shape)) {
TF_ASSIGN_OR_RETURN(auto result, SimplifyOp(handle));
auto optimized_handle = result.Get<int64_t>({});
if (optimized_handle != -1) {
handle = optimized_handle;
}
}
switch (mode) {
case ValueInferenceMode::kLowerBound: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kBoundIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(
Literal value,
visitor.PostOrderDFSVisit(handle,
PostorderDFSNodeType::kConstantLowerBound));
return OptionalLiteral(std::move(value), std::move(mask));
}
case ValueInferenceMode::kUpperBound: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kBoundIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(
Literal value,
visitor.PostOrderDFSVisit(handle,
PostorderDFSNodeType::kConstantUpperBound));
return OptionalLiteral(std::move(value), std::move(mask));
}
case ValueInferenceMode::kValue: {
TF_ASSIGN_OR_RETURN(Literal mask,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kValueIsDynamic));
if (mask.IsAll(1)) {
return OptionalLiteral(CreateGarbageLiteral(op_shape), std::move(mask));
}
TF_ASSIGN_OR_RETURN(Literal value,
visitor.PostOrderDFSVisit(
handle, PostorderDFSNodeType::kConstantValue));
return OptionalLiteral(std::move(value), std::move(mask));
}
}
}
} | #include "xla/hlo/builder/value_inference.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/client/client_library.h"
#include "xla/client/global_data.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/prng.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ValueInferenceTest : public ::testing::Test {
public:
std::string TestName() const {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
};
class DynamismInferenceTest : public ValueInferenceTest {
public:
explicit DynamismInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<Literal> ComputeDynamismLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
TF_RETURN_IF_ERROR(builder->first_error());
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal_slice,
value_inference.AnalyzeIsDynamic(operand));
return literal_slice.Clone();
}
absl::StatusOr<bool> ComputeDynamismScalar(XlaOp operand, XlaBuilder* builder,
ShapeIndex index = {}) {
TF_ASSIGN_OR_RETURN(auto literal,
ComputeDynamismLiteral(operand, builder, nullptr));
return literal.Get<bool>({}, index);
}
se::Platform* platform_;
};
TEST_F(DynamismInferenceTest, ScalarInt32Literal) {
XlaBuilder b(TestName());
auto computation = ConstantR0<int32_t>(&b, 42);
auto value = ComputeDynamismScalar(computation, &b);
ASSERT_TRUE(value.ok()) << value.status();
EXPECT_EQ(value.value(), false);
}
TEST_F(DynamismInferenceTest, Iota) {
XlaBuilder b(TestName());
auto computation = Iota(&b, S32, 2);
EXPECT_FALSE(ComputeDynamismLiteral(computation, &b).value().Get<bool>({0}));
}
TEST_F(DynamismInferenceTest, TupleSimple) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto tuple = Tuple(&b, {c, p});
EXPECT_EQ(ComputeDynamismScalar(tuple, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, TupleGteKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto tuple = Tuple(&b, {c, p});
auto gte0 = GetTupleElement(tuple, 0);
auto gte1 = GetTupleElement(tuple, 1);
auto tuple_2 = Tuple(&b, {gte0, gte1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, PredValueUsedTwice) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto pred = Eq(c, p);
auto result = Select(pred, p, c);
EXPECT_EQ(ComputeDynamismScalar(result, &b, {}).value(), true);
}
TEST_F(DynamismInferenceTest, ReduceUsedTwice) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2}), "p0");
auto zero = ConstantR0<int32_t>(&b, 0);
XlaComputation add_s32 = CreateScalarAddComputation(S32, &b);
auto reduce = Reduce(p, zero, add_s32, {0});
auto pred = Eq(c, reduce);
auto result = Select(pred, reduce, c);
EXPECT_EQ(ComputeDynamismScalar(result, &b, {}).value(), true);
}
TEST_F(DynamismInferenceTest, VariadicReduce) {
XlaBuilder b(TestName());
auto c = ConstantR2<int32_t>(&b, {{0, 0}});
auto p = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {1, 2}), "p0");
auto half_dynamic = ConcatInDim(&b, {c, p}, 0);
XlaBuilder reduce_add("reduce_add");
auto p0 = Parameter(&reduce_add, 0, ShapeUtil::MakeScalarShape(S32), "p");
auto p1 = Parameter(&reduce_add, 1, ShapeUtil::MakeScalarShape(S32), "p");
auto p2 = Parameter(&reduce_add, 2, ShapeUtil::MakeScalarShape(S32), "p");
auto p3 = Parameter(&reduce_add, 3, ShapeUtil::MakeScalarShape(S32), "p");
auto reduce_result = p0;
reduce_result = Add(reduce_result, p1);
reduce_result = Add(reduce_result, p2);
reduce_result = Add(reduce_result, p3);
Tuple(&reduce_add, {reduce_result, reduce_result});
auto init = ConstantR0<int32_t>(&b, 0);
auto variadic_reduce = Reduce(&b, {half_dynamic, half_dynamic}, {init, init},
reduce_add.Build().value(), {1});
auto result = GetTupleElement(variadic_reduce, 0);
EXPECT_FALSE(ComputeDynamismLiteral(result, &b).value().Get<bool>({0}));
EXPECT_TRUE(ComputeDynamismLiteral(result, &b).value().Get<bool>({1}));
}
TEST_F(DynamismInferenceTest, DynamicSelectorWithMixedValues) {
XlaBuilder b(TestName());
auto constant_pred = ConstantR1<bool>(&b, {true});
auto dynamic_pred = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {1}), "p0");
auto concat = ConcatInDim(&b, {constant_pred, dynamic_pred}, 0);
auto constant_values = ConstantR1<bool>(&b, {true, true});
auto result = Select(concat, constant_values, constant_values);
EXPECT_FALSE(ComputeDynamismLiteral(result, &b).value().Get<bool>({0}));
EXPECT_TRUE(ComputeDynamismLiteral(result, &b).value().Get<bool>({1}));
}
TEST_F(DynamismInferenceTest, ConcatSliceReshapeKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto concat = ConcatScalars(&b, {c, p});
auto slice0 = SliceInDim(concat, 0, 1, 1, 0);
auto reshape0 = Reshape(slice0, {});
auto slice1 = SliceInDim(concat, 1, 2, 1, 0);
auto reshape1 = Reshape(slice1, {});
auto tuple_2 = Tuple(&b, {reshape0, reshape1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, ParameterIsDynamic) {
XlaBuilder b(TestName());
auto computation = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto value = ComputeDynamismScalar(computation, &b);
ASSERT_TRUE(value.ok()) << value.status();
EXPECT_EQ(value.value(), true);
}
TEST_F(DynamismInferenceTest, UnaryOpKeepsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto neg0 = Neg(c);
auto neg1 = Neg(p);
auto tuple_2 = Tuple(&b, {neg0, neg1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, ParameterWithToken) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeScalarShape(S32)}),
"p0");
EXPECT_EQ(ComputeDynamismScalar(p, &b, {0}).value(), true);
EXPECT_EQ(ComputeDynamismScalar(p, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, BinaryOpsOrsDynamism) {
XlaBuilder b(TestName());
auto c = ConstantR0<int32_t>(&b, 42);
auto p = Parameter(&b, 0, ShapeUtil::MakeScalarShape(S32), "p0");
auto add1 = Add(c, c);
auto add2 = Add(p, c);
auto tuple_2 = Tuple(&b, {add1, add2});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), false);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), true);
}
TEST_F(DynamismInferenceTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto tuple_2 = Tuple(&b, {gds0, gds1});
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {0}).value(), true);
EXPECT_EQ(ComputeDynamismScalar(tuple_2, &b, {1}).value(), false);
}
TEST_F(DynamismInferenceTest, DynamicSliceWithConstantOperands) {
XlaBuilder b(TestName());
auto constant = ConstantR1<int32_t>(&b, {0, 1, 2, 3});
  auto slice_start = ConstantR0<int32_t>(&b, 1);
auto dynamic_slice = DynamicSlice(constant, {slice_start}, {1});
EXPECT_FALSE(
ComputeDynamismLiteral(dynamic_slice, &b).value().Get<bool>({0}));
}
TEST_F(DynamismInferenceTest, GatherWithCommonParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto operand1 = Parameter(&b, 0, indices_shape, "p1");
auto operand2 = Parameter(&b, 1, indices_shape, "p2");
auto indices = Sub(operand1, operand2);
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(operand1, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, GatherWithConstantParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto data_operand = ConstantR1<int32_t>(&b, {1, 2});
auto indices = ConstantR1<int32_t>(&b, {1, 2});
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(data_operand, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, GatherWithSharedConstantParent) {
XlaBuilder b(TestName());
Shape indices_shape = ShapeUtil::MakeShape(S32, {2});
auto operand1 = ConstantR1<int32_t>(&b, {1, 2});
auto operand2 = ConstantR1<int32_t>(&b, {1, 2});
auto indices = Sub(operand1, operand2);
GatherDimensionNumbers dim_numbers;
dim_numbers.add_offset_dims(1);
dim_numbers.add_start_index_map(0);
dim_numbers.set_index_vector_dim(1);
auto gather = Gather(operand1, indices, dim_numbers, {1});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gather, &b).value().Get<bool>({0, 0}));
}
TEST_F(DynamismInferenceTest, InferThroughPad) {
XlaBuilder b(TestName());
auto operand1 = ConstantR1<int32_t>(&b, {1, 2});
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
PaddingConfig padding_config;
padding_config.add_dimensions()->set_edge_padding_high(1);
auto pad = Pad(operand1, parameter, padding_config);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({0}));
EXPECT_FALSE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({1}));
EXPECT_TRUE(ComputeDynamismLiteral(pad, &b).value().Get<bool>({2}));
}
TEST_F(DynamismInferenceTest, InferThroughConditionalBranchesAreSame) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 1)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Parameter(&false_builder, 0, s32_shape, "cond_param");
Tuple(&false_builder, {ConstantR0<int32_t>(&false_builder, 1)});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "p0");
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(parameter, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, InferThroughCall) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
XlaBuilder call_builder("call");
Parameter(&call_builder, 0, s32_shape, "call_param");
auto call_computation = call_builder.Build().value();
XlaBuilder b(TestName());
auto constant = ConstantR0<int32_t>(&b, 3);
auto call = Call(&b, call_computation, {constant});
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_EQ(ComputeDynamismScalar(call, &b, {}).value(), false);
}
TEST_F(DynamismInferenceTest, InferThroughConditionalBranchesAreNotSame) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 1)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Parameter(&false_builder, 0, s32_shape, "cond_param");
Tuple(&false_builder, {ConstantR0<int32_t>(&false_builder, 2)});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto parameter = Parameter(&b, 0, ShapeUtil::MakeShape(PRED, {}), "p0");
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(parameter, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, InferThroughConditionalPredIsConstantTrueBranch) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 0)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Tuple(&false_builder,
{Parameter(&false_builder, 0, s32_shape, "cond_param")});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto pred = ConstantR0<bool>(&b, true);
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond = Conditional(pred, constant, true_computation, constant,
false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest,
InferThroughConditionalPredIsConstantFalseBranch) {
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder true_builder("true");
Parameter(&true_builder, 0, s32_shape, "cond_param");
Tuple(&true_builder, {ConstantR0<int32_t>(&true_builder, 0)});
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
Tuple(&false_builder,
{Parameter(&false_builder, 0, s32_shape, "cond_param")});
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto param = Parameter(&b, 0, s32_shape, "param");
auto pred = ConstantR0<bool>(&b, false);
auto constant = ConstantR0<int32_t>(&b, 0);
auto cond =
Conditional(pred, constant, true_computation, param, false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_TRUE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
TEST_F(DynamismInferenceTest, ArgumentForwardingNestedTuple) {
auto pred_shape = ShapeUtil::MakeShape(PRED, {});
auto s32_shape = ShapeUtil::MakeShape(S32, {});
auto tuple_shape = ShapeUtil::MakeTupleShape({pred_shape, s32_shape});
auto cond_shape = ShapeUtil::MakeTupleShape({s32_shape});
XlaBuilder inner_true_builder("inner_true");
Parameter(&inner_true_builder, 0, s32_shape, "cond_param");
Tuple(&inner_true_builder, {ConstantR0<int32_t>(&inner_true_builder, 0)});
auto inner_true_computation = inner_true_builder.Build().value();
XlaBuilder inner_false_builder("inner_false");
Tuple(&inner_false_builder,
{Parameter(&inner_false_builder, 0, s32_shape, "cond_param")});
auto inner_false_computation = inner_false_builder.Build().value();
XlaBuilder true_builder("true");
{
auto param = Parameter(&true_builder, 0, tuple_shape, "param");
auto op = GetTupleElement(param, 1);
auto pred = GetTupleElement(param, 0);
Conditional(pred, op, inner_true_computation, op, inner_false_computation);
}
auto true_computation = true_builder.Build().value();
XlaBuilder false_builder("false");
{
auto param = Parameter(&false_builder, 0, tuple_shape, "param");
auto op = GetTupleElement(param, 1);
auto pred = GetTupleElement(param, 0);
Conditional(pred, op, inner_true_computation, op, inner_false_computation);
}
auto false_computation = false_builder.Build().value();
XlaBuilder b(TestName());
auto constant = ConstantR0<int32_t>(&b, 0);
auto pred = Parameter(&b, 0, pred_shape, "param");
auto param = Tuple(&b, {pred, constant});
auto cond =
Conditional(pred, param, true_computation, param, false_computation);
auto gte = GetTupleElement(cond, 0);
ASSERT_TRUE(b.first_error().ok()) << b.first_error().message();
EXPECT_FALSE(ComputeDynamismLiteral(gte, &b).value().Get<bool>({}));
}
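// Upper-bound inference: for a dynamic dimension, the static bound recorded
// in the shape is an upper bound on GetDimensionSize.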
class UpperBoundInferenceTest : public ValueInferenceTest {
public:
explicit UpperBoundInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<OptionalLiteral> ComputeUpperBoundLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal,
value_inference.AnalyzeConstant(
operand, ValueInferenceMode::kUpperBound));
return literal;
}
se::Platform* platform_;
};
TEST_F(UpperBoundInferenceTest, GetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto tuple_2 = Tuple(&b, {gds0, gds1});
EXPECT_EQ(ComputeUpperBoundLiteral(tuple_2, &b).value().Get<int32_t>({}, {0}),
2);
EXPECT_EQ(ComputeUpperBoundLiteral(tuple_2, &b).value().Get<int32_t>({}, {1}),
3);
}
TEST_F(UpperBoundInferenceTest, GetDimensionSizeSub) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto sub = Sub(gds1, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(sub, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, GetDimensionSizeDiv) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, false}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto div = Div(gds1, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(div, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, SumSubtract) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto sub = Sub(gds1, gds0);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
auto add2 = Add(gds1, gds0);
auto add3 = Add(sub, add2);
EXPECT_EQ(ComputeUpperBoundLiteral(add3, &b).value().Get<int32_t>({}), 6);
}
TEST_F(UpperBoundInferenceTest, SumSubtractWithDataShuffling) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto broadcast = Broadcast(gds0, {1, 10});
auto convert = ConvertElementType(broadcast, S32);
  auto slice = SliceInDim(convert, /*start_index=*/0, /*limit_index=*/1,
                          /*stride=*/1, /*dimno=*/1);
gds0 = Reshape(slice, {});
auto sub = Sub(gds1, gds0);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
auto add2 = Add(gds1, gds0);
auto add3 = Add(sub, add2);
EXPECT_EQ(ComputeUpperBoundLiteral(add3, &b).value().Get<int32_t>({}), 6);
}
TEST_F(UpperBoundInferenceTest, SumSubtractEquivalentGetDimensionSize) {
XlaBuilder b(TestName());
auto p =
Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2, 3}, {true, true}), "p0");
auto gds0 = GetDimensionSize(p, 0);
auto gds1 = GetDimensionSize(p, 1);
auto gds2 = GetDimensionSize(p, 0);
auto sub = Sub(gds1, gds2);
auto add = Add(sub, gds0);
EXPECT_EQ(ComputeUpperBoundLiteral(add, &b).value().Get<int32_t>({}), 3);
}
TEST_F(UpperBoundInferenceTest, ParamCantInferBound) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {2}, {true}), "p0");
auto p1 = Parameter(&b, 1, ShapeUtil::MakeShape(S32, {}, {}), "p1");
auto gds = GetDimensionSize(p0, 0);
  auto div = Div(gds, p1);
  EXPECT_FALSE(
      ComputeUpperBoundLiteral(div, &b).value().Get<int32_t>({}).has_value());
}
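// Sorting (values, iota) pairs by dynamic keys: every output index comes
// from 0..elem_count-1, so elem_count - 1 upper-bounds each element.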
TEST_F(UpperBoundInferenceTest, KeyValueSort) {
XlaBuilder comparator_b("comparator");
auto p0 = Parameter(&comparator_b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
auto p1 = Parameter(&comparator_b, 1, ShapeUtil::MakeShape(S32, {}), "p1");
Parameter(&comparator_b, 2, ShapeUtil::MakeShape(S32, {}), "p2");
Parameter(&comparator_b, 3, ShapeUtil::MakeShape(S32, {}), "p3");
Compare(p0, p1, ComparisonDirection::kGe);
TF_ASSERT_OK_AND_ASSIGN(auto comparator, comparator_b.Build());
int64_t elem_count = 17;
XlaBuilder b(TestName());
auto param = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {elem_count}), "p0");
auto iota = Iota(&b, S32, elem_count);
auto sort = Sort({param, iota}, comparator);
auto gte = GetTupleElement(sort, 1);
for (int64_t i = 0; i < elem_count; ++i) {
auto result_first_elem =
ComputeUpperBoundLiteral(gte, &b).value().Get<int32_t>({i});
EXPECT_TRUE(result_first_elem.has_value());
EXPECT_EQ(result_first_elem.value(), elem_count - 1);
}
}
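// Tests for exact constant-value inference, including values injected
// through the "SetBound" custom call.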
class ConstValueInferenceTest : public ValueInferenceTest {
public:
explicit ConstValueInferenceTest(se::Platform* platform = nullptr)
: platform_(platform) {}
absl::StatusOr<OptionalLiteral> ComputeConstantValueLiteral(
XlaOp operand, XlaBuilder* builder, Layout* output_layout = nullptr) {
ValueInference value_inference(builder);
TF_ASSIGN_OR_RETURN(auto literal, value_inference.AnalyzeConstant(
operand, ValueInferenceMode::kValue));
return literal;
}
se::Platform* platform_;
};
TEST_F(ConstValueInferenceTest, ConstValuePassThroughSetBound) {
XlaBuilder b(TestName());
auto p0 = ConstantR0<int32_t>(&b, 32);
Shape shape = ShapeUtil::MakeShape(S32, {});
xla::Literal dynamism = xla::LiteralUtil::CreateR0<bool>(false);
xla::Literal bound = xla::LiteralUtil::CreateR0<int32_t>(32);
xla::Literal tuple =
xla::LiteralUtil::MakeTupleOwned(std::move(bound), std::move(dynamism));
auto set_bound =
CustomCall(&b, "SetBound", {p0}, shape, "", false, {}, &tuple);
auto result =
ComputeConstantValueLiteral(set_bound, &b).value().Get<int32_t>({});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 32);
}
TEST_F(ConstValueInferenceTest, ParameterValuePassThroughSetBound) {
XlaBuilder b(TestName());
auto p0 = Parameter(&b, 0, ShapeUtil::MakeShape(S32, {}), "p0");
Shape shape = ShapeUtil::MakeShape(S32, {});
xla::Literal dynamism = xla::LiteralUtil::CreateR0<bool>(false);
xla::Literal bound = xla::LiteralUtil::CreateR0<int32_t>(32);
xla::Literal tuple =
xla::LiteralUtil::MakeTupleOwned(std::move(bound), std::move(dynamism));
auto set_bound =
CustomCall(&b, "SetBound", {p0}, shape, "", false, {}, &tuple);
auto result =
ComputeConstantValueLiteral(set_bound, &b).value().Get<int32_t>({});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 32);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/value_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/value_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c721652f-e92d-4952-917a-d4fcc38955ce | cpp | tensorflow/tensorflow | broadcast | tensorflow/compiler/tf2xla/lib/broadcast.cc | third_party/xla/xla/tests/broadcast_test.cc | #include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/client/lib/broadcast.h"
#include "xla/client/xla_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/bcast.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
absl::StatusOr<xla::XlaOp> BroadcastTo(xla::XlaOp input,
absl::Span<int64_t const> output_dims) {
return xla::BroadcastTo(input, output_dims);
}
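// Broadcasts *lhs and *rhs in place to a common shape following
// TensorFlow's binary-op broadcasting rules; fails with InvalidArgument if
// the shapes are incompatible.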
Status BroadcastOpsToSame(xla::XlaOp* lhs, xla::XlaOp* rhs) {
TF_ASSIGN_OR_RETURN(auto lhs_xla_shape, lhs->builder()->GetShape(*lhs));
TF_ASSIGN_OR_RETURN(auto rhs_xla_shape, rhs->builder()->GetShape(*rhs));
tensorflow::TensorShape lhs_tf_shape;
tensorflow::TensorShape rhs_tf_shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(lhs_xla_shape, &lhs_tf_shape));
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(rhs_xla_shape, &rhs_tf_shape));
if (!lhs_tf_shape.IsSameSize(rhs_tf_shape)) {
tensorflow::BCast bcast(tensorflow::BCast::FromShape(lhs_tf_shape),
tensorflow::BCast::FromShape(rhs_tf_shape));
if (!bcast.IsValid()) {
return tensorflow::errors::InvalidArgument(
"Dimensions cannot be made to match through broadcasting");
}
TF_ASSIGN_OR_RETURN(*lhs, xla::BroadcastTo(*lhs, bcast.output_shape()));
TF_ASSIGN_OR_RETURN(*rhs, xla::BroadcastTo(*rhs, bcast.output_shape()));
}
return absl::OkStatus();
}
} | #include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class BroadcastTest : public HloTestBase {};
XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR0<float>(42.0), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto element1 = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 2}), input, {0}));
auto element2 = builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3}), input, {1}));
builder.AddInstruction(HloInstruction::CreateTuple({element1, element2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}),
LiteralSlice(result, {0}), error_spec_));
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}),
LiteralSlice(result, {1}), error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {0, 1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2}), input, {1, 0}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), result,
error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3, 2}), input, {0, 2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
{{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}),
result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0, 2.0})));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 2, 3, 3}), input, {1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(2, 2, 3, 3);
Array2D<float> pz({{1, 2}, {1, 2}});
expected.FillWithPZ(pz);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
auto builder = HloComputation::Builder(TestName());
std::vector<float> input_data(1025);
int64_t r1_size = input_data.size();
std::iota(input_data.begin(), input_data.end(), 0.0f);
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(input_data)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 3, 3, r1_size}), input, {3}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(3, 3, 3, 1025);
Array2D<float> yx(3, r1_size);
for (int64_t y = 0; y < 3; ++y) {
for (int64_t x = 0; x < r1_size; ++x) {
yx(y, x) = input_data[x];
}
}
expected.FillWithYX(yx);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
auto builder = HloComputation::Builder(TestName());
Array4D<float> r4_array(32, 64, 7, 7);
r4_array.Fill(42.0);
std::vector<float> r1_array(64, 42.0);
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(r1_array)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {32, 64, 7, 7}), input, {1}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR4FromArray4D(r4_array),
result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {64, 64, 3, 3}), input, {}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
LOG(INFO) << hlo_module->ToString();
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(64, 64, 3, 3);
expected.Fill(1.0f);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) {
auto builder = HloComputation::Builder(TestName());
Array2D<float> to_broadcast({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2FromArray2D<float>(to_broadcast)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {3, 3, 2, 2}), input, {2, 3}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
Array4D<float> expected(3, 3, 2, 2);
expected.FillWithYX(to_broadcast);
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
auto builder = HloComputation::Builder(TestName());
Array3D<float> input_vals(2, 3, 4);
input_vals.FillRandom(1.0);
Array4D<float> expected(2, 3, 4, 5);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 4; ++k) {
for (int m = 0; m < 5; ++m) {
expected(i, j, k, m) = input_vals(i, j, k);
}
}
}
}
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR3FromArray3D<float>(input_vals)));
builder.AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(F32, {2, 3, 4, 5}), input, {0, 1, 2}));
auto hlo_module = CreateNewVerifiedModule();
hlo_module->AddEntryComputation(builder.Build());
auto result = ExecuteAndTransfer(std::move(hlo_module), {});
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/lib/broadcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/broadcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d321a62-aaa5-49f2-af6b-45de6e1cf96f | cpp | tensorflow/tensorflow | tridiagonal | third_party/xla/xla/hlo/builder/lib/tridiagonal.cc | third_party/xla/xla/hlo/builder/lib/tridiagonal_test.cc | #include "xla/hlo/builder/lib/tridiagonal.h"
#include <cstdint>
#include <numeric>
#include <string>
#include <string_view>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace tridiagonal {
namespace {
absl::Status CheckSecondToLastDimension(const Shape& op_shape, int64_t rank,
int64_t expected,
const std::string& op_name) {
  const auto actual_dim_size = ShapeUtil::GetDimension(op_shape, rank - 2);
  if (actual_dim_size != expected) {
    return InvalidArgument(
        "Second to last dimension of %s should be %d but is %d.", op_name,
        expected, actual_dim_size);
}
return absl::OkStatus();
}
absl::StatusOr<int64_t> CheckSystemAndReturnNumEquations(XlaOp lower_diagonal,
XlaOp main_diagonal,
XlaOp upper_diagonal,
XlaOp rhs) {
XlaBuilder* builder = lower_diagonal.builder();
TF_ASSIGN_OR_RETURN(Shape lower_diagonal_shape,
builder->GetShape(lower_diagonal));
TF_ASSIGN_OR_RETURN(Shape main_diagonal_shape,
builder->GetShape(main_diagonal));
TF_ASSIGN_OR_RETURN(Shape upper_diagonal_shape,
builder->GetShape(upper_diagonal));
TF_ASSIGN_OR_RETURN(Shape rhs_shape, builder->GetShape(rhs));
const auto lower_diagonal_rank = lower_diagonal_shape.rank();
const auto main_diagonal_rank = main_diagonal_shape.rank();
const auto upper_diagonal_rank = upper_diagonal_shape.rank();
const auto rhs_rank = rhs_shape.rank();
if (!((lower_diagonal_rank == main_diagonal_rank) &&
(lower_diagonal_rank == upper_diagonal_rank) &&
(lower_diagonal_rank == rhs_rank))) {
return InvalidArgument(
"All inputs should have the same rank but got rank "
"%d for lower diagonal, %d for diagonal, %d for upper diagonal, "
"%d for rhs",
lower_diagonal_rank, main_diagonal_rank, upper_diagonal_rank, rhs_rank);
}
const auto rank = lower_diagonal_rank;
if (rank < 2) {
return InvalidArgument("Arguments must have rank >=2; got rank %d.", rank);
}
const auto lower_diagonal_num_eqs =
ShapeUtil::GetDimension(lower_diagonal_shape, rank - 1);
const auto main_diagonal_num_eqs =
ShapeUtil::GetDimension(main_diagonal_shape, rank - 1);
const auto upper_diagonal_num_eqs =
ShapeUtil::GetDimension(upper_diagonal_shape, rank - 1);
const auto rhs_num_eqs = ShapeUtil::GetDimension(rhs_shape, rank - 1);
if (!((lower_diagonal_num_eqs == main_diagonal_num_eqs) &&
(lower_diagonal_num_eqs == upper_diagonal_num_eqs) &&
(lower_diagonal_num_eqs == rhs_num_eqs))) {
return InvalidArgument(
"All inputs should have the same innermost dimension but got "
"%d for lower diagonal, %d for diagonal, %d for upper diagonal, "
"%d for rhs",
lower_diagonal_num_eqs, main_diagonal_num_eqs, upper_diagonal_num_eqs,
rhs_num_eqs);
}
const auto num_equations = lower_diagonal_num_eqs;
TF_RETURN_IF_ERROR(CheckSecondToLastDimension(lower_diagonal_shape, rank, 1,
"lower diagonal"));
TF_RETURN_IF_ERROR(
CheckSecondToLastDimension(main_diagonal_shape, rank, 1, "diagonal"));
TF_RETURN_IF_ERROR(CheckSecondToLastDimension(upper_diagonal_shape, rank, 1,
"upper diagonal"));
return num_equations;
}
struct TridiagonalMatMulShapeParams {
int64_t rank;
int64_t m;
int64_t n;
PrimitiveType element_type;
};
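// Validates one diagonal operand of TridiagonalMatMul against rhs: same
// rank and batch dimensions, a second-to-last dimension of 1, and a last
// dimension equal to rhs's second-to-last dimension (the matrix height).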
absl::Status ValidateTridiagonalMatMulDiagonal(
const Shape& diagonal_shape, const std::string_view diagonal_name,
const Shape& rhs_shape) {
const int64_t diagonal_rank = diagonal_shape.rank();
const int64_t rhs_rank = rhs_shape.rank();
if (diagonal_rank != rhs_rank) {
return InvalidArgument("%s must have same rank as rhs, but got %d and %d.",
diagonal_name, diagonal_rank, rhs_rank);
}
for (int64_t i = 0; i < rhs_rank - 2; i++) {
const int64_t diagonal_dimension =
ShapeUtil::GetDimension(diagonal_shape, i);
const int64_t rhs_dimension = ShapeUtil::GetDimension(rhs_shape, i);
if (diagonal_dimension != rhs_dimension) {
return InvalidArgument(
"%s must have same outer dimensions as rhs, but for index %d, got %d "
"and %d.",
diagonal_name, i, diagonal_dimension, rhs_dimension);
}
}
  if (const int64_t diagonal_second_last_dimension =
          ShapeUtil::GetDimension(diagonal_shape, rhs_rank - 2);
      diagonal_second_last_dimension != 1) {
    return InvalidArgument(
        "%s's second-to-last dimension must be 1, but got %d.", diagonal_name,
        diagonal_second_last_dimension);
  }
  const int64_t diagonal_last_dimension =
      ShapeUtil::GetDimension(diagonal_shape, rhs_rank - 1);
  const int64_t rhs_second_last_dimension =
      ShapeUtil::GetDimension(rhs_shape, rhs_rank - 2);
  if (diagonal_last_dimension != rhs_second_last_dimension) {
    return InvalidArgument(
        "%s's last dimension size must be rhs's second-to-last dimension "
        "size, but got %d and %d.",
        diagonal_name, diagonal_last_dimension, rhs_second_last_dimension);
  }
return absl::OkStatus();
}
absl::StatusOr<TridiagonalMatMulShapeParams>
CheckMatMulSystemAndReturnShapeParams(XlaOp upper_diagonal, XlaOp main_diagonal,
XlaOp lower_diagonal, XlaOp rhs) {
XlaBuilder* builder = upper_diagonal.builder();
TF_ASSIGN_OR_RETURN(const Shape upper_diagonal_shape,
builder->GetShape(upper_diagonal));
TF_ASSIGN_OR_RETURN(const Shape main_diagonal_shape,
builder->GetShape(main_diagonal));
TF_ASSIGN_OR_RETURN(const Shape lower_diagonal_shape,
builder->GetShape(lower_diagonal));
TF_ASSIGN_OR_RETURN(const Shape rhs_shape, builder->GetShape(rhs));
const int64_t rank = rhs_shape.rank();
if (rank < 2) {
return InvalidArgument("Input must have rank >= 2, but got %d.", rank);
}
TF_RETURN_IF_ERROR(ValidateTridiagonalMatMulDiagonal(upper_diagonal_shape,
"superdiag", rhs_shape));
TF_RETURN_IF_ERROR(ValidateTridiagonalMatMulDiagonal(main_diagonal_shape,
"maindiag", rhs_shape));
TF_RETURN_IF_ERROR(ValidateTridiagonalMatMulDiagonal(lower_diagonal_shape,
"subdiag", rhs_shape));
const int64_t rhs_height = ShapeUtil::GetDimension(rhs_shape, rank - 2);
const int64_t rhs_width = ShapeUtil::GetDimension(rhs_shape, rank - 1);
TridiagonalMatMulShapeParams shape_params;
shape_params.rank = rank;
shape_params.m = rhs_height;
shape_params.n = rhs_width;
shape_params.element_type = rhs_shape.element_type();
return shape_params;
}
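// Extracts the i-th coefficient along the minor dimension as a slice of
// size 1, preserving the operand's rank.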
XlaOp Coefficient(XlaOp operand, int32_t i) {
  return DynamicSliceInMinorDims(
      operand, /*starts=*/{ConstantR0(operand.builder(), i)}, /*sizes=*/{1});
}
XlaOp Coefficient(XlaOp operand, XlaOp i) {
  return DynamicSliceInMinorDims(operand, /*starts=*/{i}, /*sizes=*/{1});
}
XlaOp UpdateEq(XlaOp updated, int32_t i, XlaOp update) {
return DynamicUpdateSliceInMinorDims(
updated, update, {ConstantR0(updated.builder(), i)});
}
XlaOp UpdateEq(XlaOp updated, XlaOp i, XlaOp update) {
return DynamicUpdateSliceInMinorDims(updated, update, {i});
}
template <SolverAlgorithm algo>
absl::StatusOr<XlaOp> TridiagonalSolverImpl(XlaOp lower_diagonal,
XlaOp main_diagonal,
XlaOp upper_diagonal, XlaOp rhs);
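// Thomas algorithm: Gaussian elimination specialized to tridiagonal
// systems. A forward sweep eliminates the lower diagonal, then backward
// substitution recovers the solution; both sweeps are sequential
// O(num_eqs) loops expressed with ForEachIndex.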
template <>
absl::StatusOr<XlaOp> TridiagonalSolverImpl<kThomas>(XlaOp lower_diagonal,
XlaOp main_diagonal,
XlaOp upper_diagonal,
XlaOp rhs) {
XlaBuilder* builder = lower_diagonal.builder();
TF_ASSIGN_OR_RETURN(int64_t num_eqs,
CheckSystemAndReturnNumEquations(
lower_diagonal, main_diagonal, upper_diagonal, rhs));
XlaOp main_diag_after_elimination = ZerosLike(main_diagonal);
XlaOp rhs_after_elimination = ZerosLike(rhs);
XlaOp upper_diagonal_coeffs = ZerosLike(upper_diagonal);
XlaOp x_coeffs = ZerosLike(rhs);
main_diag_after_elimination =
UpdateEq(main_diag_after_elimination, 0, Coefficient(main_diagonal, 0));
rhs_after_elimination =
UpdateEq(rhs_after_elimination, 0, Coefficient(rhs, 0));
auto preparation_body_fn =
[](XlaOp i, absl::Span<const XlaOp> values,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto upper_diagonal_coeffs = values[0];
auto upper_diagonal = values[1];
upper_diagonal_coeffs =
UpdateEq(upper_diagonal_coeffs, i, Coefficient(upper_diagonal, i));
return std::vector<XlaOp>{upper_diagonal_coeffs, upper_diagonal};
};
TF_ASSIGN_OR_RETURN(auto values_after_preparation,
ForEachIndex(num_eqs - 1, S32, preparation_body_fn,
{upper_diagonal_coeffs, upper_diagonal},
"preparation", builder));
upper_diagonal_coeffs = values_after_preparation[0];
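  // Forward elimination: for i = 1..n-1 compute w_i = a_i / b'_{i-1}, then
  // b'_i = b_i - w_i * c_{i-1} and d'_i = d_i - w_i * d'_{i-1}.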
auto forward_transformation_fn =
[](XlaOp i_minus_one, absl::Span<const XlaOp> values,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto lower_diagonal = values[0];
auto main_diagonal = values[1];
auto rhs = values[2];
auto main_diag_after_elimination = values[3];
auto upper_diagonal_coeffs = values[4];
auto rhs_after_elimination = values[5];
auto one = ScalarLike(i_minus_one, 1);
auto i = i_minus_one + one;
auto lower_diagonal_i = Coefficient(lower_diagonal, i);
auto main_diagonal_i = Coefficient(main_diagonal, i);
auto rhs_i = Coefficient(rhs, i);
auto w_i =
lower_diagonal_i / Coefficient(main_diag_after_elimination, i - one);
main_diag_after_elimination = UpdateEq(
main_diag_after_elimination, i,
main_diagonal_i - w_i * Coefficient(upper_diagonal_coeffs, i - one));
rhs_after_elimination =
UpdateEq(rhs_after_elimination, i,
rhs_i - w_i * Coefficient(rhs_after_elimination, i - one));
return std::vector<XlaOp>{lower_diagonal,
main_diagonal,
rhs,
main_diag_after_elimination,
upper_diagonal_coeffs,
rhs_after_elimination};
};
TF_ASSIGN_OR_RETURN(
auto values_after_fwd_transformation,
ForEachIndex(
num_eqs - 1, S32, forward_transformation_fn,
{lower_diagonal, main_diagonal, rhs, main_diag_after_elimination,
upper_diagonal_coeffs, rhs_after_elimination},
"forward_transformation", builder));
lower_diagonal = values_after_fwd_transformation[0];
main_diagonal = values_after_fwd_transformation[1];
rhs = values_after_fwd_transformation[2];
main_diag_after_elimination = values_after_fwd_transformation[3];
upper_diagonal_coeffs = values_after_fwd_transformation[4];
rhs_after_elimination = values_after_fwd_transformation[5];
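  // Backward substitution: x_{n-1} = d'_{n-1} / b'_{n-1}, then
  // x_i = (d'_i - c_i * x_{i+1}) / b'_i for i = n-2..0.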
x_coeffs =
UpdateEq(x_coeffs, num_eqs - 1,
Coefficient(rhs_after_elimination, num_eqs - 1) /
Coefficient(main_diag_after_elimination, num_eqs - 1));
auto bwd_reduction_fn =
[num_eqs](XlaOp j, absl::Span<const XlaOp> values,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto x_coeffs = values[0];
auto rhs_after_elimination = values[1];
auto upper_diagonal_coeffs = values[2];
auto main_diag_after_elimination = values[3];
auto n = ScalarLike(j, num_eqs - 2);
auto one = ScalarLike(j, 1);
auto i = n - j;
x_coeffs = UpdateEq(x_coeffs, i,
(Coefficient(rhs_after_elimination, i) -
Coefficient(upper_diagonal_coeffs, i) *
Coefficient(x_coeffs, i + one)) /
Coefficient(main_diag_after_elimination, i));
return std::vector<XlaOp>{x_coeffs, rhs_after_elimination,
upper_diagonal_coeffs,
main_diag_after_elimination};
};
TF_ASSIGN_OR_RETURN(
auto values_after_bwd_reduction,
ForEachIndex(num_eqs - 1, S32, bwd_reduction_fn,
{x_coeffs, rhs_after_elimination, upper_diagonal_coeffs,
main_diag_after_elimination},
"backward_reduction", builder));
x_coeffs = values_after_bwd_reduction[0];
return x_coeffs;
}
}
absl::StatusOr<XlaOp> TridiagonalSolver(SolverAlgorithm algo,
XlaOp lower_diagonal,
XlaOp main_diagonal,
XlaOp upper_diagonal, XlaOp rhs) {
switch (algo) {
case kThomas:
return TridiagonalSolverImpl<kThomas>(lower_diagonal, main_diagonal,
upper_diagonal, rhs);
default:
return Unimplemented(
"Only algorithm kThomas (%d) is implemented, got: %d",
static_cast<int>(kThomas), algo);
}
}
absl::StatusOr<XlaOp> TridiagonalSolver(SolverAlgorithm algo, XlaOp diagonals,
XlaOp rhs) {
XlaBuilder* builder = diagonals.builder();
TF_ASSIGN_OR_RETURN(Shape diagonals_shape, builder->GetShape(diagonals));
const int64_t rank = diagonals_shape.rank();
  auto upper_diagonal = SliceInDim(diagonals, /*start_index=*/0,
                                   /*limit_index=*/1, /*stride=*/1,
                                   /*dimno=*/rank - 2);
  auto main_diagonal = SliceInDim(diagonals, /*start_index=*/1,
                                  /*limit_index=*/2, /*stride=*/1,
                                  /*dimno=*/rank - 2);
  auto lower_diagonal = SliceInDim(diagonals, /*start_index=*/2,
                                   /*limit_index=*/3, /*stride=*/1,
                                   /*dimno=*/rank - 2);
std::vector<int64_t> transpose_order(rank);
std::iota(transpose_order.begin(), transpose_order.end(), 0);
transpose_order[rank - 2] = rank - 1;
transpose_order[rank - 1] = rank - 2;
rhs = Transpose(rhs, transpose_order);
switch (algo) {
case kThomas: {
TF_ASSIGN_OR_RETURN(
XlaOp x, TridiagonalSolverImpl<kThomas>(lower_diagonal, main_diagonal,
upper_diagonal, rhs));
return Transpose(x, transpose_order);
}
default:
return Unimplemented(
"Only algorithm kThomas (%d) is implemented, got: %d",
static_cast<int>(kThomas), algo);
}
}
absl::StatusOr<XlaOp> TridiagonalMatMul(XlaOp upper_diagonal,
XlaOp main_diagonal,
XlaOp lower_diagonal, XlaOp rhs) {
TF_ASSIGN_OR_RETURN(const TridiagonalMatMulShapeParams shape_params,
CheckMatMulSystemAndReturnShapeParams(
upper_diagonal, main_diagonal, lower_diagonal, rhs));
XlaBuilder* builder = main_diagonal.builder();
std::vector<int64_t> broadcasted_dims(shape_params.rank);
std::iota(broadcasted_dims.begin(), broadcasted_dims.end(), 0);
std::vector<int64_t> transpose_dims = broadcasted_dims;
std::swap(transpose_dims[shape_params.rank - 2],
transpose_dims[shape_params.rank - 1]);
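  // Row i of the product is subdiag_i * rhs_{i-1} + maindiag_i * rhs_i +
  // superdiag_i * rhs_{i+1}; the three banded contributions are computed
  // separately from shifted rhs slices, zero-padded at the boundary rows,
  // and summed.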
main_diagonal = xla::Transpose(main_diagonal, transpose_dims);
XlaOp diag_part = xla::Mul(main_diagonal, rhs, broadcasted_dims);
  upper_diagonal = SliceInMinorDims(upper_diagonal, /*start=*/{0},
                                    /*end=*/{shape_params.m - 1});
upper_diagonal = xla::Transpose(upper_diagonal, transpose_dims);
  XlaOp adjusted_upper_rhs = SliceInMinorDims(
      rhs, /*start=*/{1, 0}, /*end=*/{shape_params.m, shape_params.n});
XlaOp upper_diag_part =
xla::Mul(upper_diagonal, adjusted_upper_rhs, broadcasted_dims);
upper_diag_part = xla::PadInDim(
upper_diag_part, xla::Zero(builder, shape_params.element_type),
shape_params.rank - 2, 0, 1);
  lower_diagonal = SliceInMinorDims(lower_diagonal, /*start=*/{1},
                                    /*end=*/{shape_params.m});
lower_diagonal = xla::Transpose(lower_diagonal, transpose_dims);
  XlaOp adjusted_lower_rhs = SliceInMinorDims(
      rhs, /*start=*/{0, 0}, /*end=*/{shape_params.m - 1, shape_params.n});
XlaOp lower_diag_part =
xla::Mul(lower_diagonal, adjusted_lower_rhs, broadcasted_dims);
lower_diag_part = xla::PadInDim(
lower_diag_part, xla::Zero(builder, shape_params.element_type),
shape_params.rank - 2, 1, 0);
return diag_part + upper_diag_part + lower_diag_part;
}
}
} | #include "xla/hlo/builder/lib/tridiagonal.h"
#include <cstdint>
#include <tuple>
#include <vector>
#include "absl/status/status.h"
#include "xla/array.h"
#include "xla/array3d.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace tridiagonal {
namespace {
class TridiagonalTest
: public ClientLibraryTestBase,
public ::testing::WithParamInterface<std::tuple<int, int, int>> {};
XLA_TEST_P(TridiagonalTest, SimpleTridiagonalMatMulOk) {
xla::XlaBuilder builder(TestName());
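  // The last superdiagonal entry and the first subdiagonal entry are never
  // read by TridiagonalMatMul (they are sliced away), so 999 marks them as
  // sentinels.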
Array3D<float> upper_diagonal{{{34, 35, 999}}};
Array3D<float> main_diagonal{{{21, 22, 23}}};
Array3D<float> lower_diagonal{{{999, 10, 100}}};
Array3D<float> rhs{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}};
XlaOp upper_diagonal_xla;
XlaOp main_diagonal_xla;
XlaOp lower_diagonal_xla;
XlaOp rhs_xla;
auto upper_diagonal_data = CreateR3Parameter<float>(
upper_diagonal, 0, "upper_diagonal", &builder, &upper_diagonal_xla);
auto main_diagonal_data = CreateR3Parameter<float>(
main_diagonal, 1, "main_diagonal", &builder, &main_diagonal_xla);
auto lower_diagonal_data = CreateR3Parameter<float>(
lower_diagonal, 2, "lower_diagonal", &builder, &lower_diagonal_xla);
auto rhs_data = CreateR3Parameter<float>(rhs, 3, "rhs", &builder, &rhs_xla);
TF_ASSERT_OK_AND_ASSIGN(
XlaOp x, TridiagonalMatMul(upper_diagonal_xla, main_diagonal_xla,
lower_diagonal_xla, rhs_xla));
ASSERT_EQ(x.builder()->first_error(), absl::OkStatus());
ASSERT_TRUE(x.valid());
std::vector<int64_t> expected_shape{1, 3, 4};
std::vector<float> expected_values{191, 246, 301, 356, 435, 502,
569, 636, 707, 830, 953, 1076};
TF_ASSERT_OK_AND_ASSIGN(
auto result,
ComputeAndTransfer(x.builder(),
{upper_diagonal_data.get(), main_diagonal_data.get(),
lower_diagonal_data.get(), rhs_data.get()}));
EXPECT_EQ(result.shape().dimensions(), expected_shape);
EXPECT_EQ(result.data<float>({}), expected_values);
}
XLA_TEST_P(TridiagonalTest, TridiagonalMatMulWrongShape) {
xla::XlaBuilder builder(TestName());
Array<float> upper_diagonal = Array<float>({5, 3, 7}, 1);
Array<float> main_diagonal = Array<float>({5, 3, 7}, 1);
Array<float> lower_diagonal = Array<float>({5, 3, 7}, 1);
Array<float> rhs = Array<float>({5, 3, 7, 6}, 1);
XlaOp upper_diagonal_xla;
XlaOp main_diagonal_xla;
XlaOp lower_diagonal_xla;
XlaOp rhs_xla;
auto upper_diagonal_data = CreateParameter<float>(
upper_diagonal, 0, "upper_diagonal", &builder, &upper_diagonal_xla);
auto main_diagonal_data = CreateParameter<float>(
main_diagonal, 1, "main_diagonal", &builder, &main_diagonal_xla);
auto lower_diagonal_data = CreateParameter<float>(
lower_diagonal, 2, "lower_diagonal", &builder, &lower_diagonal_xla);
auto rhs_data = CreateParameter<float>(rhs, 3, "rhs", &builder, &rhs_xla);
auto result = TridiagonalMatMul(upper_diagonal_xla, main_diagonal_xla,
lower_diagonal_xla, rhs_xla);
ASSERT_EQ(result.status(),
InvalidArgument(
"superdiag must have same rank as rhs, but got 3 and 4."));
}
XLA_TEST_P(TridiagonalTest, Solves) {
const auto& spec = GetParam();
xla::XlaBuilder builder(TestName());
const int64_t batch_size = std::get<0>(spec);
const int64_t num_eqs = std::get<1>(spec);
const int64_t num_rhs = std::get<2>(spec);
Array3D<float> lower_diagonal(batch_size, 1, num_eqs);
Array3D<float> main_diagonal(batch_size, 1, num_eqs);
Array3D<float> upper_diagonal(batch_size, 1, num_eqs);
Array3D<float> rhs(batch_size, num_rhs, num_eqs);
  lower_diagonal.FillRandom(/*stddev=*/1.0, /*mean=*/0.0, /*seed=*/0);
  main_diagonal.FillRandom(/*stddev=*/0.05, /*mean=*/1.0,
                           /*seed=*/batch_size * num_eqs);
  upper_diagonal.FillRandom(/*stddev=*/1.0, /*mean=*/0.0,
                            /*seed=*/2 * batch_size * num_eqs);
  rhs.FillRandom(/*stddev=*/1.0, /*mean=*/0.0,
                 /*seed=*/3 * batch_size * num_eqs);
XlaOp lower_diagonal_xla;
XlaOp main_diagonal_xla;
XlaOp upper_diagonal_xla;
XlaOp rhs_xla;
auto lower_diagonal_data = CreateR3Parameter<float>(
lower_diagonal, 0, "lower_diagonal", &builder, &lower_diagonal_xla);
auto main_diagonal_data = CreateR3Parameter<float>(
main_diagonal, 1, "main_diagonal", &builder, &main_diagonal_xla);
auto upper_diagonal_data = CreateR3Parameter<float>(
upper_diagonal, 2, "upper_diagonal", &builder, &upper_diagonal_xla);
auto rhs_data = CreateR3Parameter<float>(rhs, 3, "rhs", &builder, &rhs_xla);
TF_ASSERT_OK_AND_ASSIGN(
XlaOp x, TridiagonalSolver(kThomas, lower_diagonal_xla, main_diagonal_xla,
upper_diagonal_xla, rhs_xla));
auto Coefficient = [](auto operand, auto i) {
return SliceInMinorDims(operand, {i}, {i + 1});
};
std::vector<XlaOp> relative_errors(num_eqs);
for (int64_t i = 0; i < num_eqs; i++) {
auto a_i = Coefficient(lower_diagonal_xla, i);
auto b_i = Coefficient(main_diagonal_xla, i);
auto c_i = Coefficient(upper_diagonal_xla, i);
auto d_i = Coefficient(rhs_xla, i);
if (i == 0) {
relative_errors[i] =
(b_i * Coefficient(x, i) + c_i * Coefficient(x, i + 1) - d_i) / d_i;
} else if (i == num_eqs - 1) {
relative_errors[i] =
(a_i * Coefficient(x, i - 1) + b_i * Coefficient(x, i) - d_i) / d_i;
} else {
relative_errors[i] =
(a_i * Coefficient(x, i - 1) + b_i * Coefficient(x, i) +
c_i * Coefficient(x, i + 1) - d_i) /
d_i;
}
}
Abs(ConcatInDim(&builder, relative_errors, 2));
TF_ASSERT_OK_AND_ASSIGN(
auto result,
ComputeAndTransfer(&builder,
{lower_diagonal_data.get(), main_diagonal_data.get(),
upper_diagonal_data.get(), rhs_data.get()}));
auto result_data = result.data<float>({});
for (auto result_component : result_data) {
    EXPECT_LT(result_component, 5e-3);
}
}
INSTANTIATE_TEST_CASE_P(TridiagonalTestInstantiation, TridiagonalTest,
::testing::Combine(::testing::Values(1, 12),
::testing::Values(4, 8),
::testing::Values(1, 12)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tridiagonal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tridiagonal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce3f61da-4768-4dc2-a05f-104fc015a518 | cpp | tensorflow/tensorflow | comparators | third_party/xla/xla/hlo/builder/lib/comparators.cc | third_party/xla/xla/hlo/builder/lib/comparators_test.cc | #include "xla/hlo/builder/lib/comparators.h"
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using XlaCompareOp = XlaOp (*)(XlaOp, XlaOp, absl::Span<const int64_t>);
XlaComputation CreateScalarComparisonComputation(
const std::string& name, const std::vector<PrimitiveType>& operand_types,
XlaBuilder* builder, XlaCompareOp generator) {
CHECK_NE(operand_types.size(), 0);
std::vector<std::optional<XlaCompareOp>> generators(operand_types.size());
generators[0] = generator;
return CreateScalarComparisonComputation(name, operand_types, generators,
builder);
}
}
XlaComputation CreateScalarComparisonComputation(
const std::string& name, const std::vector<PrimitiveType>& operand_types,
const std::vector<std::optional<XlaCompareOp>>& generators,
XlaBuilder* builder) {
auto b = builder->CreateSubBuilder(name);
if (operand_types.empty()) {
b->ReportError(InvalidArgument("operand_types should not be empty"));
return b->BuildAndNoteError();
}
CHECK_EQ(operand_types.size(), generators.size());
int parameter_count = 0;
int last_generator_index = 0;
std::vector<XlaOp> lhs_params;
std::vector<XlaOp> rhs_params;
for (auto operand_type : operand_types) {
auto scalar_shape = ShapeUtil::MakeShape(operand_type, {});
auto lhs_param = Parameter(b.get(), parameter_count * 2, scalar_shape,
absl::StrCat("p.", parameter_count, ".lhs"));
auto rhs_param = Parameter(b.get(), parameter_count * 2 + 1, scalar_shape,
absl::StrCat("p.", parameter_count, ".rhs"));
lhs_params.emplace_back(lhs_param);
rhs_params.emplace_back(rhs_param);
if (generators[parameter_count].has_value()) {
last_generator_index = parameter_count;
}
parameter_count++;
}
CHECK_NE(parameter_count, 0);
XlaOp result;
XlaOp prev_equal;
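  // Combine the per-operand comparisons lexicographically: generator i's
  // verdict applies only when all earlier compared operand pairs are equal
  // (tracked in prev_equal); otherwise the earlier verdict stands.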
for (int i = 0; i < parameter_count; i++) {
if (generators[i].has_value()) {
XlaOp cmp_op = generators[i].value()(lhs_params[i], rhs_params[i], {});
result = prev_equal.valid() ? Select(prev_equal, cmp_op, result) : cmp_op;
if (i != last_generator_index) {
XlaOp eq_op = EqTotalOrder(lhs_params[i], rhs_params[i]);
prev_equal = prev_equal.valid() ? And(prev_equal, eq_op) : eq_op;
}
}
}
CHECK(result.valid());
return b->BuildAndNoteError();
}
XlaComputation CreateScalarLtComputation(
const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
return CreateScalarComparisonComputation("compare-less-than", operand_types,
builder, LtTotalOrder);
}
XlaComputation CreateScalarGtComputation(
const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
return CreateScalarComparisonComputation(
"compare-greater-than", operand_types, builder, GtTotalOrder);
}
} | #include "xla/hlo/builder/lib/comparators.h"
#include <cmath>
#include <limits>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
class ComparatorsTest : public ClientLibraryTestBase {
public:
ComparatorsTest() : builder_(TestName()) {}
XlaBuilder* builder() { return &builder_; }
private:
XlaBuilder builder_;
};
template <
PrimitiveType type,
typename T = typename primitive_util::PrimitiveTypeToNative<type>::type>
void BuildComparatorAndComparisons(ComparatorsTest* test,
bool compare_less_than,
absl::InlinedVector<bool, 10>* expected) {
auto compare = compare_less_than
? CreateScalarLtComputation({type}, test->builder())
: CreateScalarGtComputation({type}, test->builder());
auto negative_nan = ConstantR0<T>(
test->builder(), -T(std::numeric_limits<float>::quiet_NaN()));
auto positive_nan = ConstantR0<T>(test->builder(),
T(std::numeric_limits<float>::quiet_NaN()));
auto negative_zero = ConstantR0<T>(test->builder(), T(-0.));
auto positive_zero = ConstantR0<T>(test->builder(), T(0.));
auto negative_infinity = MinValue(test->builder(), type);
auto positive_infinity = MaxValue(test->builder(), type);
std::vector<XlaOp> all_constants{negative_nan, negative_infinity,
negative_zero, positive_zero,
positive_infinity, positive_nan};
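  // The constants above are listed in ascending total order
  // (-NaN < -Inf < -0 < +0 < +Inf < +NaN), so the expected result of each
  // comparison follows directly from the index order.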
std::vector<XlaOp> all_comparisons;
all_comparisons.reserve(std::pow(all_constants.size(), 2));
for (const XlaOp& lhs_constant : all_constants) {
for (const XlaOp& rhs_constant : all_constants) {
all_comparisons.push_back(Broadcast(
Call(test->builder(), compare, {lhs_constant, rhs_constant}), {1}));
}
}
ConcatInDim(test->builder(), all_comparisons, 0);
expected->clear();
for (int i = 0; i < all_constants.size(); ++i) {
for (int j = 0; j < all_constants.size(); ++j) {
expected->push_back(compare_less_than ? i < j : i > j);
}
}
}
XLA_TEST_F(ComparatorsTest, CompareLtBF16) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<BF16>(this, /*compare_less_than=*/true,
                                      &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtBF16) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<BF16>(this, /*compare_less_than=*/false,
                                      &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF16) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F16>(this, /*compare_less_than=*/true,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF16) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F16>(this, /*compare_less_than=*/false,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF32) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F32>(this, /*compare_less_than=*/true,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF32) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F32>(this, /*compare_less_than=*/false,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF64) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F64>(this, /*compare_less_than=*/true,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF64) {
absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F64>(this, /*compare_less_than=*/false,
                                     &expected);
ComputeAndCompareR1<bool>(builder(), expected, {});
}
const auto kCompareStr = HloOpcodeString(xla::HloOpcode::kCompare);
const auto kParameterStr = HloOpcodeString(xla::HloOpcode::kParameter);
const auto kSelectStr = HloOpcodeString(xla::HloOpcode::kSelect);
void ExpectCompareOp(
    const xla::HloInstructionProto& op, xla::PrimitiveType type,
    absl::string_view direction, int parameter0_number, int parameter1_number,
    const tsl::protobuf::RepeatedPtrField<xla::HloInstructionProto>& all_ops) {
  EXPECT_EQ(op.opcode(), kCompareStr);
  EXPECT_EQ(op.comparison_direction(), direction);
const auto& operand0 = all_ops.at(op.operand_ids(0) - 1);
EXPECT_EQ(operand0.opcode(), kParameterStr);
EXPECT_EQ(operand0.parameter_number(), parameter0_number);
EXPECT_EQ(operand0.shape().element_type(), type);
const auto& operand1 = all_ops.at(op.operand_ids(1) - 1);
EXPECT_EQ(operand1.opcode(), kParameterStr);
EXPECT_EQ(operand1.parameter_number(), parameter1_number);
EXPECT_EQ(operand1.shape().element_type(), type);
}
TEST(VariadicComparatorTest, OneOperandOneComparison) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16}, {LtTotalOrder}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 2);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
TEST(VariadicComparatorTest, TwoOperandsOneComparison) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16, U32}, {LtTotalOrder, {}}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
TEST(VariadicComparatorTest, TwoOperandsTwoComparisons) {
XlaBuilder builder("test");
XlaComputation comp = CreateScalarComparisonComputation(
"computation", {U16, U32}, {LtTotalOrder, LtTotalOrder}, &builder);
EXPECT_EQ(comp.proto().computations_size(), 1);
EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
const auto& instr = comp.proto().computations(0).instructions();
const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
  EXPECT_EQ(root.opcode(), kSelectStr);
ExpectCompareOp(instr.at(root.operand_ids(0) - 1), U16, "EQ", 0, 1, instr);
ExpectCompareOp(instr.at(root.operand_ids(1) - 1), U32, "LT", 2, 3, instr);
ExpectCompareOp(instr.at(root.operand_ids(2) - 1), U16, "LT", 0, 1, instr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/comparators.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/comparators_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a4e2b06-7e2a-4885-9ad3-1270161a8c02 | cpp | tensorflow/tensorflow | qr | third_party/xla/xla/hlo/builder/lib/qr.cc | third_party/xla/xla/hlo/builder/lib/qr_test.cc | #include "xla/hlo/builder/lib/qr.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
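// Computes the QR decomposition via the "Qr" custom call. The returned
// q_and_r stores R in its upper triangle and the Householder reflectors
// below the diagonal (LAPACK geqrf-style packing); taus holds the scalar
// factors of the reflectors.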
QrDecomposition Qr(XlaOp a) {
auto result = [&]() -> absl::StatusOr<QrDecomposition> {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int num_dims = a_shape.rank();
if (num_dims < 2) {
return InvalidArgument(
"Arguments to QR must have rank >= 2: got shape %s",
a_shape.ToString());
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
std::vector<int64_t> taus_dims(a_shape.dimensions().begin(),
a_shape.dimensions().end());
taus_dims.pop_back();
taus_dims.back() = std::min(m, n);
auto taus_shape = ShapeUtil::MakeShape(a_shape.element_type(), taus_dims);
Shape qr_shape = ShapeUtil::MakeTupleShape({a_shape, taus_shape});
auto qr = CustomCall(a.builder(), "Qr", {a}, qr_shape);
a = GetTupleElement(qr, 0);
auto taus = GetTupleElement(qr, 1);
return QrDecomposition{a, taus};
}();
if (!result.ok()) {
XlaOp error = a.builder()->ReportError(result.status());
return QrDecomposition{error, error};
}
return result.value();
}
XlaOp ProductOfElementaryHouseholderReflectors(XlaOp a, XlaOp taus) {
XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
TF_ASSIGN_OR_RETURN(Shape taus_shape, builder->GetShape(taus));
if (a_shape.rank() < 2) {
return InvalidArgument(
"Matrix `a` must have >= 2 dimensions: got shape %s",
a_shape.ToString());
}
if (taus_shape.rank() + 1 != a_shape.rank()) {
return InvalidArgument(
"Matrix `taus` must have one fewer dimension than `a`: got shapes "
"%s and %s",
taus_shape.ToString(), a_shape.ToString());
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
if (m < n) {
return InvalidArgument(
"Argument to product of elementary Householder "
"reflectors must have m >= n, got shape %s",
a_shape.ToString());
}
absl::Span<const int64_t> a_batch_dims =
absl::MakeConstSpan(a_shape.dimensions().begin(),
a_shape.dimensions().begin() + a_shape.rank() - 2);
absl::Span<const int64_t> taus_batch_dims = absl::MakeConstSpan(
taus_shape.dimensions().begin(),
taus_shape.dimensions().begin() + taus_shape.rank() - 1);
const int64_t k = ShapeUtil::GetDimension(taus_shape, -1);
if (a_shape.element_type() != taus_shape.element_type() ||
a_batch_dims != taus_batch_dims || k > n) {
return InvalidArgument("Invalid shape for `taus`, got a=%s and taus=%s",
taus_shape.ToString(), a_shape.ToString());
}
return CustomCall(a.builder(), "ProductOfElementaryHouseholderReflectors",
{a, taus}, a_shape);
});
}
void QrExplicit(XlaOp a, bool full_matrices, XlaOp& q, XlaOp& r) {
absl::StatusOr<Shape> a_shape_or = a.builder()->GetShape(a);
if (!a_shape_or.ok()) {
q = a.builder()->ReportError(a_shape_or.status());
r = q;
return;
}
Shape a_shape = a_shape_or.value();
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
const int64_t p = std::min(m, n);
auto qr = Qr(a);
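  // Expand the product of Householder reflectors into an explicit Q. For
  // full_matrices, Q must be m x m, so the packed result is first sliced
  // (m < n) or zero-padded in its last dimension (m >= n) to a square m x m
  // matrix before the reflector product is expanded.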
if (full_matrices) {
XlaOp t;
if (m < n) {
t = SliceInMinorDims(qr.q_and_r, {0, 0}, {m, m});
} else {
      t = PadInDim(qr.q_and_r, Zero(a.builder(), a_shape.element_type()),
                   a_shape.dimensions_size() - 1, /*pad_lo=*/0,
                   /*pad_hi=*/m - n);
}
q = ProductOfElementaryHouseholderReflectors(t, qr.taus);
r = UpperTriangle(qr.q_and_r);
} else {
XlaOp t;
if (m < n) {
t = SliceInMinorDims(qr.q_and_r, {0, 0}, {m, m});
} else {
t = qr.q_and_r;
}
q = ProductOfElementaryHouseholderReflectors(t, qr.taus);
q = SliceInMinorDims(q, {0, 0}, {m, p});
r = UpperTriangle(SliceInMinorDims(qr.q_and_r, {0, 0}, {p, n}));
}
}
} | #include "xla/hlo/builder/lib/qr.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace {
using QrTest = xla::ClientLibraryTestBase;
XLA_TEST_F(QrTest, Simple) {
xla::Array2D<float> data({
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
});
for (bool full_matrices : {false, true}) {
for (int64_t m : {3, 4}) {
for (int64_t n : {3, 4}) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, q, r;
xla::Array<float> a_vals = data.Slice({0, 0}, {m, n});
auto a_data = CreateParameter<float>(a_vals, 0, "a", &builder, &a);
xla::QrExplicit(a, full_matrices, q, r);
xla::BatchDot(q, r, xla::PrecisionConfig::HIGHEST);
TF_ASSERT_OK_AND_ASSIGN(xla::Shape q_shape, builder.GetShape(q));
TF_ASSERT_OK_AND_ASSIGN(xla::Shape r_shape, builder.GetShape(r));
EXPECT_EQ(q_shape,
xla::ShapeUtil::MakeShape(
xla::F32, {m, full_matrices ? m : std::min(m, n)}));
EXPECT_EQ(r_shape,
xla::ShapeUtil::MakeShape(
xla::F32, {full_matrices ? m : std::min(m, n), n}));
ComputeAndCompare<float>(&builder, a_vals, {a_data.get()},
xla::ErrorSpec(1e-4, 1e-4));
}
}
}
}
XLA_TEST_F(QrTest, ZeroDiagonal) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a_vals({
{0, 1, 1},
{1, 0, 1},
{1, 1, 0},
});
xla::XlaOp a, q, r;
auto a_data = CreateR2Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::QrExplicit(a, true, q, r);
xla::BatchDot(q, r, xla::PrecisionConfig::HIGHEST);
ComputeAndCompareR2<float>(&builder, a_vals, {a_data.get()},
xla::ErrorSpec(1e-4, 1e-4));
}
XLA_TEST_F(QrTest, SimpleBatched) {
xla::XlaBuilder builder(TestName());
xla::Array3D<float> a_vals({
{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 456, 106},
{12, 48, 106, 62},
},
});
xla::XlaOp a, q, r;
auto a_data = CreateR3Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::QrExplicit(a, true, q, r);
xla::BatchDot(q, r, xla::PrecisionConfig::HIGHEST);
ComputeAndCompareR3<float>(&builder, a_vals, {a_data.get()},
xla::ErrorSpec(1e-4, 1e-4));
}
XLA_TEST_F(QrTest, SubnormalComplex) {
xla::Array2D<xla::complex64> a_vals({
{xla::complex64(4e-20, 5e-23), 6, 80},
{0, 45, 54},
{0, 54, 146},
});
xla::XlaBuilder builder(TestName());
xla::XlaOp a, q, r;
auto a_data = CreateParameter<xla::complex64>(a_vals, 0, "a", &builder, &a);
xla::QrExplicit(a, true, q, r);
xla::BatchDot(q, r, xla::PrecisionConfig::HIGHEST);
ComputeAndCompare<xla::complex64>(&builder, a_vals, {a_data.get()},
xla::ErrorSpec(1e-4, 1e-4));
}
XLA_TEST_F(QrTest, DuplicateHouseholderExpansion) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a0_vals({
{0, 1, 1},
{1, 0, 1},
{1, 1, 0},
});
xla::Array2D<float> a1_vals({
{1, 0},
{0, 1},
{1, 0},
});
xla::XlaOp a0, q0, r0;
auto a0_data = CreateR2Parameter<float>(a0_vals, 0, "a0", &builder, &a0);
xla::QrExplicit(a0, true, q0, r0);
xla::XlaOp a1, q1, r1;
auto a1_data = CreateR2Parameter<float>(a1_vals, 1, "a1", &builder, &a1);
xla::QrExplicit(a1, true, q1, r1);
xla::BatchDot(q1, r1, xla::PrecisionConfig::HIGHEST);
ComputeAndCompareR2<float>(&builder, a1_vals, {a0_data.get(), a1_data.get()},
xla::ErrorSpec(1e-4, 1e-4));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/qr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/qr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90271454-bf3f-42dc-9c08-ea7578e1c82a | cpp | tensorflow/tensorflow | constants | tensorflow/lite/core/async/interop/c/constants.cc | third_party/xla/xla/hlo/builder/lib/constants_test.cc | #include "tensorflow/lite/core/async/interop/c/constants.h"
extern "C" {
const char kTfLiteSyncTypeNoSyncObj[] = "no_sync_obj";
} | #include "xla/hlo/builder/lib/constants.h"
#include <limits>
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ConstantsTest = ClientLibraryTestBase;
using ::testing::HasSubstr;
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4);
ComputeAndCompareR0<int32_t>(&builder, 4, {});
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32DoesNotAcceptFloats) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4.5);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("Invalid cast"));
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeF32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::F32, -7);
ComputeAndCompareR0<float>(&builder, -7, {});
ConstantR0WithType(&builder, xla::F32, 0.5);
ComputeAndCompareR0<float>(&builder, 0.5, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeS32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<int32_t>(&builder, 42), -3);
ComputeAndCompareR0<int32_t>(&builder, -3, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeF32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<float>(&builder, 42.75), -3.2);
ComputeAndCompareR0<float>(&builder, -3.2, {});
}
XLA_TEST_F(ConstantsTest, ZeroS32) {
XlaBuilder builder(TestName());
Zero(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 0, {});
}
XLA_TEST_F(ConstantsTest, ZeroF32) {
XlaBuilder builder(TestName());
Zero(&builder, F32);
ComputeAndCompareR0<float>(&builder, 0.0, {});
}
XLA_TEST_F(ConstantsTest, ZerosS32) {
XlaBuilder builder(TestName());
Zeros(&builder, ShapeUtil::MakeShape(S32, {2, 2}));
ComputeAndCompareR2<int32_t>(&builder, {{0, 0}, {0, 0}}, {});
}
XLA_TEST_F(ConstantsTest, ZerosLikeF32) {
XlaBuilder builder(TestName());
ZerosLike(ConstantR1<float>(&builder, {1., 2., 3.}));
ComputeAndCompareR1<float>(&builder, {0., 0., 0.}, {});
}
XLA_TEST_F(ConstantsTest, OneS32) {
XlaBuilder builder(TestName());
One(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 1, {});
}
XLA_TEST_F(ConstantsTest, OneF32) {
XlaBuilder builder(TestName());
One(&builder, F32);
ComputeAndCompareR0<float>(&builder, 1., {});
}
XLA_TEST_F(ConstantsTest, EpsilonF32) {
XlaBuilder builder(TestName());
Epsilon(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::epsilon(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueS32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueF32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MinValueS32) {
XlaBuilder builder(TestName());
MinValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueS32) {
XlaBuilder builder(TestName());
MaxValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinValueF32) {
XlaBuilder builder(TestName());
MinValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueF32) {
XlaBuilder builder(TestName());
MaxValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, NanValueF32) {
XlaBuilder builder(TestName());
NanValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::quiet_NaN(),
{});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/c/constants.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/constants_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a2e11f9f-5cdd-42c4-8ac0-3ccdbc5b0ec6 | cpp | tensorflow/tensorflow | logdet | third_party/xla/xla/hlo/builder/lib/logdet.cc | third_party/xla/xla/hlo/builder/lib/logdet_test.cc | #include "xla/hlo/builder/lib/logdet.h"
#include <limits>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/qr.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
SignAndLogDet SLogDet(XlaOp a) {
absl::StatusOr<SignAndLogDet> result =
[&]() -> absl::StatusOr<SignAndLogDet> {
TF_ASSIGN_OR_RETURN(Shape a_shape, a.builder()->GetShape(a));
auto qr = Qr(a);
int64_t m = ShapeUtil::GetDimension(a_shape, -2);
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
if (m != n) {
return InvalidArgument(
"Arguments to logdet must be (batched) square matrices, got: %s",
a_shape.ToString());
}
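    // From A = QR: |det(A)| = prod |R_ii|, so log|det| is the sum of
    // log|R_ii|. The sign is the product of the signs of diag(R) with a
    // factor of -1 for every nonzero tau, since each nontrivial Householder
    // reflector has determinant -1.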
auto log_abs_det = Einsum(Log(Abs(qr.q_and_r)), "...aa->...");
auto sign_diag = Reduce(
Sign(Einsum(qr.q_and_r, "...aa->...a")),
One(a.builder(), a_shape.element_type()),
CreateScalarMultiplyComputation(a_shape.element_type(), a.builder()),
{a_shape.rank() - 2});
auto sliced_taus = SliceInMinorDims(qr.taus, {0}, {n - 1});
auto sign_taus = Reduce(
Select(Ne(sliced_taus, ZerosLike(sliced_taus)),
FullLike(sliced_taus, -1), FullLike(sliced_taus, 1)),
One(a.builder(), a_shape.element_type()),
CreateScalarMultiplyComputation(a_shape.element_type(), a.builder()),
{a_shape.rank() - 2});
return SignAndLogDet{sign_diag * sign_taus, log_abs_det};
}();
if (!result.ok()) {
XlaOp error = a.builder()->ReportError(result.status());
return SignAndLogDet{error, error};
}
return result.value();
}
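// Returns log(det(a)); when the determinant is negative the real-valued log
// is undefined, so NaN is returned for those batch elements.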
XlaOp LogDet(XlaOp a) {
SignAndLogDet slogdet = SLogDet(a);
return Select(
Ge(slogdet.sign, ZerosLike(slogdet.sign)), slogdet.logdet,
FullLike(slogdet.logdet, std::numeric_limits<float>::quiet_NaN()));
}
} | #include "xla/hlo/builder/lib/logdet.h"
#include <limits>
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
namespace {
using LogDetTest = xla::ClientLibraryTestBase;
XLA_TEST_F(LogDetTest, Simple) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a_vals({
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
});
xla::XlaOp a;
auto a_data = CreateR2Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR0<float>(1.f),
xla::LiteralUtil::CreateR0<float>(14.1601f),
xla::LiteralUtil::CreateR0<float>(14.1601f));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, SimpleTriangle) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a_vals({
{4, 6, 8, 10},
{4, -39, 62, 73},
{0, 0, -146, 166},
{4, 6, 8, 320},
});
xla::XlaOp a;
auto a_data = CreateR2Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR0<float>(1.f),
xla::LiteralUtil::CreateR0<float>(15.9131355f),
xla::LiteralUtil::CreateR0<float>(15.9131355f));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, SimpleBatched) {
xla::XlaBuilder builder(TestName());
xla::Array3D<float> a_vals({
{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 456, 106},
{12, 48, 106, 62},
},
{{2, 2, 3, 4}, {4, 5, 6, 7}, {7, 8, 9, 8}, {10, 11, 12, 13}},
{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
});
xla::XlaOp a;
auto a_data = CreateR3Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR1<float>({1.f, 1.f, -1.f, 0.f}),
xla::LiteralUtil::CreateR1<float>(
{14.1601f, 14.3092f, 2.4849f,
-std::numeric_limits<float>::infinity()}),
xla::LiteralUtil::CreateR1<float>(
{14.1601f, 14.3092f, std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity()}));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, LogdetOfLargerMatricesBatched) {
xla::XlaBuilder builder(TestName());
xla::Array<float> a_vals = {
{{7.2393, 1.1413, 4.1883, -4.8272, 3.2831, -0.0568, -2.4776},
{0.4347, 3.4095, 1.6259, -4.7100, 1.5942, 1.4217, -2.8009},
{3.6964, 0.4882, 6.5276, -1.2128, 1.3851, 0.7417, -3.8515},
{-3.7986, -5.1188, -1.9410, 14.0205, -5.4515, 3.1831, 5.1488},
{1.5621, 3.0426, 1.4819, -4.5938, 10.1397, 4.9312, -2.8351},
{-1.5436, -0.0287, -0.1139, 4.4499, 2.5894, 6.1216, 2.7201},
{-3.7241, -2.7670, -3.8162, 4.5961, -1.7251, -0.4190, 8.6562}},
{{3.3789, -2.3607, -1.2471, 2.1503, 0.6062, -0.6057, 1.7748},
{-1.8670, 11.0947, 0.1229, 0.0599, 3.1714, -4.7941, -4.5442},
{-0.6905, -0.0829, 5.2156, 2.9528, 2.6200, 6.1638, 1.8652},
{3.0521, 2.2174, 0.7444, 10.7268, 0.6443, -2.7732, 1.6840},
{1.8479, 3.0821, 4.5671, 2.9254, 6.1338, 5.2066, 2.3662},
{-0.0360, -5.5341, 5.9687, -0.3297, 2.1174, 13.0016, 4.0118},
{0.4380, -4.6683, 3.1548, 0.0924, 0.7176, 6.4679, 6.1819}},
{{10.0487, 4.0350, -0.8471, -1.2887, -0.8172, -3.3698, 1.3191},
{4.8678, 4.6081, 0.8419, -0.2454, -3.2599, -1.2386, 2.4070},
{1.4877, 0.8362, 2.6077, 1.1782, -0.1116, 1.7130, -1.1883},
{-0.9245, -0.7435, -0.9456, 2.5936, 1.9887, -0.1324, -0.1453},
{0.2918, -0.5301, -0.8775, 1.0478, 8.9262, 2.4731, -0.4393},
{-3.5759, -1.5619, 2.4410, 1.3046, 4.2678, 7.3587, -4.0935},
{-1.1187, 0.9150, -1.8253, 0.0390, -2.5684, -4.0778, 4.1447}}};
xla::XlaOp a;
auto a_data = CreateParameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f}),
xla::LiteralUtil::CreateR1<float>({8.93788053, 6.77846303, 7.4852403}),
xla::LiteralUtil::CreateR1<float>({8.93788053, 6.77846303, 7.4852403}));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/logdet.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/logdet_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3cdbd40-8d4c-4144-9518-6b84e52b8158 | cpp | tensorflow/tensorflow | svd | third_party/xla/xla/hlo/builder/lib/svd.cc | third_party/xla/xla/hlo/builder/lib/svd_test.cc | #include "xla/hlo/builder/lib/svd.h"
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
struct HouseHolderResult {
XlaOp v;
XlaOp beta;
XlaOp a;
};
struct JacobiRotation {
XlaOp c;
XlaOp s;
};
struct JacobiUpdate {
XlaOp v;
XlaOp w;
};
struct OneSidedJacobiRotation {
JacobiRotation rot_l;
JacobiRotation rot_r;
};
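// Builds a Householder reflector that zeroes the entries of row `i` of `a`
// to the right of column `j`, and applies it from the right:
//   a <- a * (I - beta * v^T v).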
absl::StatusOr<HouseHolderResult> HouseRow(
XlaOp a, XlaOp i, XlaOp j, XlaOp eps,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
XlaOp zero = ScalarLike(i, 0);
XlaOp x = DynamicSliceInMinorDims(a, {i, zero}, {1, n});
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int k = 0; k < num_batch_dims; ++k) {
batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
auto idx = Iota(builder, ShapeUtil::MakeShape(S32, x_shape.dimensions()),
num_dims - 1);
auto zeros = ZerosLike(x);
auto v = Select(Gt(idx, j), x, zeros);
auto one = ScalarLike(v, 1.0);
auto sigma =
Sqrt(Reduce(Square(v), ScalarLike(v, 0.0),
CreateScalarAddComputation(x_shape.element_type(), builder),
{num_dims - 1}));
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
auto x_0j = DynamicSliceInMinorDims(x, {zero, j}, {1, 1});
auto mu = Mul(sigma, Sqrt(Square(Div(x_0j, sigma, broadcast_dims)) + one),
broadcast_dims);
auto v_0j = Select(
Le(x_0j, ScalarLike(x_0j, 0.0)), Sub(x_0j, mu),
-Mul(sigma, Div(sigma, Add(x_0j, mu), broadcast_dims), broadcast_dims));
auto beta = Div(ScalarLike(v_0j, 2.0),
(Square(Div(sigma, v_0j, broadcast_dims)) + one));
v = Select(
BroadcastInDim(Lt(sigma, eps), x_shape.dimensions(), broadcast_dims), v,
v / v_0j);
v = Select(Eq(idx, j), zeros + one, v);
beta = Select(Lt(Add(sigma, ZerosLike(beta), broadcast_dims), eps),
ZerosLike(beta), beta);
HouseHolderResult result;
result.v = v;
result.beta = beta;
result.a = Sub(a, Mul(beta, BatchDot(BatchDot(a, false, v, true, precision),
v, precision)));
return result;
}
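// Builds a Householder reflector that zeroes the entries of column `j` of
// `a` below row `i`, and applies it from the left:
//   a <- (I - beta * v v^T) * a.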
absl::StatusOr<HouseHolderResult> HouseCol(
XlaOp a, XlaOp i, XlaOp j, XlaOp eps,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
XlaOp zero = ScalarLike(i, 0);
XlaOp x = DynamicSliceInMinorDims(a, {zero, j}, {m, 1});
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int k = 0; k < num_batch_dims; ++k) {
batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
auto idx = Iota(builder, ShapeUtil::MakeShape(S32, x_shape.dimensions()),
num_dims - 2);
auto zeros = ZerosLike(x);
auto v = Select(Gt(idx, i), x, zeros);
auto one = ScalarLike(v, 1.0);
auto sigma =
Sqrt(Reduce(Square(v), ScalarLike(v, 0.0),
CreateScalarAddComputation(x_shape.element_type(), builder),
{num_dims - 2}));
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
auto x_0i = DynamicSliceInMinorDims(x, {i, zero}, {1, 1});
auto mu = Mul(sigma, Sqrt(Square(Div(x_0i, sigma, broadcast_dims)) + one),
broadcast_dims);
auto v_0i = Select(
Le(x_0i, ScalarLike(x_0i, 0.0)), Sub(x_0i, mu),
-Mul(sigma, Div(sigma, Add(x_0i, mu), broadcast_dims), broadcast_dims));
auto beta = Div(ScalarLike(v_0i, 2.0),
(Square(Div(sigma, v_0i, broadcast_dims)) + one));
v = Select(
BroadcastInDim(Lt(sigma, eps), x_shape.dimensions(), broadcast_dims), v,
v / v_0i);
v = Select(Eq(idx, i), zeros + one, v);
beta = Select(Lt(Add(sigma, ZerosLike(beta), broadcast_dims), eps),
ZerosLike(beta), beta);
HouseHolderResult result;
result.v = v;
result.beta = beta;
result.a = Sub(
a, Mul(beta, BatchDot(v, false, BatchDot(v, true, a, false, precision),
false, precision)));
return result;
}
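// Reduces `a` to upper-bidiagonal form by alternating left reflectors (which
// zero each column below the diagonal) and right reflectors (which zero each
// row beyond the first superdiagonal), accumulating the transforms in u and
// v. The trailing loop applies the last column reflectors, which have no
// matching row reflector.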
absl::StatusOr<SVDResult> HouseHolderBidiagonalization(
XlaOp a, XlaOp eps, PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
XlaOp u_init = Broadcast(
IdentityMatrix(builder, a_shape.element_type(), m, m), batch_dims);
XlaOp v_init = Broadcast(
IdentityMatrix(builder, a_shape.element_type(), n, n), batch_dims);
auto while_cond_fn = [&](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto i = values[0];
return Lt(i, ScalarLike(i, n - 2));
};
auto while_body_fn =
[&](absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto i = values[0];
auto one = ScalarLike(i, 1);
auto u = values[1];
auto v = values[2];
auto a = values[3];
auto eps = values[4];
TF_ASSIGN_OR_RETURN(HouseHolderResult house_col,
HouseCol(a, i, i, eps, precision));
u = Sub(u,
Mul(house_col.beta, BatchDot(BatchDot(u, house_col.v, precision),
false, house_col.v, true, precision)));
a = house_col.a;
TF_ASSIGN_OR_RETURN(HouseHolderResult house_row,
HouseRow(a, i, i + one, eps, precision));
v = Sub(v, Mul(house_row.beta,
BatchDot(BatchDot(v, false, house_row.v, true, precision),
house_row.v, precision)));
a = house_row.a;
std::vector<XlaOp> updated_values;
updated_values.reserve(values.size());
updated_values.push_back(i + one);
updated_values.push_back(u);
updated_values.push_back(v);
updated_values.push_back(a);
updated_values.push_back(eps);
return updated_values;
};
std::vector<XlaOp> values(5);
values[0] = Zero(builder, S32);
values[1] = u_init;
values[2] = v_init;
values[3] = a;
values[4] = eps;
TF_ASSIGN_OR_RETURN(values,
WhileLoopHelper(while_cond_fn, while_body_fn, values,
"HouseHolderBidiagonalization", builder));
for (int k = 2; k > 0; --k) {
if (n - k >= 0) {
XlaOp index = ScalarLike(values[0], n - k);
TF_ASSIGN_OR_RETURN(HouseHolderResult house_col,
HouseCol(values[3], index, index, eps, precision));
values[1] = Sub(values[1],
Mul(house_col.beta,
BatchDot(BatchDot(values[1], house_col.v, precision),
false, house_col.v, true, precision)));
values[3] = house_col.a;
}
}
SVDResult result;
result.u = values[1];
result.v = values[2];
result.d = values[3];
return result;
}
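// Computes the Jacobi rotation (c, s) that diagonalizes the symmetric 2x2
// block [[ps, pqs], [pqs, qs]], picking the smaller-magnitude tangent root
// for numerical stability; degenerates to the identity when |pqs| < eps.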
absl::StatusOr<JacobiRotation> MakeJacobi(XlaOp ps, XlaOp qs, XlaOp pqs,
XlaOp eps) {
auto zero = ScalarLike(ps, 0.0);
auto one = ScalarLike(ps, 1.0);
auto two = ScalarLike(ps, 2.0);
auto tau = (qs - ps) / (pqs * two);
auto t_pos = one / (tau + Sqrt(one + Square(tau)));
auto t_neg = -one / (-tau + Sqrt(one + Square(tau)));
auto t = Select(Ge(tau, zero), t_pos, t_neg);
auto c_temp = Rsqrt(one + Square(t));
auto s_temp = t * c_temp;
auto c = Select(Ge(Abs(pqs), eps), c_temp, ZerosLike(c_temp) + one);
auto s = Select(Ge(Abs(pqs), eps), s_temp, ZerosLike(s_temp));
auto rnorm = Rsqrt(Square(c) + Square(s));
JacobiRotation rot;
rot.c = c * rnorm;
rot.s = s * rnorm;
return rot;
}
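// Computes a one-sided Jacobi rotation for the (p, q) block of `a`: a Givens
// rotation first symmetrizes the 2x2 block, then MakeJacobi diagonalizes the
// symmetrized block. rot_l (the composition of both) is applied from the
// left and rot_r from the right.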
absl::StatusOr<OneSidedJacobiRotation> GetOneSidedJacobiRotation(XlaOp a,
XlaOp p,
XlaOp q,
XlaOp eps) {
XlaOp a_pp = DynamicSliceInMinorDims(a, {p, p}, {1, 1});
XlaOp a_pq = DynamicSliceInMinorDims(a, {p, q}, {1, 1});
XlaOp a_qp = DynamicSliceInMinorDims(a, {q, p}, {1, 1});
XlaOp a_qq = DynamicSliceInMinorDims(a, {q, q}, {1, 1});
XlaOp one = ScalarLike(a, 1.0);
XlaOp t = a_pp + a_qq;
XlaOp d = a_qp - a_pq;
XlaOp u = Div(t, d);
XlaOp tmp = Rsqrt(one + Square(u));
JacobiRotation rot;
XlaOp zeros = ZerosLike(tmp);
XlaOp ones = zeros + one;
rot.s = Select(Lt(Abs(d), eps), zeros, -tmp);
rot.c = Select(Lt(Abs(d), eps), ones, Mul(u, tmp));
XlaOp a_pp_new = rot.c * a_pp - rot.s * a_qp;
XlaOp a_pq_new = rot.c * a_pq - rot.s * a_qq;
XlaOp a_qq_new = rot.s * a_pq + rot.c * a_qq;
OneSidedJacobiRotation rots;
TF_ASSIGN_OR_RETURN(rots.rot_r,
MakeJacobi(a_pp_new, a_qq_new, a_pq_new, eps));
rots.rot_l.c = rot.c * rots.rot_r.c - rot.s * rots.rot_r.s;
rots.rot_l.s = rot.s * rots.rot_r.c + rot.c * rots.rot_r.s;
return rots;
}
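// Applies one Jacobi update: rotates rows/columns p and q of d, explicitly
// zeroes the (p, q) and (q, p) entries, and applies the same rotations to
// the accumulated u and v, renormalizing each updated column.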
absl::StatusOr<SVDResult> OneSidedJacobiUpdate(SVDResult svd_result, XlaOp p,
XlaOp q, XlaOp eps) {
XlaOp u = svd_result.u;
XlaOp v = svd_result.v;
XlaOp d = svd_result.d;
XlaBuilder* builder = d.builder();
TF_ASSIGN_OR_RETURN(Shape d_shape, builder->GetShape(d));
const int64_t num_dims = d_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(d_shape, i);
}
const int64_t m = ShapeUtil::GetDimension(d_shape, -2);
const int64_t n = ShapeUtil::GetDimension(d_shape, -1);
TF_ASSIGN_OR_RETURN(OneSidedJacobiRotation onesided_jacobi,
GetOneSidedJacobiRotation(d, p, q, eps));
auto zero = ScalarLike(p, 0);
std::vector<int64_t> pq_dims(batch_dims.begin(), batch_dims.end());
pq_dims.push_back(1);
pq_dims.push_back(1);
auto pq_zero = ScalarLike(d, 0.0);
auto pq_zeros = Broadcast(pq_zero, pq_dims);
std::vector<int64_t> broadcast_dims(batch_dims.size());
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims.push_back(num_dims - 1);
auto slice_p = DynamicSliceInMinorDims(d, {p, zero}, {1, n});
auto slice_q = DynamicSliceInMinorDims(d, {q, zero}, {1, n});
auto slice_p_new =
onesided_jacobi.rot_l.c * slice_p - onesided_jacobi.rot_l.s * slice_q;
auto slice_q_new =
onesided_jacobi.rot_l.s * slice_p + onesided_jacobi.rot_l.c * slice_q;
d = DynamicUpdateSliceInMinorDims(d, slice_p_new, {p, zero});
d = DynamicUpdateSliceInMinorDims(d, slice_q_new, {q, zero});
slice_p = DynamicSliceInMinorDims(d, {zero, p}, {m, 1});
slice_q = DynamicSliceInMinorDims(d, {zero, q}, {m, 1});
slice_p_new =
onesided_jacobi.rot_r.c * slice_p - onesided_jacobi.rot_r.s * slice_q;
slice_q_new =
onesided_jacobi.rot_r.s * slice_p + onesided_jacobi.rot_r.c * slice_q;
d = DynamicUpdateSliceInMinorDims(d, slice_p_new, {zero, p});
d = DynamicUpdateSliceInMinorDims(d, slice_q_new, {zero, q});
d = DynamicUpdateSliceInMinorDims(d, pq_zeros, {p, q});
d = DynamicUpdateSliceInMinorDims(d, pq_zeros, {q, p});
slice_p = DynamicSliceInMinorDims(u, {zero, p}, {m, 1});
slice_q = DynamicSliceInMinorDims(u, {zero, q}, {m, 1});
slice_p_new =
onesided_jacobi.rot_l.c * slice_p - onesided_jacobi.rot_l.s * slice_q;
slice_p_new = Mul(
slice_p_new,
Rsqrt(Reduce(Square(slice_p_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
slice_q_new =
onesided_jacobi.rot_l.s * slice_p + onesided_jacobi.rot_l.c * slice_q;
slice_q_new = Mul(
slice_q_new,
Rsqrt(Reduce(Square(slice_q_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
u = DynamicUpdateSliceInMinorDims(u, slice_p_new, {zero, p});
u = DynamicUpdateSliceInMinorDims(u, slice_q_new, {zero, q});
slice_p = DynamicSliceInMinorDims(v, {zero, p}, {n, 1});
slice_q = DynamicSliceInMinorDims(v, {zero, q}, {n, 1});
slice_p_new =
onesided_jacobi.rot_r.c * slice_p - onesided_jacobi.rot_r.s * slice_q;
slice_p_new = Mul(
slice_p_new,
Rsqrt(Reduce(Square(slice_p_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
slice_q_new =
onesided_jacobi.rot_r.s * slice_p + onesided_jacobi.rot_r.c * slice_q;
slice_q_new = Mul(
slice_q_new,
Rsqrt(Reduce(Square(slice_q_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
v = DynamicUpdateSliceInMinorDims(v, slice_p_new, {zero, p});
v = DynamicUpdateSliceInMinorDims(v, slice_q_new, {zero, q});
svd_result.d = d;
svd_result.u = u;
svd_result.v = v;
return svd_result;
}
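// Builds the Jacobi convergence predicate: entry (i, j) is true while the
// off-diagonal element still satisfies w_ij^2 > |d_i| * |d_j| * eps^2. The
// products are computed in F32 for BF16/F16 inputs to avoid underflow.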
absl::StatusOr<XlaOp> ComputeToleranceComparison(XlaOp w, XlaOp epsilon) {
XlaBuilder* builder = w.builder();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(w));
auto num_dims = static_cast<int32_t>(shape.rank());
int64_t n = shape.dimensions(num_dims - 1);
shape.set_dimensions(num_dims - 2, n);
auto w_sliced = SliceInMinorDims(w, {0, 0}, {n, n});
auto diag = GetMatrixDiagonal(w_sliced);
diag = Select(Lt(diag, ZerosLike(diag)), -diag, diag);
std::vector<int64_t> broadcasted_dims(num_dims - 1);
std::iota(broadcasted_dims.begin(), broadcasted_dims.end(), 0);
auto broadcast_to_rows =
BroadcastInDim(diag, shape.dimensions(), broadcasted_dims);
broadcasted_dims.back() = num_dims - 1;
auto broadcast_to_columns =
BroadcastInDim(diag, shape.dimensions(), broadcasted_dims);
XlaOp tolerance;
if (builder->GetShape(epsilon)->element_type() == BF16 ||
builder->GetShape(epsilon)->element_type() == F16) {
auto upscale_eps = ConvertElementType(epsilon, F32);
tolerance = ConvertElementType(broadcast_to_rows, F32) *
ConvertElementType(broadcast_to_columns, F32) * upscale_eps *
upscale_eps;
tolerance = ConvertElementType(tolerance,
builder->GetShape(epsilon)->element_type());
} else {
tolerance = broadcast_to_rows * broadcast_to_columns * epsilon * epsilon;
}
return Lt(tolerance, Square(Select(GetDiagonalMask(w_sliced),
ZerosLike(w_sliced), w_sliced)));
}
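// Runs cyclic one-sided Jacobi sweeps: the outer loop counts sweeps (bounded
// by max_sweep_updates and the convergence test above), while the two inner
// loops visit every index pair (p, q) with p < q.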
absl::StatusOr<std::vector<XlaOp>> WhileLoopFn(
absl::Span<const XlaOp> initial_values,
int matrix_dimension,
int max_sweep_updates,
absl::string_view name,
XlaBuilder* builder) {
auto while_cond_fn = [&](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto k = values[0];
auto max_sweeps = ScalarLike(k, max_sweep_updates);
auto sweep_update_cond = Gt(max_sweeps, k);
TF_ASSIGN_OR_RETURN(auto tolerance_comparison,
ComputeToleranceComparison(values[3], values[4]));
auto tolerance_cond = ReduceAll(
tolerance_comparison, xla::ConstantR0<bool>(cond_builder, false),
CreateScalarOrComputation(PRED, cond_builder));
return And(sweep_update_cond, tolerance_cond);
};
auto while_body_fn =
[&](absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto while_cond_fn_inner =
[&](absl::Span<const XlaOp> values_inner,
XlaBuilder* inner_cond_builder) -> absl::StatusOr<XlaOp> {
auto p = values_inner[0];
return Lt(p, ScalarLike(p, matrix_dimension - 1));
};
auto while_body_fn_inner = [&](absl::Span<const XlaOp> values_inner,
XlaBuilder* inner_body_builder)
-> absl::StatusOr<std::vector<XlaOp>> {
auto while_cond_fn_innermost =
[&](absl::Span<const XlaOp> values_innermost,
XlaBuilder* innermost_cond_builder) -> absl::StatusOr<XlaOp> {
auto q = values_innermost[1];
return Lt(q, ScalarLike(q, matrix_dimension));
};
auto while_body_fn_innermost =
[&](absl::Span<const XlaOp> values_innermost,
XlaBuilder* innermost_body_builder)
-> absl::StatusOr<std::vector<XlaOp>> {
auto p = values_innermost[0];
auto q = values_innermost[1];
SVDResult onesided_jacobi_update;
onesided_jacobi_update.u = values_innermost[2];
onesided_jacobi_update.v = values_innermost[3];
onesided_jacobi_update.d = values_innermost[4];
auto eps = values_innermost[5];
TF_ASSIGN_OR_RETURN(
onesided_jacobi_update,
OneSidedJacobiUpdate(onesided_jacobi_update, p, q, eps));
std::vector<XlaOp> updated_values_innermost;
updated_values_innermost.reserve(values_innermost.size());
updated_values_innermost.push_back(p);
updated_values_innermost.push_back(q + ScalarLike(q, 1));
updated_values_innermost.push_back(onesided_jacobi_update.u);
updated_values_innermost.push_back(onesided_jacobi_update.v);
updated_values_innermost.push_back(onesided_jacobi_update.d);
updated_values_innermost.push_back(eps);
return updated_values_innermost;
};
std::vector<XlaOp> values_innermost(6);
auto p = values_inner[0];
auto q = p + ScalarLike(p, 1);
values_innermost[0] = p;
values_innermost[1] = q;
values_innermost[2] = values_inner[1];
values_innermost[3] = values_inner[2];
values_innermost[4] = values_inner[3];
values_innermost[5] = values_inner[4];
TF_ASSIGN_OR_RETURN(
values_innermost,
WhileLoopHelper(while_cond_fn_innermost, while_body_fn_innermost,
values_innermost, absl::StrCat(name, "-Innermost"),
inner_body_builder));
std::vector<XlaOp> updated_values_inner;
updated_values_inner.reserve(values_inner.size());
updated_values_inner.push_back(p + ScalarLike(p, 1));
updated_values_inner.push_back(values_innermost[2]);
updated_values_inner.push_back(values_innermost[3]);
updated_values_inner.push_back(values_innermost[4]);
updated_values_inner.push_back(values_innermost[5]);
return updated_values_inner;
};
XlaOp k = values[0];
std::vector<XlaOp> values_inner(5);
values_inner[0] = ScalarLike(k, 0);
values_inner[1] = values[1];
values_inner[2] = values[2];
values_inner[3] = values[3];
values_inner[4] = values[4];
TF_ASSIGN_OR_RETURN(
values_inner,
WhileLoopHelper(while_cond_fn_inner, while_body_fn_inner, values_inner,
absl::StrCat(name, "-Inner"), body_builder));
std::vector<XlaOp> updated_values;
updated_values.reserve(values_inner.size());
updated_values.push_back(k + ScalarLike(k, 1));
updated_values.push_back(values_inner[1]);
updated_values.push_back(values_inner[2]);
updated_values.push_back(values_inner[3]);
updated_values.push_back(values_inner[4]);
return updated_values;
};
std::vector<XlaOp> values;
TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn,
initial_values, name, builder));
return values;
}
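// Post-processing: flips signs so all singular values are non-negative
// (absorbing the sign into v), sorts the singular values in decreasing
// order, permutes the columns of u and v to match, and renormalizes the
// singular vectors.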
absl::StatusOr<SVDResult> SortBySingularValuesAndPostProcessing(
SVDResult result) {
XlaBuilder* builder = result.d.builder();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(result.d));
const int64_t num_dims = shape.rank();
auto dimensions = shape.dimensions();
const int64_t m = ShapeUtil::GetDimension(shape, -2);
const int64_t n = ShapeUtil::GetDimension(shape, -1);
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
auto d = GetMatrixDiagonal(result.d);
auto zeros = ZerosLike(d);
auto one = ScalarLike(d, 1.0);
auto sign = Select(Ge(d, zeros), zeros + one, zeros - one);
d = Select(Ge(d, zeros), d, -d);
result.v = Mul(result.v, sign, broadcast_dims);
d = BroadcastInDim(d, dimensions, broadcast_dims);
XlaOp sort_u_result =
Sort({d, SliceInMinorDims(result.u, {0, 0}, {m, n})},
CreateScalarGtComputation(
{shape.element_type(), shape.element_type()}, builder),
num_dims - 1);
XlaOp sort_v_result =
Sort({SliceInMinorDims(d, {0, 0}, {n, n}), result.v},
CreateScalarGtComputation(
{shape.element_type(), shape.element_type()}, builder),
num_dims - 1);
result.d = GetMatrixDiagonal(GetTupleElement(sort_v_result, 0));
result.v = GetTupleElement(sort_v_result, 1);
result.v = Mul(
result.v,
Rsqrt(Reduce(Square(result.v), ScalarLike(d, 0.0),
CreateScalarAddComputation(shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
result.u = ConcatInDim(builder,
{GetTupleElement(sort_u_result, 1),
SliceInMinorDims(result.u, {0, n}, {m, m})},
num_dims - 1);
result.u = Mul(
result.u,
Rsqrt(Reduce(Square(result.u), ScalarLike(d, 0.0),
CreateScalarAddComputation(shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
return result;
}
}
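// Jacobi SVD. The input is transposed first when m < n so the working matrix
// is tall; it is then bidiagonalized with Householder reflectors, and cyclic
// one-sided Jacobi iterations run until convergence (or max_iter sweeps).
// u is m x m, v is n x n, and d holds the singular values.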
SVDResult SVD(XlaOp a, int64_t max_iter, float epsilon,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
auto return_error = [&](const absl::Status& status) {
SVDResult result;
result.u = builder->ReportError(status);
result.v = builder->ReportError(status);
result.d = builder->ReportError(status);
return result;
};
auto shape_with_status = builder->GetShape(a);
if (!shape_with_status.status().ok()) {
return return_error(shape_with_status.status());
}
Shape a_shape = shape_with_status.value();
const int64_t num_dims = a_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);
}
int64_t m = ShapeUtil::GetDimension(a_shape, -2);
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
bool maybe_transpose = m < n;
if (maybe_transpose) {
a = TransposeInMinorDims(a);
std::swap(m, n);
}
auto eps = ScalarLike(a, epsilon);
auto svd_result_or = HouseHolderBidiagonalization(a, eps, precision);
if (!svd_result_or.ok()) {
return return_error(svd_result_or.status());
}
SVDResult svd_result = svd_result_or.value();
auto output_with_status = WhileLoopFn(
{
Zero(builder, S32),
svd_result.u,
svd_result.v,
svd_result.d,
eps,
},
n,
max_iter,
"CyclicOneSidedJacobi",
builder);
if (!output_with_status.status().ok()) {
return return_error(output_with_status.status());
}
auto output = output_with_status.value();
svd_result.u = output[1];
svd_result.v = output[2];
svd_result.d = output[3];
svd_result_or = SortBySingularValuesAndPostProcessing(svd_result);
if (!svd_result_or.ok()) {
return return_error(svd_result_or.status());
}
svd_result = svd_result_or.value();
if (maybe_transpose) {
std::swap(svd_result.u, svd_result.v);
}
return svd_result;
}
} | #include "xla/hlo/builder/lib/svd.h"
#include <numeric>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
class SVDTest : public ClientLibraryTestBase {
protected:
void SetUp() override {
ClientLibraryTestBase::SetUp();
batch_3d_4x5_ = Array3D<float>{
{
{4, 6, 8, 10, 1},
{6, 45, 54, 63, 1},
{8, 54, 146, 166, 1},
{10, 63, 166, 310, 1},
},
{
{16, 24, 8, 12, 6},
{24, 61, 82, 48, 5},
{8, 82, 100, 6, 4},
{12, 48, 6, 62, 3},
},
};
}
void TearDown() override { ClientLibraryTestBase::TearDown(); }
Array3D<float> GetUnitMatrix3D(int32_t batch_dim, int32_t mat_dim) {
Array3D<float> result(batch_dim, mat_dim, mat_dim, 0.0);
for (int i = 0; i < batch_dim; ++i) {
for (int j = 0; j < mat_dim; ++j) {
result({i, j, j}) = 1.0;
}
}
return result;
}
XlaOp ComputeMatmulUDVT(SVDResult result, XlaBuilder* builder) {
Shape u_shape = builder->GetShape(result.u).value();
Shape v_shape = builder->GetShape(result.v).value();
int64_t m = ShapeUtil::GetDimension(u_shape, -1);
int64_t n = ShapeUtil::GetDimension(v_shape, -1);
auto v = result.v;
auto u = result.u;
auto d = result.d;
if (m > n) {
u = SliceInMinorDims(u, {0, 0}, {m, n});
} else if (m < n) {
v = SliceInMinorDims(v, {0, 0}, {n, m});
}
int num_dims = u_shape.rank();
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
return BatchDot(Mul(u, d, broadcast_dims), TransposeInMinorDims(v),
PrecisionConfig::HIGHEST);
}
XlaOp GetAverageAbsoluteError(XlaOp m1, XlaOp m2, XlaBuilder* builder) {
Shape shape = builder->GetShape(m1).value();
int64_t size = 1;
for (auto d : shape.dimensions()) {
size *= d;
}
return ReduceAll(Abs(m1 - m2), ConstantR0WithType(builder, F32, 0),
CreateScalarAddComputation(F32, builder)) /
ConstantR0WithType(builder, F32, size);
}
Array2D<float> GenerateRandomMatrix(int xsize, int ysize) {
Array2D<float> result{xsize, ysize, 0.0};
    result.FillRandom(/*stddev=*/10, /*mean=*/2);
return result;
}
Array3D<float> batch_3d_4x5_;
};
XLA_TEST_F(SVDTest, Simple2D) {
XlaBuilder builder(TestName());
Array2D<float> simple_2d_4x4_ = Array2D<float>{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
};
XlaOp a;
auto a_data = CreateR2Parameter<float>(simple_2d_4x4_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-6);
ComputeMatmulUDVT(result, &builder);
ComputeAndCompareR2<float>(&builder, simple_2d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Test_VWVt_EQ_A_2x4x5) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
ComputeMatmulUDVT(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x5_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Test_Orthogonality_U) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
ComputeMatmulUDVT(result, &builder);
BatchDot(result.u, TransposeInMinorDims(result.u));
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(2, 4), {a_data.get()},
ErrorSpec(1e-2, 1e-2));
}
XLA_TEST_F(SVDTest, Test_Orthogonality_V) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
BatchDot(result.v, TransposeInMinorDims(result.v), PrecisionConfig::HIGHEST);
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(2, 5), {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, TestSingleValuesMatchNumpy) {
XlaBuilder builder(TestName());
auto singular_values = Array2D<float>{
{431.05153007, 49.88334164, 20.94464584, 3.24845468},
{179.73128591, 68.05162245, 21.77679503, 13.94319712},
};
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
Add(result.d, ZerosLike(result.d));
ComputeAndCompareR2<float>(&builder, singular_values, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest,
DISABLED_ON_INTERPRETER(Various_Size_Random_Matrix_512x128)) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 128);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Various_Size_Random_Matrix_128x256) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(128, 256);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Various_Size_Random_Matrix_256x128) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(256, 128);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest,
DISABLED_ON_INTERPRETER(Various_Size_Random_Matrix_128x512)) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(128, 512);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, DISABLED_ON_CPU(DISABLED_ON_INTERPRETER(
Various_Size_Random_Matrix_512x256))) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 256);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, DISABLED_ON_GPU(DISABLED_ON_CPU(DISABLED_ON_INTERPRETER(
Various_Size_Random_Matrix_512x512)))) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 512);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/svd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/svd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b007e8c5-3217-4f91-b2a8-1b4f0950743e | cpp | tensorflow/tensorflow | pooling | tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc | tensorflow/lite/delegates/gpu/cl/kernels/pooling_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/pooling.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
absl::Status GenerateMaxPoolingCode(const Pooling2DAttributes& attr,
const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
if (attr.padding.prepended.h > attr.kernel.h ||
attr.padding.prepended.w > attr.kernel.w) {
return absl::InvalidArgumentError("Padding is bigger than kernel.");
}
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"stride", int2(attr.strides.w, attr.strides.h)},
{"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)},
{"window_h", attr.kernel.h},
{"window_w", attr.kernel.w},
};
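  // The shader initializes the accumulator to -infinity, takes the max over
  // the pooling window while skipping out-of-bounds taps, and (when
  // output_indices is set) records the flat in-window index of each maximum.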
std::string source = R"(
const highp float inf = -(1.0f / 0.0f);
value_0 = vec4(inf);)";
if (attr.output_indices) {
source += R"(
ivec4 value_1;
)";
}
source += R"(
ivec2 base_coord = gid.xy * $stride$ - $offset$;
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = base_coord + ivec2(b, a);
if (coord.x < 0 || coord.y < 0 || coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
}
vec4 input_ = $input_data_0[coord.x, coord.y, gid.z]$;)";
if (attr.output_indices) {
source += R"(
int window_index = a * $window_w$ + b;
if (input_.x > value_0.x) value_1.x = window_index;
if (input_.y > value_0.y) value_1.y = window_index;
if (input_.z > value_0.z) value_1.z = window_index;
if (input_.w > value_0.w) value_1.w = window_index;)";
}
source += R"(
value_0 = max(value_0, input_);
}
}
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
absl::Status GenerateAveragePoolingCode(
const Pooling2DAttributes& attr, const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"stride", int2(attr.strides.w, attr.strides.h)},
{"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)},
{"window_h", attr.kernel.h},
{"window_w", attr.kernel.w},
};
auto x_in_bounds = [input_width = ctx.input_shapes[0][2],
kernel_width = attr.kernel.w](int64_t x) -> bool {
return 0 <= x && x + kernel_width <= input_width;
};
auto y_in_bounds = [input_height = ctx.input_shapes[0][1],
kernel_height = attr.kernel.h](int64_t y) -> bool {
return 0 <= y && y + kernel_height <= input_height;
};
const int64_t output_shape_max_y = ctx.output_shapes[0][1] - 1;
const int64_t output_shape_max_x = ctx.output_shapes[0][2] - 1;
const int64_t base_x = -attr.padding.prepended.w;
const int64_t base_y = -attr.padding.prepended.h;
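  // If every window tap is statically in-bounds for all output positions,
  // emit the cheaper shader that divides by the fixed window size; otherwise
  // count the valid taps per window.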
const bool bounds_check_necessary =
!(x_in_bounds(base_x) &&
x_in_bounds(base_x + output_shape_max_x * attr.strides.w) &&
y_in_bounds(base_y) &&
y_in_bounds(base_y + output_shape_max_y * attr.strides.h));
std::string source = bounds_check_necessary ?
R"(
int window_size = 0;
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = gid.xy * $stride$ - $offset$ + ivec2(b, a);
if (coord.x >= 0 && coord.y >= 0 && coord.x < $input_data_0_w$ && coord.y < $input_data_0_h$) {
value_0 += $input_data_0[coord.x, coord.y, gid.z]$;
window_size++;
}
}
}
value_0 /= float(window_size);
)"
:
R"(
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = gid.xy * $stride$ - $offset$ + ivec2(b, a);
value_0 += $input_data_0[coord.x, coord.y, gid.z]$;
}
}
value_0 /= float($window_h$ * $window_w$);
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
class Pooling : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const Pooling2DAttributes&>(ctx.op_attr);
switch (attr.type) {
case PoolingType::AVERAGE:
return GenerateAveragePoolingCode(attr, ctx, generated_code);
case PoolingType::MAX:
return GenerateMaxPoolingCode(attr, ctx, generated_code);
default:
return absl::InvalidArgumentError("Incorrect attributes' type.");
}
}
};
}
std::unique_ptr<NodeShader> NewPoolingNodeShader() {
return std::make_unique<Pooling>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/pooling_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, AveragePooling) {
auto status = AveragePoolingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, AveragePoolingNonEmptyPadding) {
auto status = AveragePoolingNonEmptyPaddingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MaxPooling) {
auto status = MaxPoolingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MaxPoolingIndices) {
auto status = MaxPoolingIndicesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/pooling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
816e5959-39dc-40a9-be92-53a02da28dc4 | cpp | tensorflow/tensorflow | sorting | third_party/xla/xla/hlo/builder/lib/sorting.cc | third_party/xla/xla/service/gpu/tests/sorting_test.cc | #include "xla/hlo/builder/lib/sorting.h"
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
XlaOp TopK(XlaOp input, int64_t k, PrimitiveType index_type) {
XlaBuilder* const builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
int last_dim = input_shape.dimensions_size() - 1;
int64_t last_dim_size = input_shape.dimensions(last_dim);
const int64_t kPerPartitionSize = 8192;
const int64_t kLastDimSizeThreshold = 524288;
const int64_t kMinNumPartitions = 8;
const int64_t kMinimalK = 1000;
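    // Heuristic: for very long rows and moderate k, a partitioned top-k
    // (per-partition sort followed by iterative merging) beats one full sort.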
if ((k >= kMinimalK) && (k < kPerPartitionSize) &&
(kPerPartitionSize / k > 2) && last_dim_size >= kLastDimSizeThreshold) {
int64_t num_partitions =
CeilOfRatio(last_dim_size - k, kPerPartitionSize - k);
if (num_partitions >= kMinNumPartitions) {
return TopKWithPartitions(input, k, num_partitions, index_type);
}
}
Shape iota_shape =
ShapeUtil::MakeShape(index_type, input_shape.dimensions());
XlaOp iota = Iota(builder, iota_shape, last_dim);
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (input_shape.is_dynamic_dimension(i)) {
iota = SetDimensionSize(iota, GetDimensionSize(input, i), i);
}
}
auto input_dims = input_shape.dimensions();
constexpr int32_t kLow16BitsLimit = int32_t{1} << 16;
constexpr int32_t kLow16BitsMask = kLow16BitsLimit - 1;
constexpr int32_t kHigh16BitsMask = ~kLow16BitsMask;
constexpr int kMaxLastDimSizeForSmallBatches = 1500;
constexpr int kSmallBatchSizeThreshold = 8;
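    // For small BF16 rows, pack each value (upper 16 bits, remapped so that
    // integer comparison matches float ordering) together with its index
    // (lower 16 bits, XOR-ed in so ties break toward the smaller index) into
    // one S32 word, letting a single-operand sort produce both outputs.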
const bool use_packed_bf16_sort =
(input_shape.element_type() == BF16 &&
last_dim_size < kLow16BitsLimit &&
(last_dim_size < kMaxLastDimSizeForSmallBatches ||
(input_shape.rank() == 2 &&
input_shape.dimensions(0) >= kSmallBatchSizeThreshold)));
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
limit_indices[last_dim] = k;
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
XlaOp values;
XlaOp indices;
if (use_packed_bf16_sort) {
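      // Involution between IEEE sign-magnitude bit patterns and a monotone
      // integer encoding: flipping the non-sign bits of negative values
      // makes signed integer comparison agree with float ordering.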
auto sign_magnitude_to_from_ones_complement = [builder](const XlaOp in) {
constexpr int32_t kAllNonSignBits = 0x7fffffff;
XlaOp in_s32 = BitcastConvertType(in, S32);
return Xor(
And(in_s32, ConstantR0<int32_t>(builder, kAllNonSignBits)),
ShiftRightArithmetic(in_s32, ConstantR0<int32_t>(builder, 31)));
};
XlaOp input_f32_trimmed =
Or(sign_magnitude_to_from_ones_complement(
BitcastConvertType(ConvertElementType(input, F32), S32)),
ConstantR0<int32_t>(builder, kLow16BitsMask));
XlaOp input_and_iota = Xor(input_f32_trimmed, iota);
XlaOp sort_result_raw =
Sort({input_and_iota},
CreateScalarGtComputation({index_type}, builder), last_dim,
               /*is_stable=*/false);
sort_result_raw =
Slice(sort_result_raw, start_indices, limit_indices, strides);
sort_result_raw = RemoveDynamicDimension(sort_result_raw, last_dim);
values = ConvertElementType(
BitcastConvertType(
And(sign_magnitude_to_from_ones_complement(sort_result_raw),
ConstantR0<int32_t>(builder, kHigh16BitsMask)),
F32),
BF16);
indices = And(
Xor(sort_result_raw, ConstantR0<int32_t>(builder, kLow16BitsMask)),
ConstantR0<int32_t>(builder, kLow16BitsMask));
} else {
XlaOp sort_result =
Sort({input, iota},
CreateScalarGtComputation(
{input_shape.element_type(), index_type}, iota.builder()),
               last_dim, /*is_stable=*/true);
values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
values = RemoveDynamicDimension(values, last_dim);
indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
indices = RemoveDynamicDimension(indices, last_dim);
}
return Tuple(builder, {values, indices});
});
}
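// Partitioned top-k: sorts the first partition of the last dimension, then
// for each remaining partition concatenates it with the current k best
// candidates, re-sorts, and keeps the top k.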
XlaOp TopKWithPartitions(XlaOp input, int64_t k, int64_t num_partitions,
PrimitiveType index_type) {
XlaBuilder* const builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
int last_dim = input_shape.dimensions_size() - 1;
auto input_dims = input_shape.dimensions();
int64_t last_dim_size = input_shape.dimensions(last_dim);
const int64_t per_partition_size =
CeilOfRatio(last_dim_size, num_partitions);
if (k >= per_partition_size) {
return TopK(input, k, index_type);
}
Shape iota_shape =
ShapeUtil::MakeShape(index_type, input_shape.dimensions());
XlaOp iota = Iota(builder, iota_shape, last_dim);
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (input_shape.is_dynamic_dimension(i)) {
iota = SetDimensionSize(iota, GetDimensionSize(input, i), i);
}
}
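// Loop body for the p-th remaining partition: slice the chunk starting at
// (p + 1) * per_partition_size, concatenate it with the running top-k
// values/indices, sort descending, and keep the first k entries.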
auto topk_body_fn =
[&](XlaOp partition, absl::Span<const XlaOp> values_and_indices,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto values = values_and_indices[0];
auto indices = values_and_indices[1];
auto input = values_and_indices[2];
auto iota = values_and_indices[3];
XlaOp start =
Mul(Add(partition, One(builder, index_type)),
ConstantR0WithType(builder, index_type, per_partition_size));
XlaOp sliced_input =
DynamicSliceInMinorDims(input, {start}, {per_partition_size});
XlaOp sliced_indices =
DynamicSliceInMinorDims(iota, {start}, {per_partition_size});
sliced_input = ConcatInDim(builder, {values, sliced_input}, last_dim);
sliced_indices =
ConcatInDim(builder, {indices, sliced_indices}, last_dim);
XlaOp sort_result = Sort(
{sliced_input, sliced_indices},
CreateScalarGtComputation({input_shape.element_type(), index_type},
sliced_indices.builder()),
last_dim, /*is_stable=*/true);
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
start_indices[last_dim] = 0;
limit_indices[last_dim] = k;
values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
return std::vector<XlaOp>{values, indices, input, iota};
};
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
start_indices[last_dim] = 0;
limit_indices[last_dim] = per_partition_size;
XlaOp sliced_input = Slice(input, start_indices, limit_indices, strides);
XlaOp sliced_indices = Slice(iota, start_indices, limit_indices, strides);
XlaOp sort_result =
Sort({sliced_input, sliced_indices},
CreateScalarGtComputation({input_shape.element_type(), index_type},
sliced_indices.builder()),
last_dim, /*is_stable=*/true);
start_indices[last_dim] = 0;
limit_indices[last_dim] = k;
XlaOp values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
XlaOp indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
TF_ASSIGN_OR_RETURN(
auto values_and_indices,
ForEachIndex(num_partitions - 1, index_type, topk_body_fn,
{values, indices, input, iota}, "topk_with_partition",
builder));
return Tuple(builder, {values_and_indices[0], values_and_indices[1]});
});
}
} | #include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "Eigen/Core"
#include "xla/error_spec.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class SortingTest : public GpuCodegenTest {
protected:
SortingTest() {}
};
TEST_F(SortingTest, Regression1) {
const char* hlo_text = R"(
HloModule TestModule
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY TestComputation {
x = f32[3, 2]{1, 0} parameter(0)
x.copy = f32[3, 2]{0, 1} copy(x)
ROOT sort = f32[3, 2]{0, 1} sort(x.copy), dimensions={1}, to_apply=compare
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
static constexpr int kRadixSortTestSize = 100000;
template <typename T>
bool CheckOrder(T lhs, T rhs, bool asc, int pos) {
if (asc) {
EXPECT_TRUE(lhs <= rhs) << lhs << " > " << rhs << " @" << pos;
} else {
EXPECT_TRUE(lhs >= rhs) << lhs << " < " << rhs << " @" << pos;
}
return lhs != rhs;
}
bool CompareAdjacentValues(const Literal& literal, int index, bool ascending) {
if (primitive_util::IsFloatingPointType(literal.shape().element_type())) {
return CheckOrder(*literal.GetAsDouble({index - 1}),
*literal.GetAsDouble({index}), ascending, index);
} else {
return CheckOrder(*literal.GetIntegralAsS64({index - 1}),
*literal.GetIntegralAsS64({index}), ascending, index);
}
}
std::string GetTypeName(PrimitiveType type) {
return absl::AsciiStrToLower(PrimitiveType_Name(type));
}
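// The parameterized tests below run the "__cub$DeviceRadixSort" custom call
// on kRadixSortTestSize random elements, checking that adjacent outputs are
// monotonically ordered and that at least one adjacent pair differs (i.e.
// the output buffer was actually written).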
class CubSortKeysTest : public GpuCodegenTest,
public ::testing::WithParamInterface<
std::tuple<std::shared_ptr<Literal>, bool>> {};
TEST_P(CubSortKeysTest, SortKeys) {
constexpr char kHloTemplate[] = R"(
HloModule TestModule
ENTRY %main {
%input = $0[$1] parameter(0)
%sort = ($0[$1], u8[$2]) custom-call(%input),
custom_call_target="__cub$$DeviceRadixSort",
backend_config="{\"descending\": $3}"
ROOT %gte = get-tuple-element(%sort), index=0
}
)";
bool ascending = std::get<1>(GetParam());
std::string hlo = absl::Substitute(
kHloTemplate,
GetTypeName(std::get<0>(GetParam())->shape().element_type()),
kRadixSortTestSize,
kRadixSortTestSize * 10,
ascending ? "false" : "true");
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
std::vector<Literal*> literals = {std::get<0>(GetParam()).get()};
auto result = ExecuteAndTransfer(std::move(module), literals);
bool has_diff = false;
for (int i = 1; i < kRadixSortTestSize; ++i) {
has_diff |= CompareAdjacentValues(result, i, ascending);
}
EXPECT_TRUE(has_diff) << "uninitialized output";
}
class CubSortPairsTest
: public GpuCodegenTest,
public ::testing::WithParamInterface<
std::tuple<std::shared_ptr<Literal>, PrimitiveType, bool>> {};
TEST_P(CubSortPairsTest, SortPairs) {
constexpr char kHloTemplate[] = R"(
HloModule TestModule
ENTRY %main {
%keys = $0[$2] parameter(0)
%values = $1[$2] convert(%keys)
ROOT %sort = ($0[$2], $1[$2], u8[$3]) custom-call(%keys, %values),
custom_call_target="__cub$$DeviceRadixSort",
backend_config="{\"descending\": $4}"
}
)";
bool ascending = std::get<2>(GetParam());
std::string hlo = absl::Substitute(
kHloTemplate,
GetTypeName(std::get<0>(GetParam())->shape().element_type()),
GetTypeName(std::get<1>(GetParam())), kRadixSortTestSize,
kRadixSortTestSize * 20,
ascending ? "false" : "true");
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
std::vector<Literal*> literals = {std::get<0>(GetParam()).get()};
auto result_tuple = ExecuteAndTransfer(std::move(module), literals);
std::vector<Literal> result = result_tuple.DecomposeTuple();
bool has_diff = false;
for (int i = 1; i < kRadixSortTestSize; ++i) {
has_diff |= CompareAdjacentValues(result[0], i, ascending);
has_diff |= CompareAdjacentValues(result[1], i, ascending);
}
EXPECT_TRUE(has_diff) << "uninitialized output";
}
template <PrimitiveType P, typename T>
std::shared_ptr<Literal> CreateRandomLiteral(T mean, T stddev) {
Shape shape = ShapeUtil::MakeShape(P, {kRadixSortTestSize});
auto maybe_literal =
LiteralUtil::CreateRandomLiteral<P, T>(shape, mean, stddev);
CHECK_OK(maybe_literal);
auto shared_literal = std::make_shared<Literal>(shape);
CHECK_OK(shared_literal->MoveFrom(std::move(*maybe_literal)));
return shared_literal;
}
INSTANTIATE_TEST_SUITE_P(
TestRadixSort, CubSortKeysTest,
::testing::Combine(
::testing::Values(
CreateRandomLiteral<F16, half>(
half(), Eigen::half_impl::float_to_half_rtne(1)),
CreateRandomLiteral<F32, float>(0, 1),
CreateRandomLiteral<F64, double>(0, 1),
CreateRandomLiteral<S8, int8_t>(0, 10),
CreateRandomLiteral<S16, int16_t>(0, 1000),
CreateRandomLiteral<S32, int32_t>(0, 1000000),
CreateRandomLiteral<U8, uint8_t>(128, 10),
CreateRandomLiteral<U16, uint16_t>(32768, 1000),
CreateRandomLiteral<U32, uint32_t>(1 << 30, 1000000)),
::testing::Bool()),
[](const ::testing::TestParamInfo<CubSortKeysTest::ParamType>& info) {
return absl::StrCat(
PrimitiveType_Name(std::get<0>(info.param)->shape().element_type()),
"_", std::get<1>(info.param) ? "asc" : "desc");
});
INSTANTIATE_TEST_SUITE_P(
TestRadixSort, CubSortPairsTest,
::testing::Combine(
::testing::Values(CreateRandomLiteral<U16, uint16_t>(32768, 1000),
CreateRandomLiteral<U32, uint32_t>(32768, 1000),
CreateRandomLiteral<U64, uint64_t>(32768, 1000)),
::testing::Values(F16, F32, F64), ::testing::Bool()),
[](const ::testing::TestParamInfo<CubSortPairsTest::ParamType>& info) {
return absl::StrCat(
PrimitiveType_Name(std::get<0>(info.param)->shape().element_type()),
"_", PrimitiveType_Name(std::get<1>(info.param)), "_",
std::get<2>(info.param) ? "asc" : "desc");
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/sorting.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/sorting_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5e142f2-7a29-4a6e-950a-b8e0e50b01db | cpp | tensorflow/tensorflow | self_adjoint_eig | third_party/xla/xla/hlo/builder/lib/self_adjoint_eig.cc | third_party/xla/xla/hlo/builder/lib/self_adjoint_eig_test.cc | #include "xla/hlo/builder/lib/self_adjoint_eig.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
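// Approximate symmetric/Hermitian eigendecomposition. After validating the
// input (rank >= 2, floating-point or complex element type, square minor
// dimensions), this lowers to an "Eigh" custom call that returns a tuple of
// eigenvectors (same shape as the input) and eigenvalues (real component
// type, batch dims plus one trailing dimension of size m).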
SelfAdjointEigResult SelfAdjointEig(XlaOp a, bool lower, int64_t max_iter,
float tol, bool sort_eigenvalues) {
XlaBuilder* builder = a.builder();
XlaOp result = builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
if (num_dims < 2) {
return InvalidArgument(
"Arguments to Eigen decomposition must have rank >= 2: got shape %s.",
a_shape.ToString());
}
PrimitiveType type = a_shape.element_type();
if (!primitive_util::IsFloatingPointType(type) &&
!primitive_util::IsComplexType(type)) {
return InvalidArgument(
"Type of the input matrix must be floating point "
"or complex: got %s.",
a_shape.ToString());
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
if (m != n) {
return InvalidArgument(
"Arguments to symmetric eigendecomposition must be square matrices: "
"got shape (%d, %d).",
m, n);
}
const int num_batch_dims = a_shape.dimensions().size() - 2;
const std::vector<int64_t> batch_dims(
a_shape.dimensions().begin(),
a_shape.dimensions().begin() + num_batch_dims);
PrimitiveType eigvals_type =
primitive_util::IsComplexType(type)
? primitive_util::ComplexComponentType(type)
: type;
std::vector<int64_t> eigvals_dims = batch_dims;
eigvals_dims.push_back(m);
Shape eigh_shape = ShapeUtil::MakeTupleShape(
{a_shape, ShapeUtil::MakeShape(eigvals_type, eigvals_dims)});
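// The backend parses its solver configuration out of the opaque string,
// encoded as "lower,sort_eigenvalues,max_iter,tol".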
std::string opaque =
absl::StrFormat("%d,%d,%d,%f", lower, sort_eigenvalues, max_iter, tol);
return CustomCall(a.builder(), "Eigh", {a}, eigh_shape, opaque);
});
return SelfAdjointEigResult{GetTupleElement(result, 0),
GetTupleElement(result, 1)};
}
} | #include "xla/hlo/builder/lib/self_adjoint_eig.h"
#include <algorithm>
#include <numeric>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
class SelfAdjointEigTest : public ClientLibraryTestBase {
protected:
void SetUp() override {
ClientLibraryTestBase::SetUp();
batch_3d_4x4_ = Array3D<float>{
{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 100, 6},
{12, 48, 6, 62},
},
};
matrix2d_8x8_ = Array2D<float>{
{14., 123., 49., 112., 115., 173., 182., 125.},
{123., 14., 60., 118., 150., 130., 91., 72.},
{49., 60., 138., 111., 106., 101., 115., 142.},
{112., 118., 111., 142., 91., 130., 25., 61.},
{115., 150., 106., 91., 116., 121., 128., 85.},
{173., 130., 101., 130., 121., 70., 151., 132.},
{182., 91., 115., 25., 128., 151., 66., 92.},
{125., 72., 142., 61., 85., 132., 92., 156.},
};
low_rank_4x4_ = Array2D<float>{
{2, 1, 4, 3},
{1, 5, 5, 9},
{4, 5, 10, 11},
{3, 9, 11, 17},
};
}
void TearDown() override { ClientLibraryTestBase::TearDown(); }
Array3D<float> GetUnitMatrix3D(const Array3D<float>& matrix) {
Array3D<float> result(matrix.n1(), matrix.n2(), matrix.n3(), 0.0);
for (int i = 0; i < matrix.n1(); ++i) {
for (int j = 0; j < matrix.n2(); ++j) {
result({i, j, j}) = 1.0;
}
}
return result;
}
Array3D<float> ExtractTriangularMatrix(const Array3D<float>& matrix,
bool lower) {
Array3D<float> result(matrix);
for (int i = 0; i < result.n1(); ++i) {
for (int j = 0; j < result.n2(); ++j) {
if (lower) {
for (int k = j + 1; k < result.n3(); ++k) {
result({i, j, k}) = 0.0;
}
} else {
for (int k = 0; k < j; ++k) {
result({i, j, k}) = 0.0;
}
}
}
}
return result;
}
Array3D<float> batch_3d_4x4_;
Array2D<float> matrix2d_8x8_;
Array2D<float> low_rank_4x4_;
Array2D<int> wrong_type_4x4_;
};
XlaOp GetAverageAbsoluteError(XlaOp m1, XlaOp m2, XlaBuilder* builder) {
Shape shape = builder->GetShape(m1).value();
int64_t size = ShapeUtil::ElementsIn(shape);
return ReduceAll(Abs(m1 - m2), ConstantR0WithType(builder, F32, 0),
CreateScalarAddComputation(F32, builder)) /
ConstantR0WithType(builder, F32, std::max<int64_t>(1, size));
}
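// Reconstructs A ~= V * diag(w) * V^H by scaling each column of V with the
// corresponding eigenvalue and multiplying by the (conjugate) transpose.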
XlaOp ComputeMatmulVWVt(SelfAdjointEigResult result, XlaBuilder* builder) {
Shape shape = builder->GetShape(result.v).value();
absl::Span<const int64_t> out_dims = shape.dimensions();
std::vector<int64_t> broadcast_dims(shape.rank() - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[shape.rank() - 2] = shape.rank() - 1;
auto vw =
Mul(result.v,
BroadcastInDim(ConvertElementType(result.w, shape.element_type()),
out_dims, broadcast_dims));
return BatchDot(vw, MaybeConjugate(TransposeInMinorDims(result.v), true),
PrecisionConfig::HIGHEST);
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_2x4x4) {
for (bool sort_eigenvalues : {false, true}) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a, /*lower=*/true, /*max_iter=*/15,
/*tol=*/1e-5, sort_eigenvalues);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_3x3_Complex) {
XlaBuilder builder(TestName());
Array<complex64> input = {
{1, complex64{2, -7}, complex64{4, -8}},
{complex64{2, 7}, 3, complex64{5, -9}},
{complex64{4, 8}, complex64{5, 9}, 6},
};
XlaOp a;
auto a_data = CreateParameter<complex64>(input, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompare<complex64>(&builder, input, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_Lower_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(
ExtractTriangularMatrix(batch_3d_4x4_, true), 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VWVt_EQ_A_Upper_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(
ExtractTriangularMatrix(batch_3d_4x4_, false), 0, "a", &builder, &a);
auto result = SelfAdjointEig(a, /*lower=*/false);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Orthogonality_2x4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
BatchDot(result.v, TransposeInMinorDims(result.v), PrecisionConfig::HIGHEST);
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(batch_3d_4x4_),
{a_data.get()}, ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_VtWV_EQ_A_Rank_Deficient_4x4) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR2Parameter<float>(low_rank_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
ComputeMatmulVWVt(result, &builder);
ComputeAndCompareR2<float>(&builder, low_rank_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Eigen_8x8) {
XlaBuilder builder(TestName());
std::vector<float> expected{-182.69205, -116.86245, -105.74489, -9.545369,
37.81711, 104.732285, 120.29153, 868.00385};
XlaOp a;
auto a_data = CreateR2Parameter<float>(matrix2d_8x8_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
Add(result.w, ZerosLike(result.w));
ComputeAndCompareR1<float>(&builder, expected, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Test_Orthogonality_8x8) {
XlaBuilder builder(TestName());
float expected_vals = 1e-3;
XlaOp a;
auto a_data = CreateR2Parameter<float>(matrix2d_8x8_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
GetAverageAbsoluteError(IdentityMatrix(&builder, F32, 8, 8),
BatchDot(TransposeInMinorDims(result.v), result.v),
&builder);
ComputeAndCompareR0<float>(&builder, expected_vals, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SelfAdjointEigTest, Wrong_Type_Int) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR2Parameter<int>(wrong_type_4x4_, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
EXPECT_FALSE(result.v.valid());
EXPECT_FALSE(result.w.valid());
}
Array2D<float> GenerateRandomSymmetricMatrix(int size) {
Array2D<float> result{size, size, 0.0};
result.FillRandom(/*stddev=*/10, /*mean=*/2, /*seed=*/12346);
for (int i = 0; i < size; ++i) {
for (int j = 0; j < i; ++j) {
result({j, i}) = result({i, j});
}
}
return result;
}
using EighTestCase = int64_t;
class RandomEighTest : public ClientLibraryTestBase,
public ::testing::WithParamInterface<EighTestCase> {};
XLA_TEST_P(RandomEighTest, Random) {
XlaBuilder builder(TestName());
int64_t size = GetParam();
Array2D<float> a_val = GenerateRandomSymmetricMatrix(size);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SelfAdjointEig(a);
GetAverageAbsoluteError(ComputeMatmulVWVt(result, &builder), a, &builder);
double kExpected = 0.00300000003;
ComputeAndCompareR0<float>(&builder, kExpected, {a_data.get()},
ErrorSpec(kExpected, 0));
}
#ifndef XLA_TEST_BACKEND_CPU
INSTANTIATE_TEST_SUITE_P(
RandomEighTestInstantiation, RandomEighTest,
::testing::Values(0, 1, 2, 3, 8, 16, 32, 77, 129, 203, 256, 257, 493, 511,
512, 513, 1000),
[](const ::testing::TestParamInfo<EighTestCase>& info) {
const int64_t size = info.param;
return absl::StrCat(size);
});
#else
INSTANTIATE_TEST_SUITE_P(
RandomEighTestInstantiation, RandomEighTest,
::testing::Values(0, 1, 2, 3, 8, 16, 32, 77, 129, 203, 256, 257, 493, 511,
512),
[](const ::testing::TestParamInfo<EighTestCase>& info) {
const int64_t size = info.param;
return absl::StrCat(size);
});
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/self_adjoint_eig.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/self_adjoint_eig_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e116ea05-6142-4a01-912a-9c43073b8666 | cpp | tensorflow/tensorflow | slicing | third_party/xla/xla/hlo/builder/lib/slicing.cc | third_party/xla/xla/hlo/builder/lib/slicing_test.cc | #include "xla/hlo/builder/lib/slicing.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
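// Static slice over the trailing `start.size()` dimensions only; every major
// dimension keeps its full range.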
XlaOp SliceInMinorDims(XlaOp x, absl::Span<const int64_t> start,
absl::Span<const int64_t> end) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RET_CHECK(start.size() == end.size());
int64_t n_minor_dims = start.size();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
TF_RET_CHECK(n_minor_dims <= n_dims);
auto major_dims = shape.dimensions().subspan(
/*pos=*/0, /*len=*/n_dims - n_minor_dims);
std::vector<int64_t> padded_start(n_dims, 0);
std::copy(start.begin(), start.end(),
padded_start.begin() + major_dims.size());
std::vector<int64_t> padded_end(n_dims);
std::copy(major_dims.begin(), major_dims.end(), padded_end.begin());
std::copy(end.begin(), end.end(), padded_end.begin() + major_dims.size());
std::vector<int64_t> strides(n_dims, 1);
return Slice(x, padded_start, padded_end, strides);
});
}
XlaOp UpdateSlice(XlaOp x, XlaOp update, absl::Span<const int64_t> start) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
const int64_t start_size = start.size();
TF_RET_CHECK(start_size == n_dims);
std::vector<int32_t> start_as_int32(start.begin(), start.end());
std::vector<XlaOp> start_ops(start.size());
for (int i = 0, end = start.size(); i < end; ++i) {
start_ops[i] = ConstantR0(builder, start_as_int32[i]);
}
return DynamicUpdateSlice(x, update, start_ops);
});
}
XlaOp UpdateSliceInMinorDims(XlaOp x, XlaOp update,
absl::Span<const int64_t> start) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
const int64_t n_minor_dims = start.size();
TF_RET_CHECK(n_minor_dims <= n_dims);
std::vector<int64_t> padded_start(n_dims, 0);
std::copy(start.begin(), start.end(),
padded_start.begin() + (n_dims - n_minor_dims));
return UpdateSlice(x, update, padded_start);
});
}
namespace {
std::vector<int64_t> ConcatVectors(absl::Span<const int64_t> xs,
absl::Span<const int64_t> ys) {
std::vector<int64_t> output(xs.size() + ys.size());
std::copy(xs.begin(), xs.end(), output.begin());
std::copy(ys.begin(), ys.end(), output.begin() + xs.size());
return output;
}
absl::StatusOr<std::vector<XlaOp>> PrependZerosInMajorDims(
XlaOp x, absl::Span<const XlaOp> starts) {
XlaBuilder* builder = x.builder();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
auto zero = ConstantR0<int32_t>(builder, 0);
std::vector<XlaOp> padded_starts(n_dims, zero);
for (int i = 0; i < starts.size(); ++i) {
padded_starts[n_dims - starts.size() + i] = starts[i];
}
return padded_starts;
}
}
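// Dynamic variants: the runtime start indices cover only the minor
// dimensions; PrependZerosInMajorDims pads them with zeros so the major
// dimensions are taken in full.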
XlaOp DynamicSliceInMinorDims(XlaOp x, absl::Span<const XlaOp> starts,
absl::Span<const int64_t> sizes) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
int64_t n_minor_dims = starts.size();
TF_RET_CHECK(n_minor_dims == sizes.size());
TF_RET_CHECK(n_minor_dims <= n_dims);
auto major_dims = shape.dimensions().subspan(
/*pos=*/0, /*len=*/n_dims - sizes.size());
TF_ASSIGN_OR_RETURN(auto padded_starts, PrependZerosInMajorDims(x, starts));
auto padded_sizes = ConcatVectors(major_dims, sizes);
return DynamicSlice(x, padded_starts, padded_sizes);
});
}
XlaOp DynamicUpdateSliceInMinorDims(XlaOp x, XlaOp update,
absl::Span<const XlaOp> starts) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto padded_starts, PrependZerosInMajorDims(x, starts));
return DynamicUpdateSlice(x, update, padded_starts);
});
}
XlaOp TorchGather(XlaOp input, XlaOp index, int64_t dim, bool sparse) {
XlaBuilder* builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape index_shape, builder->GetShape(index));
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
if (ShapeUtil::ElementHasBitWidth(index_shape, 64) &&
input_shape.dimensions(dim) < std::numeric_limits<uint32_t>::max()) {
index = ConvertElementType(index, U32);
index_shape.set_element_type(U32);
}
if (index_shape.rank() == 1) {
return TorchIndexSelect(input, index, 0);
}
if (!sparse) {
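// Dense path: build a one-hot mask by comparing the broadcast index against
// an iota along `dim`, select the matching input elements, and reduce away
// the gathered dimension, trading a real gather for a broadcast.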
std::vector<int64_t> index_broadcast_dims;
std::vector<int64_t> input_broadcast_dims;
std::vector<int64_t> sizes;
sizes.reserve(index_shape.rank());
for (int64_t i = 0; i < index_shape.rank(); ++i) {
if (i < dim) {
input_broadcast_dims.push_back(i);
index_broadcast_dims.push_back(i);
} else if (i == dim) {
sizes.push_back(input_shape.dimensions(i));
input_broadcast_dims.push_back(i);
index_broadcast_dims.push_back(i + 1);
} else {
input_broadcast_dims.push_back(i + 1);
index_broadcast_dims.push_back(i + 1);
}
sizes.push_back(index_shape.dimensions(i));
}
auto mask = Eq(
BroadcastInDim(index, sizes, index_broadcast_dims),
Iota(builder, ShapeUtil::MakeShape(index_shape.element_type(), sizes),
dim));
auto masked_input = Select(
mask, BroadcastInDim(input, sizes, input_broadcast_dims),
Zeros(builder,
ShapeUtil::MakeShape(input_shape.element_type(), sizes)));
return Reduce(masked_input, Zero(builder, input_shape.element_type()),
CreateScalarIdentityWithZeroComputation(
input_shape.element_type(), builder),
{dim});
}
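// Sparse path: extend the index with a trailing size-1 coordinate dimension,
// concatenate per-dimension iotas (with the user index at `dim`) along it to
// form full start-index vectors, and gather scalars with every input
// dimension collapsed.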
ShapeUtil::AppendMajorDimension(1, &index_shape);
std::vector<XlaOp> to_concat;
to_concat.reserve(input_shape.rank());
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (i == dim) {
to_concat.push_back(Reshape(index, index_shape.dimensions()));
} else {
to_concat.push_back(Iota(builder, index_shape, i));
}
}
XlaOp gather_indices = ConcatInDim(builder, to_concat, input_shape.rank());
std::vector<int64_t> slice_sizes(input_shape.rank(), 1);
GatherDimensionNumbers gather_dnums;
gather_dnums.set_index_vector_dim(input_shape.rank());
for (int64_t i = 0; i < input_shape.rank(); ++i) {
gather_dnums.add_collapsed_slice_dims(i);
gather_dnums.add_start_index_map(i);
}
return Gather(input, gather_indices, gather_dnums, slice_sizes);
});
}
XlaOp TorchScatterDense(XlaOp input, XlaOp index, XlaOp src, int64_t dim,
const std::function<XlaOp(XlaOp, XlaOp)>& combiner) {
XlaBuilder* builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape index_shape, builder->GetShape(index));
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
std::vector<int64_t> index_broadcast_dims;
std::vector<int64_t> sizes;
const auto rank = index_shape.rank();
sizes.reserve(rank + 1);
for (int64_t i = 0; i < index_shape.rank(); ++i) {
if (i < dim) {
index_broadcast_dims.push_back(i);
} else {
if (i == dim) {
sizes.push_back(input_shape.dimensions(i));
}
index_broadcast_dims.push_back(i + 1);
}
sizes.push_back(index_shape.dimensions(i));
}
auto mask =
Eq(BroadcastInDim(index, sizes, index_broadcast_dims),
Iota(builder,
ShapeUtil::MakeShape(index_shape.element_type(), sizes), dim));
auto masked_src =
Select(mask, BroadcastInDim(src, sizes, index_broadcast_dims),
Zeros(builder,
ShapeUtil::MakeShape(input_shape.element_type(), sizes)));
return combiner(
input,
Reduce(masked_src, Zero(builder, input_shape.element_type()),
CreateScalarComputation("reducer", input_shape.element_type(),
builder, combiner),
{dim + 1}));
});
}
XlaOp TorchIndexSelect(XlaOp input, XlaOp index, int64_t dim,
int64_t batch_dims) {
XlaBuilder* builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
TF_ASSIGN_OR_RETURN(Shape index_shape, builder->GetShape(index));
if (dim < batch_dims) {
return InvalidArgument(
"Gather dim must be greater than or equal to the number of batch "
"dims");
}
if (ShapeUtil::ElementHasBitWidth(index_shape, 64) &&
input_shape.dimensions(dim) < std::numeric_limits<uint32_t>::max()) {
index = ConvertElementType(index, U32);
index_shape.set_element_type(U32);
}
std::vector<int64_t> slice_sizes = SpanToVector(input_shape.dimensions());
GatherDimensionNumbers gather_dnums;
gather_dnums.set_index_vector_dim(index_shape.rank());
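// With batch dimensions, prepend per-batch iota coordinates to each index
// vector so every batch element gathers from its own slice of the input.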
if (batch_dims > 0) {
ShapeUtil::AppendMajorDimension(1, &index_shape);
std::vector<XlaOp> to_concat;
to_concat.reserve(batch_dims + 1);
xla::Shape iota_shape = xla::ShapeUtil::MakeStaticShape(index_shape);
for (int64_t batch_dim = 0; batch_dim < batch_dims; ++batch_dim) {
to_concat.push_back(Iota(builder, iota_shape, batch_dim));
}
to_concat.push_back(Reshape(index, index_shape.dimensions()));
index = ConcatInDim(builder, to_concat, gather_dnums.index_vector_dim());
}
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (i < batch_dims || i == dim) {
slice_sizes[i] = std::min<int64_t>(slice_sizes[i], 1);
gather_dnums.add_collapsed_slice_dims(i);
gather_dnums.add_start_index_map(i);
} else {
if (i < dim) {
gather_dnums.add_offset_dims(i);
} else {
gather_dnums.add_offset_dims(i + gather_dnums.index_vector_dim() -
(1 + batch_dims));
}
}
}
return Gather(input, index, gather_dnums, slice_sizes);
});
}
} | #include "xla/hlo/builder/lib/slicing.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using SlicingTest = xla::ClientLibraryTestBase;
xla::Array2D<float> BValsRight() {
return {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}};
}
xla::Array2D<float> BValsLeft() {
return {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12}};
}
xla::Array2D<float> AValsFull() {
return {{2, 0, 1, 2}, {3, 6, 0, 1}, {4, 7, 9, 0}, {5, 8, 10, 11}};
}
xla::Array3D<float> BatchedAValsFull() {
return {{
{2, 0, 1, 2},
{3, 6, 0, 1},
{4, 7, 9, 0},
{5, 8, 10, 11},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 456, 106},
{12, 48, 106, 62},
}};
}
XLA_TEST_F(SlicingTest, Simple2dLookup) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, x, y;
auto a_data = CreateR2Parameter<float>(BValsRight(), 0, "a", &builder, &a);
auto x_data = CreateR0Parameter<int>(2, 1, "x", &builder, &x);
auto y_data = CreateR0Parameter<int>(1, 2, "y", &builder, &y);
DynamicSliceInMinorDims(a, {x, y}, {1, 1});
ComputeAndCompareR2<float>(&builder, {{10}},
{a_data.get(), x_data.get(), y_data.get()},
xla::ErrorSpec(1e-2, 1e-2));
}
XLA_TEST_F(SlicingTest, Simple3dLookup) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, index;
auto a_data =
CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
auto index_data = CreateR0Parameter<int>(1, 1, "index", &builder, &index);
DynamicSliceInMinorDims(a, {index, xla::ConstantR0<int32_t>(&builder, 0)},
{1, 4});
ComputeAndCompareR3<float>(&builder, {{{3, 6, 0, 1}}, {{24, 61, 82, 48}}},
{a_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, NestedLookup) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, index;
auto a_data =
CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
auto index_data = CreateR0Parameter<int>(1, 1, "index", &builder, &index);
auto slice = DynamicSliceInMinorDims(
a, {index, xla::ConstantR0<int32_t>(&builder, 0)}, {1, 4});
DynamicSliceInMinorDims(slice, {xla::ConstantR0<int32_t>(&builder, 0), index},
{1, 1});
ComputeAndCompareR3<float>(&builder, {{{6}}, {{61}}},
{a_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, SimpleSliceUpdate) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, b, x, y;
auto a_data = CreateR2Parameter<float>(AValsFull(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>({{9, 1, -10}}, 1, "b", &builder, &b);
auto x_data = CreateR0Parameter<int>(2, 2, "x", &builder, &x);
auto y_data = CreateR0Parameter<int>(1, 3, "y", &builder, &y);
DynamicUpdateSliceInMinorDims(a, b, {x, y});
xla::Array2D<float> expected(
{{{2, 0, 1, 2}, {3, 6, 0, 1}, {4, 9, 1, -10}, {5, 8, 10, 11}}});
ComputeAndCompareR2<float>(
&builder, expected,
{a_data.get(), b_data.get(), x_data.get(), y_data.get()});
}
XLA_TEST_F(SlicingTest, NestedSliceUpdate) {
xla::XlaBuilder builder(TestName());
xla::XlaOp a, b, x, y;
auto a_data = CreateR2Parameter<float>(AValsFull(), 0, "a", &builder, &a);
auto b_data = CreateR2Parameter<float>({{1, -10}}, 1, "b", &builder, &b);
auto x_data = CreateR0Parameter<int>(2, 2, "x", &builder, &x);
auto y_data = CreateR0Parameter<int>(1, 3, "y", &builder, &y);
auto z = xla::ConstantR0<int32_t>(&builder, 0);
auto slice = DynamicSliceInMinorDims(a, {x, z}, {1, 4});
auto inner = DynamicUpdateSliceInMinorDims(slice, b, {z, y});
DynamicUpdateSlice(a, inner, {x, z});
xla::Array2D<float> expected(
{{{2, 0, 1, 2}, {3, 6, 0, 1}, {4, 1, -10, 0}, {5, 8, 10, 11}}});
ComputeAndCompareR2<float>(
&builder, expected,
{a_data.get(), b_data.get(), x_data.get(), y_data.get()});
}
XLA_TEST_F(SlicingTest, TorchGatherSparse) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR2Parameter<int>({{1, 2}, {3, 4}}, 0, "input", &builder, &input);
auto index_data =
CreateR2Parameter<int>({{0, 0}, {1, 0}}, 1, "index", &builder, &index);
TorchGather(input, index, 1);
ComputeAndCompareR2<int>(&builder, {{1, 1}, {4, 3}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, TorchGatherDense) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR2Parameter<int>({{1, 2}, {3, 4}}, 0, "input", &builder, &input);
auto index_data =
CreateR2Parameter<int>({{0, 0}, {1, 0}}, 1, "index", &builder, &index);
TorchGather(input, index, 1, /*sparse=*/false);
ComputeAndCompareR2<int>(&builder, {{1, 1}, {4, 3}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, TorchScatterDense) {
xla::XlaBuilder builder(TestName());
xla::XlaOp src, index, input;
auto input_data = CreateR2Parameter<int>({{0, 0, 0}, {0, 0, 0}}, 0, "input",
&builder, &input);
auto index_data =
CreateR2Parameter<int>({{1, 0}, {1, 2}}, 1, "index", &builder, &index);
auto src_data =
CreateR2Parameter<int>({{1, 2}, {3, 4}}, 2, "src", &builder, &src);
TorchScatterDense(input, index, src, 1,
[](XlaOp l, XlaOp r) { return l + r; });
ComputeAndCompareR2<int>(
&builder, {{2, 1, 0}, {0, 3, 4}},
{input_data.get(), index_data.get(), src_data.get()});
}
XLA_TEST_F(SlicingTest, TorchIndexSelectOn0) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR2Parameter<float>({{0.1427, 0.0231, -0.5414, -1.0009},
{-0.4664, 0.2647, -0.1228, -1.1068},
{-1.1734, -0.6571, 0.7230, -0.6004}},
0, "input", &builder, &input);
auto index_data =
CreateR1Parameter<int>({0, 2}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 0);
ComputeAndCompareR2<float>(
&builder,
{{0.1427, 0.0231, -0.5414, -1.0009}, {-1.1734, -0.6571, 0.7230, -0.6004}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, TorchIndexSelectOn0Size1) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data = CreateR2Parameter<float>(
{{-1.1734, -0.6571, 0.7230, -0.6004}}, 0, "input", &builder, &input);
auto index_data =
CreateR1Parameter<int>({0, 0, 0, 0, 0, 0}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 0);
ComputeAndCompareR2<float>(&builder,
{{-1.1734, -0.6571, 0.7230, -0.6004},
{-1.1734, -0.6571, 0.7230, -0.6004},
{-1.1734, -0.6571, 0.7230, -0.6004},
{-1.1734, -0.6571, 0.7230, -0.6004},
{-1.1734, -0.6571, 0.7230, -0.6004},
{-1.1734, -0.6571, 0.7230, -0.6004}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, TorchIndexSelectOn1) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR2Parameter<float>({{0.1427, 0.0231, -0.5414, -1.0009},
{-0.4664, 0.2647, -0.1228, -1.1068},
{-1.1734, -0.6571, 0.7230, -0.6004}},
0, "input", &builder, &input);
auto index_data =
CreateR1Parameter<int>({0, 2}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 1);
ComputeAndCompareR2<float>(
&builder, {{0.1427, -0.5414}, {-0.4664, -0.1228}, {-1.1734, 0.7230}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, EmptyIndexSelect) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR2Parameter<float>({{0}, {0}, {0}}, 0, "input", &builder, &input);
auto index_data = CreateR1Parameter<int>({}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 1);
ComputeAndCompareR2<float>(&builder, {{}, {}, {}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, DoubleEmptyIndexSelect) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
Literal l(ShapeUtil::MakeShape(F32, {0, 1, 2, 0}));
Literal i(ShapeUtil::MakeShape(S32, {0}));
TF_ASSERT_OK_AND_ASSIGN(
auto input_data,
CreateParameterAndTransferLiteral(0, l, "input", &builder, &input));
TF_ASSERT_OK_AND_ASSIGN(
auto index_data,
CreateParameterAndTransferLiteral(1, i, "index", &builder, &index));
TorchIndexSelect(input, index, 0);
ComputeAndCompareLiteral(&builder, l, {input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, EmptyIndexSelectNonZero) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
Literal l(ShapeUtil::MakeShape(F32, {0, 2}));
TF_ASSERT_OK_AND_ASSIGN(
auto input_data,
CreateParameterAndTransferLiteral(0, l, "input", &builder, &input));
auto index_data =
CreateR1Parameter<int>({0, 0, 0}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 0);
ComputeAndCompareR2<float>(&builder,
{{0.0f, 0.0f}, {0.0f, 0.0f}, {0.0f, 0.0f}},
{input_data.get(), index_data.get()});
}
XLA_TEST_F(SlicingTest, BatchTorchIndexSelectOn0) {
xla::XlaBuilder builder(TestName());
xla::XlaOp input, index;
auto input_data =
CreateR3Parameter<int>({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
{{3, 2, 1, 0}, {7, 6, 5, 4}, {11, 10, 9, 8}}},
0, "input", &builder, &input);
auto index_data =
CreateR2Parameter<int>({{0, 2}, {1, 2}}, 1, "index", &builder, &index);
TorchIndexSelect(input, index, 1, 1);
ComputeAndCompareR3<int>(
&builder,
{{{0, 1, 2, 3}, {8, 9, 10, 11}}, {{7, 6, 5, 4}, {11, 10, 9, 8}}},
{input_data.get(), index_data.get()});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/slicing.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/slicing_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7ab1b71f-221f-4d1d-98bf-b8020a98c711 | cpp | tensorflow/tensorflow | arithmetic | third_party/xla/xla/hlo/builder/lib/arithmetic.cc | tensorflow/lite/delegates/hexagon/builders/tests/arithmetic_test.cc | #include "xla/hlo/builder/lib/arithmetic.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
XlaComputation CreateScalarComputation(const std::string& name,
PrimitiveType type, XlaBuilder* builder,
XlaOpGenerator generator) {
std::unique_ptr<XlaBuilder> b;
if (type == PRED) {
b = builder->CreateSubBuilder(name);
} else {
b = builder->CreateSubBuilder(
absl::StrCat(name, "_", PrimitiveType_Name(type)));
}
const Shape scalar = ShapeUtil::MakeShape(type, {});
auto lhs = Parameter(b.get(), 0, scalar, "lhs");
auto rhs = Parameter(b.get(), 1, scalar, "rhs");
generator(lhs, rhs);
return b->BuildAndNoteError();
}
XlaComputation CreateScalarAddComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"add", type, builder, [](XlaOp lhs, XlaOp rhs) { return Add(lhs, rhs); });
}
XlaComputation CreateScalarMultiplyComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"mul", type, builder, [](XlaOp lhs, XlaOp rhs) { return Mul(lhs, rhs); });
}
XlaComputation CreateScalarGeComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"ge", type, builder, [](XlaOp lhs, XlaOp rhs) { return Ge(lhs, rhs); });
}
XlaComputation CreateScalarMaxComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"max", type, builder, [](XlaOp lhs, XlaOp rhs) { return Max(lhs, rhs); });
}
XlaComputation CreateScalarMinComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"min", type, builder, [](XlaOp lhs, XlaOp rhs) { return Min(lhs, rhs); });
}
XlaComputation CreateScalarAndComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"and", type, builder, [](XlaOp lhs, XlaOp rhs) { return And(lhs, rhs); });
}
XlaComputation CreateScalarOrComputation(PrimitiveType type,
XlaBuilder* builder) {
return CreateScalarComputation(
"or", type, builder, [](XlaOp lhs, XlaOp rhs) { return Or(lhs, rhs); });
}
XlaComputation CreateScalarIdentityWithZeroComputation(PrimitiveType type,
XlaBuilder* builder) {
XlaComputation reducer =
(primitive_util::IsIntegralType(type) || type == PRED)
? CreateScalarOrComputation(type, builder)
: CreateScalarAddComputation(type, builder);
return reducer;
}
XlaOp Any(XlaOp predicates) {
XlaBuilder* builder = predicates.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
auto f = ConstantR0<bool>(builder, false);
XlaComputation logical_or = CreateScalarOrComputation(PRED, builder);
TF_ASSIGN_OR_RETURN(const Shape& predicates_shape,
builder->GetShape(predicates));
std::vector<int64_t> all_dimensions(predicates_shape.rank());
std::iota(all_dimensions.begin(), all_dimensions.end(), 0);
return Reduce(predicates, f, logical_or, all_dimensions);
});
}
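// Variadic reduce comparator for ArgMinMax: carries (value, index) pairs,
// picks the min or max value, and on exact value ties selects the smaller
// index so the result is deterministic.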
static XlaComputation CreateMinMaxComputation(XlaBuilder* outer_builder,
PrimitiveType value_type,
PrimitiveType index_type,
bool is_min) {
auto sub_builder = outer_builder->CreateSubBuilder("minmax_func");
XlaBuilder* b = sub_builder.get();
XlaOp lhs_value =
Parameter(b, 0, ShapeUtil::MakeShape(value_type, {}), "lhs_value");
XlaOp lhs_index =
Parameter(b, 1, ShapeUtil::MakeShape(index_type, {}), "lhs_index");
XlaOp rhs_value =
Parameter(b, 2, ShapeUtil::MakeShape(value_type, {}), "rhs_value");
XlaOp rhs_index =
Parameter(b, 3, ShapeUtil::MakeShape(index_type, {}), "rhs_index");
XlaOp cmp = is_min ? Le(lhs_value, rhs_value) : Ge(lhs_value, rhs_value);
XlaOp max = Select(cmp, lhs_value, rhs_value);
XlaOp arg_max = Select(cmp, lhs_index, rhs_index);
XlaOp eq = Eq(lhs_value, rhs_value);
XlaOp tie_id = Min(lhs_index, rhs_index);
arg_max = Select(eq, tie_id, arg_max);
Tuple(b, {max, arg_max});
return b->BuildAndNoteError();
}
XlaOp ArgMinMax(XlaOp input, PrimitiveType output_type, int axis, bool is_min) {
XlaBuilder* builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
XlaOp value_init_value;
if (is_min) {
value_init_value = MaxValue(builder, input_shape.element_type());
} else {
value_init_value = MinValue(builder, input_shape.element_type());
}
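// Use an S32 iota whenever the reduced axis fits in 32 bits; the result is
// converted to the requested output type afterwards if the two differ.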
int64_t dimension_size = input_shape.dimensions(axis);
auto index_type = dimension_size <= INT32_MAX ? S32 : output_type;
XlaOp index_init_value = Zero(builder, index_type);
auto iota_shape =
ShapeUtil::MakeShape(index_type, input_shape.dimensions());
XlaOp iota = Iota(builder, iota_shape, axis);
XlaComputation reducer = CreateMinMaxComputation(
builder, input_shape.element_type(), index_type, is_min);
XlaOp max_argmax = Reduce(builder, {input, iota},
{value_init_value, index_init_value}, reducer,
{axis});
XlaOp argmax = GetTupleElement(max_argmax, 1);
if (index_type != output_type) {
argmax = ConvertElementType(argmax, output_type);
}
return argmax;
});
}
XlaOp ArgMax(XlaOp input, PrimitiveType output_type, int axis) {
return ArgMinMax(input, output_type, axis, false);
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class ArithmeticOpBaseModel : public SingleOpModelWithHexagon {
public:
ArithmeticOpBaseModel(const TensorData& input1, const TensorData& input2,
const TensorData& output)
: SingleOpModelWithHexagon() {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
}
ArithmeticOpBaseModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
const std::initializer_list<uint8_t>& input1_data,
const std::initializer_list<uint8_t>& input2_data) {
if (input1_data.size() > 0)
input1_ = AddConstInput(input1, input1_data);
else
input1_ = AddInput(input1);
if (input2_data.size() > 0)
input2_ = AddConstInput(input2, input2_data);
else
input2_ = AddInput(input2);
output_ = AddOutput(output);
}
void InitInterpreter() {
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
template <typename T>
void SetInput1(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input1_, data);
}
template <typename T>
void SetInput2(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input2_, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
class AddOpModel : public ArithmeticOpBaseModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_func)
: ArithmeticOpBaseModel(input1, input2, output),
activation_func_(activation_func) {}
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
const std::initializer_list<uint8_t>& input1_data,
const std::initializer_list<uint8_t>& input2_data,
ActivationFunctionType activation_func)
: ArithmeticOpBaseModel(input1, input2, output, input1_data, input2_data),
activation_func_(activation_func) {}
void InitInterpreter() {
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_func_).Union());
ArithmeticOpBaseModel::InitInterpreter();
}
private:
ActivationFunctionType activation_func_;
};
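// Each test first invokes the reference TFLite kernel to capture a baseline,
// then applies the Hexagon delegate, re-invokes, and compares the
// dequantized outputs within roughly two quantization steps.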
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsNoActivation(ActivationFunctionType activation_func) {
const float kQuantizedTolerance = 2.0 / 255.0;
std::vector<std::vector<float>> inputs1 = {
{0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, 0.7, 0.3}};
std::vector<std::vector<float>> inputs2 = {
{0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, -0.8, 0.5}};
for (size_t i = 0; i < inputs1.size(); ++i) {
AddOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
{tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
{tensor_type, {1, 2, 2, 1}, -1.0, 1.0}, activation_func);
m.InitInterpreter();
m.SetInput1<integer_dtype>(inputs1[i]);
m.SetInput2<integer_dtype>(inputs2[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<integer_dtype>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)))
<< "With test number " << i;
}
}
class QuantizedAddOpModel
: public testing::TestWithParam<ActivationFunctionType> {};
TEST_P(QuantizedAddOpModel, QuantizedTestsNoActivationUInt8) {
QuantizedTestsNoActivation<TensorType_UINT8, uint8_t>(GetParam());
}
TEST_P(QuantizedAddOpModel, QuantizedTestsNoActivationInt8) {
QuantizedTestsNoActivation<TensorType_INT8, int8_t>(GetParam());
}
TEST(QuantizedAddOpModelNoActivation, TestUInt8_ConstInput_1) {
const float kQuantizedTolerance = 2.0 / 255.0;
AddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{110, 142, 156, 171}, {}, ActivationFunctionType_NONE);
m.InitInterpreter();
m.SetInput1<uint8_t>({0.1, 0.2, 0.3, 0.4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}
TEST(QuantizedAddOpModelNoActivation, TestUInt8_ConstInput_2) {
const float kQuantizedTolerance = 2.0 / 255.0;
AddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {},
{110, 142, 156, 171}, ActivationFunctionType_NONE);
m.InitInterpreter();
m.SetInput2<uint8_t>({0.1, 0.2, 0.3, 0.4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<uint8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}
TEST(QuantizedAddOpModelNoActivation, TestInt8_ConstInput) {
const float kQuantizedTolerance = 2.0 / 255.0;
AddOpModel m({TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0}, {},
{110, 101, 105, 120}, ActivationFunctionType_NONE);
m.InitInterpreter();
m.SetInput2<int8_t>({0.1, 0.2, 0.3, 0.4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto reference_output = m.GetDequantizedOutput<int8_t>();
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}
INSTANTIATE_TEST_SUITE_P(QuantizedAddOpModel, QuantizedAddOpModel,
testing::Values(ActivationFunctionType_NONE,
ActivationFunctionType_RELU,
ActivationFunctionType_RELU_N1_TO_1,
ActivationFunctionType_RELU6));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/arithmetic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/arithmetic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5459b5a-8eaa-4c4a-b2c6-5a56b32239f4 | cpp | tensorflow/tensorflow | matrix | third_party/xla/xla/hlo/builder/lib/matrix.cc | third_party/xla/xla/hlo/builder/lib/matrix_test.cc | #include "xla/hlo/builder/lib/matrix.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <map>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
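// Builds an m x n identity by comparing a row iota against a broadcast
// column iota; entries where the two indices match become one.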
XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64_t m,
int64_t n) {
auto a = Iota(builder, U32, m);
auto b = Iota(builder, U32, n);
auto indicator = Eq(a, Broadcast(b, {m}), {0});
return ConvertElementType(indicator, type);
}
XlaOp GetDiagonalMask(XlaOp x, int diagonal) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
auto n_dims = static_cast<int32_t>(shape.rank());
TF_RET_CHECK(n_dims >= 2);
auto m = shape.dimensions(n_dims - 2);
auto n = shape.dimensions(n_dims - 1);
absl::Span<const int64_t> major_dims =
shape.dimensions().subspan(0, n_dims - 2);
auto a = Iota(builder, S32, n);
auto b = Iota(builder, S32, m) + ConstantR0WithType(builder, S32, diagonal);
auto indicator = Eq(b, Broadcast(a, {m}), {0});
auto mask = Broadcast(indicator, major_dims);
return mask;
});
}
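// Extracts the k-th diagonal by masking and reducing. The reduction axis is
// picked from the sign of k and the matrix aspect ratio so the surviving
// axis covers the diagonal; for k != 0 a final minor-dim slice trims the
// result to the exact diagonal length.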
XlaOp GetMatrixDiagonal(XlaOp x, int k) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
auto n_dims = static_cast<int32_t>(shape.rank());
TF_RET_CHECK(n_dims >= 2);
const int64_t m = shape.dimensions(n_dims - 2);
const int64_t n = shape.dimensions(n_dims - 1);
if (k <= -m || k >= n) {
auto zero_size_shape = shape;
zero_size_shape.DeleteDimension(n_dims - 1);
zero_size_shape.set_dimensions(n_dims - 2, 0);
return ConstantLiteral(builder, Literal{zero_size_shape});
}
auto mask = GetDiagonalMask(x, k);
int64_t reduce_dim = n_dims - 1;
if ((k == 0 && m >= n) || k < 0) {
reduce_dim = n_dims - 2;
}
auto result = Reduce(
Select(mask, x, Zeros(builder, shape)), ScalarLike(x, 0),
CreateScalarIdentityWithZeroComputation(shape.element_type(), builder),
{reduce_dim});
if (k == 0) {
return result;
}
return SliceInMinorDims(result, {0},
{k > 0 ? std::min(m, n - k) : std::min(n, m + k)});
});
}
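// Gather-based variant: computes the diagonal length up front, builds a
// (diag_len, 2) tensor of (row, col) start indices offset by the diagonal
// number, and gathers 1x1 slices from the two minor dimensions.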
XlaOp GetMatrixDiagonalViaGather(XlaOp x, int k) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
auto n_dims = static_cast<int32_t>(shape.rank());
TF_RET_CHECK(n_dims >= 2);
const int64_t m = shape.dimensions(n_dims - 2);
const int64_t n = shape.dimensions(n_dims - 1);
const int64_t num_index_dims = 2;
const int64_t axis = n_dims - num_index_dims;
const int64_t diag_len =
std::max(std::min(m + std::min(k, 0), n - std::max(k, 0)), int64_t{0});
XlaOp diag_base_indices = BroadcastInDim(Iota(builder, S32, diag_len),
{diag_len, num_index_dims}, {0});
XlaOp diag_offset =
Broadcast(ConstantR1<int>(builder, {std::max(-k, 0), std::max(k, 0)}),
{diag_len});
XlaOp start_indices = Add(diag_base_indices, diag_offset);
xla::GatherDimensionNumbers dim_numbers;
std::vector<int64_t> slice_sizes;
slice_sizes.reserve(n_dims);
for (int64_t i = 0; i < n_dims; i++) {
int64_t window_bound;
if (axis <= i) {
dim_numbers.add_collapsed_slice_dims(i);
dim_numbers.add_start_index_map(i);
window_bound = (shape.dimensions(i) != 0) ? 1 : 0;
} else {
dim_numbers.add_offset_dims(i);
window_bound = shape.dimensions(i);
}
slice_sizes.push_back(window_bound);
}
dim_numbers.set_index_vector_dim(1);
return Gather(x, start_indices, dim_numbers, slice_sizes,
/*indices_are_sorted=*/true);
});
}
XlaOp SetMatrixDiagonal(XlaOp matrix, XlaOp diag, int k) {
XlaBuilder* builder = matrix.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(matrix));
TF_ASSIGN_OR_RETURN(Shape diag_shape, builder->GetShape(diag));
auto n_dims = static_cast<int32_t>(shape.rank());
TF_RET_CHECK(n_dims >= 2);
const int64_t m = shape.dimensions(n_dims - 2);
const int64_t n = shape.dimensions(n_dims - 1);
const int64_t d = diag_shape.dimensions(n_dims - 2);
std::vector<int64_t> broadcast_dims(n_dims - 1);
absl::c_iota(broadcast_dims, 0);
int64_t pad_high = m - d;
if (k < 0) {
++(broadcast_dims.back());
pad_high = n - d;
}
if (pad_high != 0) {
PaddingConfig padding_config;
for (int64_t i = 0; i < diag_shape.rank() - 1; ++i) {
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(0);
dims->set_interior_padding(0);
dims->set_edge_padding_high(0);
}
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(0);
dims->set_interior_padding(0);
dims->set_edge_padding_high(pad_high);
diag = Pad(diag, ScalarLike(diag, 0), padding_config);
}
return Select(GetDiagonalMask(matrix, k),
BroadcastInDim(diag, shape.dimensions(), broadcast_dims),
matrix);
});
}
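// Returns a boolean mask over the two minor-most dimensions that is true on
// and below the given diagonal, i.e. where row + diagonal >= column.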
XlaOp TriangleMask(XlaOp x, int diagonal) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
TF_RET_CHECK(n_dims >= 2);
const int64_t m = shape.dimensions(n_dims - 2);
const int64_t n = shape.dimensions(n_dims - 1);
absl::Span<const int64_t> major_dims =
shape.dimensions().subspan(0, n_dims - 2);
auto a = Iota(builder, S32, n);
auto b = Iota(builder, S32, m) + ConstantR0<int32_t>(builder, diagonal);
XlaOp indicator;
indicator = Ge(b, Broadcast(a, {m}), {0});
return Broadcast(indicator, major_dims);
});
}
XlaOp Triangle(XlaOp x, bool lower) {
return lower ? Select(TriangleMask(x, 0), x, ZerosLike(x))
: Select(TriangleMask(x, -1), ZerosLike(x), x);
}
XlaOp UpperTriangle(XlaOp x) { return Triangle(x, false); }
XlaOp LowerTriangle(XlaOp x) { return Triangle(x, true); }
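// Mirrors the chosen triangle of a square matrix across the main diagonal.
// For complex types this produces a Hermitian matrix: the imaginary part of
// the diagonal is zeroed and the imaginary part is negated across it.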
XlaOp Symmetrize(XlaOp x, bool lower) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
if (shape.rank() < 2) {
return InvalidArgument(
"Argument to symmetrize must have >= 2 dimensions, got %s",
shape.ToString());
}
const int64_t m = ShapeUtil::GetDimension(shape, -2);
const int64_t n = ShapeUtil::GetDimension(shape, -1);
if (m != n) {
return InvalidArgument(
"The two most minor dimensions of the argument to symmetrize must be "
"equal size, got %s",
shape.ToString());
}
auto mask = lower ? TriangleMask(x, 0) : Not(TriangleMask(x, -1));
if (primitive_util::IsComplexType(shape.element_type())) {
auto re = Select(mask, Real(x), TransposeInMinorDims(Real(x)));
auto im_mask = lower ? TriangleMask(x, -1) : Not(TriangleMask(x, 0));
auto im = Select(im_mask, Imag(x), ZerosLike(Imag(x)));
im = Select(mask, im, -TransposeInMinorDims(im));
return Complex(re, im);
} else {
return Select(mask, x, TransposeInMinorDims(x));
}
});
}
namespace {
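// Splits an einsum operand config into (unique labels, dimensions to reduce,
// dimensions to broadcast). Returns nullopt when all labels are unique, i.e.
// when no diagonal needs to be taken.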
std::optional<std::array<std::vector<int64_t>, 3>> EinsumDiagonalLabels(
absl::Span<const int64_t> config) {
std::vector<int64_t> unique_labels;
std::vector<int64_t> reduce_dims;
std::vector<int64_t> broadcast_dims;
for (auto label = config.begin(); label != config.end(); ++label) {
auto first_label = absl::c_find(config, *label);
auto dim = label - config.begin();
if (first_label == label) {
unique_labels.push_back(*label);
broadcast_dims.push_back(dim);
} else {
reduce_dims.push_back(dim);
}
}
if (unique_labels.size() == config.size()) {
return std::nullopt;
}
return {{unique_labels, reduce_dims, broadcast_dims}};
}
xla::XlaOp EinsumDiagonalMask(XlaOp x, absl::Span<const int64_t> config) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
Shape iota_shape = ShapeUtil::MakeShape(S32, x_shape.dimensions());
XlaOp mask = ConstantR0(builder, true);
for (auto label = config.begin(); label != config.end(); ++label) {
const int64_t dim = label - config.begin();
auto first_label = absl::c_find(config, *label);
if (first_label != label) {
const int64_t first_dim = first_label - config.begin();
mask = And(mask, Eq(Iota(builder, iota_shape, first_dim),
Iota(builder, iota_shape, dim)));
}
}
return Select(mask, x, ZerosLike(x));
});
}
xla::XlaOp EinsumDiagonal(XlaOp x, absl::Span<const int64_t> config) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
auto labels = EinsumDiagonalLabels(config);
if (!labels) {
return x;
}
auto zero = ScalarLike(x, 0);
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
return Reduce(EinsumDiagonalMask(x, config), zero,
CreateScalarIdentityWithZeroComputation(
x_shape.element_type(), builder),
labels->at(1));
});
}
xla::XlaOp EinsumInverseDiagonal(XlaOp x, absl::Span<const int64_t> config) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
auto labels = EinsumDiagonalLabels(config);
if (!labels) {
return x;
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
std::vector<int64_t> broadcast_sizes;
int64_t x_dim = 0;
for (auto label = config.begin(); label != config.end(); ++label) {
auto first_label = absl::c_find(config, *label);
if (first_label == label) {
broadcast_sizes.push_back(x_shape.dimensions(x_dim));
++x_dim;
} else {
broadcast_sizes.push_back(
broadcast_sizes[first_label - config.begin()]);
}
}
x = BroadcastInDim(x, broadcast_sizes, labels->at(2));
return EinsumDiagonalMask(x, config);
});
}
}
namespace {
template <typename C>
void DeleteDimsFromContainer(absl::Span<const int64_t> to_delete, Shape* shape,
C* batch_dims, C* contracting_dims) {
if (to_delete.empty()) {
return;
}
for (int64_t i = to_delete.size() - 1; i >= 0; --i) {
int64_t dim = to_delete[i];
shape->DeleteDimension(dim);
for (auto& b : *batch_dims) {
if (b > dim) {
--b;
}
}
for (auto& c : *contracting_dims) {
if (c > dim) {
--c;
}
}
}
}
}
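// Core einsum on numeric label configs. Repeated labels are resolved first
// via EinsumDiagonal / EinsumInverseDiagonal; remaining labels are classified
// into batch, contracting, and outer dimensions of a DotGeneral (degenerate
// size-1 mismatches are summed away), and the dot result is transposed and
// reshaped into the requested output order.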
xla::XlaOp Einsum(xla::XlaOp x, absl::Span<const int64_t> x_config,
xla::XlaOp y, absl::Span<const int64_t> y_config,
absl::Span<const int64_t> output_config,
xla::PrecisionConfig::Precision precision,
std::optional<PrimitiveType> preferred_element_type,
bool grad_x, bool grad_y) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
auto x_diagonal_labels = EinsumDiagonalLabels(x_config);
if (x_diagonal_labels) {
return Einsum(EinsumDiagonal(x, x_config), x_diagonal_labels->at(0), y,
y_config, output_config, precision, preferred_element_type,
grad_x, grad_y);
}
auto y_diagonal_labels = EinsumDiagonalLabels(y_config);
if (y_diagonal_labels) {
return Einsum(x, x_config, EinsumDiagonal(y, y_config),
y_diagonal_labels->at(0), output_config, precision,
preferred_element_type, grad_x, grad_y);
}
auto output_diagonal_labels = EinsumDiagonalLabels(output_config);
if (output_diagonal_labels) {
return EinsumInverseDiagonal(
Einsum(x, x_config, y, y_config, output_diagonal_labels->at(0),
precision, preferred_element_type, grad_x, grad_y),
output_config);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
TF_ASSIGN_OR_RETURN(Shape y_shape, builder->GetShape(y));
const int64_t x_rank = x_config.size();
const int64_t y_rank = y_config.size();
const int64_t output_rank = output_config.size();
absl::flat_hash_set<int64_t> x_map;
absl::flat_hash_set<int64_t> y_map;
absl::flat_hash_set<int64_t> output_map;
for (auto d : x_config) {
x_map.insert(d);
}
for (auto d : y_config) {
y_map.insert(d);
}
for (auto d : output_config) {
output_map.insert(d);
}
DotDimensionNumbers dnums;
auto is_batch_dim = [&](int64_t d) {
return x_map.contains(d) && y_map.contains(d) && output_map.contains(d);
};
auto is_contracting = [&](int64_t d) {
return x_map.contains(d) && y_map.contains(d);
};
auto rhs_dimension_number = [&](int64_t d) {
return absl::c_find(y_config, d) - y_config.begin();
};
absl::InlinedVector<int64_t, 8> rhs_outer_dims;
absl::InlinedVector<int64_t, 8> lhs_outer_dims;
absl::InlinedVector<int64_t, 8> rhs_delete_dims;
absl::InlinedVector<int64_t, 8> lhs_delete_dims;
for (int64_t i = 0; i < x_rank; ++i) {
auto dim_name = x_config[i];
const int64_t rhs_dim = rhs_dimension_number(dim_name);
if (is_batch_dim(dim_name)) {
if (x_shape.dimensions(i) == y_shape.dimensions(rhs_dim)) {
dnums.add_lhs_batch_dimensions(i);
dnums.add_rhs_batch_dimensions(rhs_dim);
} else if (x_shape.dimensions(i) == 1) {
rhs_outer_dims.push_back(rhs_dim);
lhs_delete_dims.push_back(i);
} else {
lhs_outer_dims.push_back(i);
rhs_delete_dims.push_back(rhs_dim);
}
} else if (is_contracting(dim_name)) {
if (x_shape.dimensions(i) == y_shape.dimensions(rhs_dim)) {
dnums.add_lhs_contracting_dimensions(i);
dnums.add_rhs_contracting_dimensions(rhs_dim);
} else if (x_shape.dimensions(i) == 1) {
rhs_outer_dims.push_back(rhs_dim);
lhs_delete_dims.push_back(i);
} else {
lhs_outer_dims.push_back(i);
rhs_delete_dims.push_back(rhs_dim);
}
} else {
lhs_outer_dims.push_back(i);
}
}
for (int64_t i = 0; i < y_rank; ++i) {
auto dim_name = y_config[i];
if (!is_batch_dim(dim_name) && !is_contracting(dim_name)) {
rhs_outer_dims.push_back(i);
}
}
absl::c_sort(rhs_outer_dims);
absl::InlinedVector<int64_t, 8> output_transpose_dims;
auto output_dimension_number = [&](int64_t d) -> std::optional<int64_t> {
auto pos = absl::c_find(output_config, d);
if (pos == output_config.end()) {
return std::nullopt;
}
return pos - output_config.begin();
};
for (auto d : dnums.lhs_batch_dimensions()) {
output_transpose_dims.push_back(*output_dimension_number(x_config[d]));
}
for (auto d : lhs_outer_dims) {
if (auto output_dim = output_dimension_number(x_config[d])) {
output_transpose_dims.push_back(*output_dim);
continue;
}
lhs_delete_dims.push_back(d);
}
for (auto d : rhs_outer_dims) {
if (auto output_dim = output_dimension_number(y_config[d])) {
output_transpose_dims.push_back(*output_dim);
continue;
}
rhs_delete_dims.push_back(d);
}
const int64_t transpose_rank = output_transpose_dims.size();
std::vector<int64_t> transpose_dims(output_rank);
for (int64_t i = 0; i < transpose_rank; ++i) {
transpose_dims[output_transpose_dims[i]] = i;
}
absl::c_sort(lhs_delete_dims);
DeleteDimsFromContainer(lhs_delete_dims, &x_shape,
dnums.mutable_lhs_batch_dimensions(),
dnums.mutable_lhs_contracting_dimensions());
absl::c_sort(rhs_delete_dims);
DeleteDimsFromContainer(rhs_delete_dims, &y_shape,
dnums.mutable_rhs_batch_dimensions(),
dnums.mutable_rhs_contracting_dimensions());
if (!lhs_delete_dims.empty()) {
x = Reduce(x, ScalarLike(x, 0),
CreateScalarAddComputation(x_shape.element_type(), builder),
lhs_delete_dims);
}
if (!rhs_delete_dims.empty()) {
y = Reduce(y, ScalarLike(y, 0),
CreateScalarAddComputation(y_shape.element_type(), builder),
rhs_delete_dims);
}
PrecisionConfig precision_proto;
precision_proto.add_operand_precision(precision);
precision_proto.add_operand_precision(precision);
auto dot =
DotGeneral(x, y, dnums, &precision_proto, preferred_element_type);
TF_RETURN_IF_ERROR(builder->SetInstructionFrontendAttribute(
dot, "grad_x", (grad_x ? "true" : "false")));
TF_RETURN_IF_ERROR(builder->SetInstructionFrontendAttribute(
dot, "grad_y", (grad_y ? "true" : "false")));
dot = Transpose(dot, transpose_dims);
if (transpose_rank == output_rank) {
return dot;
}
auto is_output_only = [&](int64_t d) {
return output_map.contains(d) && !x_map.contains(d) && !y_map.contains(d);
};
int64_t dot_dim = 0;
std::vector<int64_t> new_dims;
new_dims.reserve(output_rank);
TF_ASSIGN_OR_RETURN(Shape dot_shape, builder->GetShape(dot));
for (auto d : output_config) {
if (is_output_only(d)) {
new_dims.push_back(1);
} else {
        // Consume the next dot-result dimension for labels that came from the
        // inputs; output-only labels were given size-1 dims above.
        new_dims.push_back(dot_shape.dimensions(dot_dim++));
}
}
return Reshape(dot, new_dims);
});
}
XlaOp BatchDot(XlaOp x, XlaOp y, PrecisionConfig::Precision precision,
std::optional<PrimitiveType> preferred_element_type) {
return BatchDot(x, false, y, false, precision, preferred_element_type);
}
XlaOp BatchDot(XlaOp x, bool transpose_x, XlaOp y, bool transpose_y,
PrecisionConfig::Precision precision,
std::optional<PrimitiveType> preferred_element_type, bool grad_x,
bool grad_y) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::string string("...mk,...kn->...mn");
if (transpose_x) {
std::swap(string[3], string[4]);
}
if (transpose_y) {
std::swap(string[6 + 3], string[6 + 4]);
}
return Einsum(x, y, string, precision, preferred_element_type, grad_x,
grad_y);
});
}
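// Parses an einsum string such as "ab,bc->ac" into three numeric label
// vectors (x, y, output). Alphabetic labels map to their character codes;
// an ellipsis "..." expands to negative labels derived from the input ranks.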
absl::StatusOr<std::array<std::vector<int64_t>, 3>> ParseEinsumString(
absl::string_view einsum_config, int64_t x_rank, int64_t y_rank) {
std::array<std::vector<int64_t>, 3> einsum_config_numeric;
std::vector<absl::string_view> main_split =
absl::StrSplit(einsum_config, ',');
if (main_split.size() != 2) {
return InvalidArgument("Expected one \",\" in einsum_config.");
}
auto maybe_invalid_character = [](char d) -> absl::Status {
if (absl::ascii_isalpha(d)) {
return absl::OkStatus();
}
if (d == '.') {
return InvalidArgument("Unsupported \".\" in einsum config.");
}
return InvalidArgument("Unexpected character in einsum config.");
};
auto string_config_to_numeric =
[&](absl::string_view config, bool is_input_config, int64_t input_rank,
int64_t ellipsis_rank,
std::vector<int64_t>* numeric_config) -> absl::StatusOr<int64_t> {
std::vector<absl::string_view> splits = absl::StrSplit(config, "...");
if (splits.empty()) {
return ellipsis_rank;
}
if (splits.size() > 2) {
return InvalidArgument("Too many ellipses (\"...\") in einsum config.");
}
const bool has_ellipsis = splits.size() > 1;
if (is_input_config && has_ellipsis) {
ellipsis_rank = input_rank -
static_cast<int64_t>(splits[0].size() + splits[1].size());
if (ellipsis_rank < 0) {
return InvalidArgument(
"Too few dimensions in the input for the given einsum config.");
}
}
for (char d : splits[0]) {
TF_RETURN_IF_ERROR(maybe_invalid_character(d));
numeric_config->push_back(static_cast<int64_t>(d));
}
if (has_ellipsis) {
for (int64_t i = ellipsis_rank; i > 0; --i) {
numeric_config->push_back(-i);
}
for (char d : splits[1]) {
TF_RETURN_IF_ERROR(maybe_invalid_character(d));
numeric_config->push_back(static_cast<int64_t>(d));
}
}
return ellipsis_rank;
};
  TF_ASSIGN_OR_RETURN(
      const int64_t x_ellipsis_rank,
      string_config_to_numeric(main_split[0],
                               /*is_input_config=*/true, x_rank,
                               /*ellipsis_rank=*/0, &einsum_config_numeric[0]));
std::vector<absl::string_view> y_output_split =
absl::StrSplit(main_split[1], "->");
if (y_output_split.size() != 2) {
return InvalidArgument("Expected one \"->\" in einsum_config.");
}
  TF_ASSIGN_OR_RETURN(
      const int64_t y_ellipsis_rank,
      string_config_to_numeric(y_output_split[0],
                               /*is_input_config=*/true, y_rank,
                               /*ellipsis_rank=*/0, &einsum_config_numeric[1]));
  TF_ASSIGN_OR_RETURN(
      std::ignore,
      string_config_to_numeric(
          y_output_split[1], /*is_input_config=*/false,
          /*input_rank=*/0,
          /*ellipsis_rank=*/std::max(x_ellipsis_rank, y_ellipsis_rank),
          &einsum_config_numeric[2]));
return einsum_config_numeric;
}
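// If 'einsum_config' has no explicit output ("->"), synthesizes one that
// keeps the ellipsis (if any) followed by every label appearing exactly once,
// in sorted order. Returns "" when the config already names an output.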
std::string NormalizeEinsumString(absl::string_view einsum_config) {
if (einsum_config.find("->") != einsum_config.npos) {
return "";
}
bool has_ellipsis = einsum_config.find("...") != einsum_config.npos;
std::map<char, int64_t> chars;
for (char c : einsum_config) {
if (absl::ascii_isalpha(c)) {
++chars[c];
}
}
std::string new_config(einsum_config.begin(), einsum_config.end());
new_config.append("->");
if (has_ellipsis) {
new_config.append("...");
}
for (auto p : chars) {
if (p.second == 1) {
new_config.push_back(p.first);
}
}
return new_config;
}
XlaOp Einsum(XlaOp x, XlaOp y, absl::string_view einsum_config,
PrecisionConfig::Precision precision,
std::optional<PrimitiveType> preferred_element_type, bool grad_x,
bool grad_y) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
auto new_config = NormalizeEinsumString(einsum_config);
if (!new_config.empty()) {
return Einsum(x, y, new_config, precision, preferred_element_type, grad_x,
grad_y);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
TF_ASSIGN_OR_RETURN(Shape y_shape, builder->GetShape(y));
TF_ASSIGN_OR_RETURN(
auto einsum_config_numeric,
ParseEinsumString(einsum_config, x_shape.rank(), y_shape.rank()));
return Einsum(x, einsum_config_numeric[0], y, einsum_config_numeric[1],
einsum_config_numeric[2], precision, preferred_element_type,
grad_x, grad_y);
});
}
XlaOp Einsum(XlaOp x, absl::string_view einsum_config,
PrecisionConfig::Precision precision) {
return Einsum(ScalarLike(x, 1), x, absl::StrCat(",", einsum_config),
precision);
}
XlaOp TransposeInMinorDims(XlaOp x) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
const int64_t n_dims = shape.rank();
TF_RET_CHECK(n_dims >= 2);
std::vector<int64_t> permutation(n_dims);
std::iota(permutation.begin(), permutation.end(), 0);
std::swap(permutation[n_dims - 1], permutation[n_dims - 2]);
return Transpose(x, permutation);
});
}
XlaOp MaybeTransposeInMinorDims(XlaOp x, bool transpose) {
return transpose ? TransposeInMinorDims(x) : x;
}
} | #include "xla/hlo/builder/lib/matrix.h"
#include <limits>
#include <map>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
namespace xla {
namespace {
class MatrixTest : public ClientLibraryTestBase {
protected:
template <typename T>
void TestMatrixDiagonal();
template <typename T>
void TestMatrixDiagonal4D();
template <typename T>
void TestSetMatrixDiagonal();
template <typename T>
std::map<int, Array2D<T>> k_and_expected() const {
return std::map<int, Array2D<T>>{
{0, {{0, 5, 10}, {12, 17, 22}}},
{1, {{1, 6, 11}, {13, 18, 23}}},
{2, {{2, 7}, {14, 19}}},
{3, {{3}, {15}}},
{4, {{}, {}}},
{-1, {{4, 9}, {16, 21}}},
{-2, {{8}, {20}}},
{-3, {{}, {}}},
{-4, {{}, {}}},
};
}
};
XLA_TEST_F(MatrixTest, Triangle) {
XlaBuilder builder(TestName());
Array3D<int32_t> input(2, 3, 4);
input.FillIota(0);
XlaOp a;
auto a_data = CreateR3Parameter<int32_t>(input, 0, "a", &builder, &a);
LowerTriangle(a);
Array3D<int32_t> expected({{{0, 0, 0, 0}, {4, 5, 0, 0}, {8, 9, 10, 0}},
{{12, 0, 0, 0}, {16, 17, 0, 0}, {20, 21, 22, 0}}});
ComputeAndCompareR3<int32_t>(&builder, expected, {a_data.get()});
}
XLA_TEST_F(MatrixTest, Symmetrize) {
for (bool lower : {false, true}) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
Array<float> input = {
{1, nan, nan},
{2, 3, nan},
{4, 5, 6},
};
XlaOp a;
auto a_data = CreateParameter<float>(input, 0, "a", &builder, &a);
Symmetrize(lower ? a : TransposeInMinorDims(a), lower);
Array<float> expected = {
{1, 2, 4},
{2, 3, 5},
{4, 5, 6},
};
ComputeAndCompare<float>(&builder, expected, {a_data.get()});
}
}
XLA_TEST_F(MatrixTest, SymmetrizeComplex) {
for (bool lower : {false, true}) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
Array<complex64> input = {
{complex64{1, nan}, nan, nan},
{complex64{2, 7}, complex64{3, nan}, nan},
{complex64{4, 8}, complex64{5, 9}, complex64{6, nan}},
};
XlaOp a;
auto a_data = CreateParameter<complex64>(input, 0, "a", &builder, &a);
Symmetrize(lower ? a : Conj(TransposeInMinorDims(a)), lower);
Array<complex64> expected = {
{1, complex64{2, -7}, complex64{4, -8}},
{complex64{2, 7}, 3, complex64{5, -9}},
{complex64{4, 8}, complex64{5, 9}, 6},
};
ComputeAndCompare<complex64>(&builder, expected, {a_data.get()});
}
}
template <typename T>
void MatrixTest::TestMatrixDiagonal() {
XlaBuilder builder("SetMatrixDiagonal");
Array3D<T> input(2, 3, 4);
input.FillIota(0);
for (const auto& kv : k_and_expected<T>()) {
XlaOp a;
auto a_data = CreateR3Parameter<T>(input, 0, "a", &builder, &a);
GetMatrixDiagonal(a, kv.first);
ComputeAndCompareR2<T>(&builder, kv.second, {a_data.get()});
}
}
template <typename T>
void MatrixTest::TestSetMatrixDiagonal() {
XlaBuilder builder("GetMatrixDiagonal");
Array3D<T> input(2, 3, 4);
input.FillIota(0);
for (const auto& kv : k_and_expected<T>()) {
XlaOp a;
XlaOp b;
auto a_data = CreateR3Parameter<T>(input, 0, "a", &builder, &a);
auto new_diag =
CreateR2Parameter<T>(Array2D<T>{kv.second}, 1, "d", &builder, &b);
GetMatrixDiagonal(SetMatrixDiagonal(a, b + ScalarLike(b, 1), kv.first),
kv.first) -
ScalarLike(b, 1);
ComputeAndCompareR2<T>(&builder, kv.second, {a_data.get(), new_diag.get()});
}
}
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_S32) {
TestSetMatrixDiagonal<int32_t>();
}
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_S64) {
TestSetMatrixDiagonal<int64_t>();
}
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_F32) {
TestSetMatrixDiagonal<float>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_S32) { TestMatrixDiagonal<int32_t>(); }
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_S64) { TestMatrixDiagonal<int64_t>(); }
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_F32) { TestMatrixDiagonal<float>(); }
template <typename T>
void MatrixTest::TestMatrixDiagonal4D() {
XlaBuilder builder("GetMatrixDiagonal");
Array4D<T> input(2, 2, 4, 3);
input.FillIota(0);
std::map<int, Array3D<T>> k_and_expected = {
{0, {{{0, 4, 8}, {12, 16, 20}}, {{24, 28, 32}, {36, 40, 44}}}},
{1, {{{1, 5}, {13, 17}}, {{25, 29}, {37, 41}}}},
{2, {{{2}, {14}}, {{26}, {38}}}},
{3, {{{}, {}}, {{}, {}}}},
{4, {{{}, {}}, {{}, {}}}},
{-1, {{{3, 7, 11}, {15, 19, 23}}, {{27, 31, 35}, {39, 43, 47}}}},
{-2, {{{6, 10}, {18, 22}}, {{30, 34}, {42, 46}}}},
{-3, {{{9}, {21}}, {{33}, {45}}}},
{-4, {{{}, {}}, {{}, {}}}},
};
for (const auto& kv : k_and_expected) {
XlaOp a;
auto a_data = CreateR4Parameter<T>(input, 0, "a", &builder, &a);
GetMatrixDiagonal(a, kv.first);
ComputeAndCompareR3<T>(&builder, kv.second, {a_data.get()});
}
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_S32) {
TestMatrixDiagonal4D<int32_t>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_S64) {
TestMatrixDiagonal4D<int64_t>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_F32) {
TestMatrixDiagonal4D<float>();
}
Array3D<float> BatchedAValsFull() {
return {{
{2, 0, 1, 2},
{3, 6, 0, 1},
{4, 7, 9, 0},
{5, 8, 10, 11},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 456, 106},
{12, 48, 106, 62},
}};
}
XLA_TEST_F(MatrixTest, RowBatchDot) {
XlaBuilder builder(TestName());
int n = 4;
XlaOp a, row, index;
auto a_data =
CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
auto row_data = CreateR3Parameter<float>({{{9, 1, 0, 0}}, {{2, 4, 0, 0}}}, 1,
"row", &builder, &row);
auto index_data = CreateR0Parameter<int>(1, 2, "index", &builder, &index);
auto l_index = DynamicSliceInMinorDims(
a, {index, ConstantR0<int32_t>(&builder, 0)}, {1, n});
BatchDot(l_index, TransposeInMinorDims(row));
ComputeAndCompareR3<float>(&builder, {{{33}}, {{292}}},
{a_data.get(), row_data.get(), index_data.get()});
}
XLA_TEST_F(MatrixTest, Einsum) {
XlaBuilder builder(TestName());
int n = 4;
XlaOp a, row, index;
auto a_data =
CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
auto row_data = CreateR3Parameter<float>({{{9, 1, 0, 0}}, {{2, 4, 0, 0}}}, 1,
"row", &builder, &row);
auto index_data = CreateR0Parameter<int>(1, 2, "index", &builder, &index);
auto l_index = DynamicSliceInMinorDims(
a, {index, ConstantR0<int32_t>(&builder, 0)}, {1, n});
Einsum(l_index, row, "abc,adc->abd");
ComputeAndCompareR3<float>(&builder, {{{33}}, {{292}}},
{a_data.get(), row_data.get(), index_data.get()});
}
XLA_TEST_F(MatrixTest, ParseEinsumString) {
auto to_vec = [](absl::string_view s) {
std::vector<int64_t> v;
v.reserve(s.size());
int e = -3;
for (auto c : s) {
v.push_back(c == '.' ? e++ : int64_t{c});
}
return v;
};
auto to_string = [&](absl::string_view x, absl::string_view y,
absl::string_view o) {
return absl::StrCat(x, ",", y, "->", o);
};
std::vector<std::vector<std::string>> good_test_cases = {
{"ab", "bc", "ac"},
{"Bab", "Bbc", "Bac"},
{"ab", "cd", "dcba"},
{"abc", "abd", "cbd"},
{"...ab", "...bc", "...ac"},
{"a...bc", "...abd", "cbd..."},
{"...ab", "...bc", "ac"},
{"...b", "...bc", "...c"},
{"...abz", "...bc", "...ac"},
{"...ab", "...bcz", "...ac"},
{"abz", "bc", "ac"},
{"ab", "bcz", "ac"},
{"a", "b", "c"},
{"...a", "...b", "...c"},
{"abb", "bcc", "ac"},
{"ab", "bc", "ad"},
};
for (auto test_case : good_test_cases) {
auto parse_result_or_status =
ParseEinsumString(to_string(test_case[0], test_case[1], test_case[2]),
test_case[0].size(), test_case[1].size());
EXPECT_TRUE(parse_result_or_status.status().ok());
auto parse_result = parse_result_or_status.value();
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(parse_result[i], to_vec(test_case[i]));
}
}
std::vector<std::string> einsum_strings_that_fail_parsing = {
"", "a", "ab->ba", "ab,bc,cd->ad", "a...b...,bc->a...c",
};
for (auto test_case : einsum_strings_that_fail_parsing) {
auto parse_result_or_status = ParseEinsumString(test_case, 3, 3);
EXPECT_FALSE(parse_result_or_status.status().ok());
}
}
XLA_TEST_F(MatrixTest, NormalizeEinsumString) {
EXPECT_EQ(NormalizeEinsumString("a,b->ab"), "");
EXPECT_EQ(NormalizeEinsumString("ba"), "ba->ab");
EXPECT_EQ(NormalizeEinsumString("ab,dc"), "ab,dc->abcd");
EXPECT_EQ(NormalizeEinsumString("a,b"), "a,b->ab");
EXPECT_EQ(NormalizeEinsumString("...ba,ca..."), "...ba,ca...->...bc");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/matrix.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/matrix_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ec5cebe-660a-40ef-881c-5670d88f4be5 | cpp | tensorflow/tensorflow | prng | third_party/xla/xla/hlo/builder/lib/prng.cc | third_party/xla/xla/hlo/builder/lib/prng_test.cc | #include "xla/hlo/builder/lib/prng.h"
#include <array>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
xla::XlaOp ConcatScalars(xla::XlaBuilder* builder,
absl::Span<const xla::XlaOp> scalars) {
std::vector<xla::XlaOp> vectors;
absl::c_transform(scalars, std::back_inserter(vectors),
[](xla::XlaOp x) { return xla::Reshape(x, {1}); });
return ConcatInDim(builder, vectors, 0);
}
namespace {
XlaOp RotateLeftU32(XlaOp v, int distance) {
return (v << ConstantR0<uint32_t>(v.builder(), distance)) |
ShiftRightLogical(v, ConstantR0<uint32_t>(v.builder(), 32 - distance));
}
using ThreeFry2x32State = std::array<XlaOp, 2>;
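// Implements the ThreeFry-2x32 counter-based PRNG from Salmon et al.,
// "Parallel Random Numbers: As Easy as 1, 2, 3" (SC 2011): 20 rounds with
// the standard rotation schedule and a key injection every four rounds.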
ThreeFry2x32State ThreeFry2x32(ThreeFry2x32State input, ThreeFry2x32State key) {
XlaBuilder* builder = input[0].builder();
key[0] = BitcastConvertType(key[0], U32);
key[1] = BitcastConvertType(key[1], U32);
constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
ThreeFry2x32State x;
std::array<XlaOp, 3> ks;
ks[2] = ConstantR0<uint32_t>(builder, 0x1BD11BDA);
for (int i = 0; i < 2; ++i) {
ks[i] = key[i];
x[i] = input[i];
ks[2] = ks[2] ^ key[i];
}
x[0] = x[0] + ks[0];
x[1] = x[1] + ks[1];
auto round = [](ThreeFry2x32State v, int rotation) {
v[0] = v[0] + v[1];
v[1] = RotateLeftU32(v[1], rotation);
v[1] = v[0] ^ v[1];
return v;
};
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[1];
x[1] = x[1] + ks[2] + ConstantR0<uint32_t>(builder, 1);
x = round(x, rotations[4]);
x = round(x, rotations[5]);
x = round(x, rotations[6]);
x = round(x, rotations[7]);
x[0] = x[0] + ks[2];
x[1] = x[1] + ks[0] + ConstantR0<uint32_t>(builder, 2);
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[0];
x[1] = x[1] + ks[1] + ConstantR0<uint32_t>(builder, 3);
x = round(x, rotations[4]);
x = round(x, rotations[5]);
x = round(x, rotations[6]);
x = round(x, rotations[7]);
x[0] = x[0] + ks[1];
x[1] = x[1] + ks[2] + ConstantR0<uint32_t>(builder, 4);
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[2];
x[1] = x[1] + ks[0] + ConstantR0<uint32_t>(builder, 5);
return x;
}
std::array<XlaOp, 2> Uint64ToUint32s(XlaOp u64) {
XlaBuilder* builder = u64.builder();
XlaOp const32 = ConstantR0WithType(builder, U64, 32);
XlaOp fst = ConvertElementType(u64, U32);
XlaOp snd = ConvertElementType(ShiftRightLogical(u64, const32), U32);
return {fst, snd};
}
XlaOp Uint32sToUint64(std::array<XlaOp, 2> u32s) {
XlaBuilder* builder = u32s[0].builder();
return ConvertElementType(u32s[0], U64) |
ShiftLeft(ConvertElementType(u32s[1], U64),
ConstantR0WithType(builder, U64, 32));
}
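// Broadcasts the scalar counter to 'shape' and offsets every element by its
// linear index, so each element gets a distinct ThreeFry counter. Also
// returns the counter advanced past the elements consumed.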
std::pair<ThreeFry2x32State, XlaOp> GetThreeFryInputsAndUpdatedState(
XlaOp initial_state, const Shape& shape) {
XlaBuilder* builder = initial_state.builder();
auto u64_shape = ShapeUtil::MakeShape(U64, shape.dimensions());
auto input_u64 = Broadcast(Reshape(initial_state, {}), shape.dimensions());
int64_t trailing_dims_product = 1;
for (int64_t i = shape.rank() - 1; i >= 0; --i) {
if (shape.dimensions(i) < 2) {
continue;
}
input_u64 =
input_u64 + (Iota(builder, u64_shape, i) *
ConstantR0<uint64_t>(builder, trailing_dims_product));
trailing_dims_product *= shape.dimensions(i);
}
XlaOp new_state = initial_state +
ConstantR0<uint64_t>(builder, ShapeUtil::ElementsIn(shape));
return std::make_pair(Uint64ToUint32s(input_u64), new_state);
}
struct SplitShapePair {
Shape half_shape;
Shape concat_shape;
int64_t split_dim;
int64_t new_concat_dim;
};
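// Picks a dimension of 'shape' to split in half (the first even-sized one,
// otherwise the largest, rounded up) so that a generator producing pairs of
// values can fill the two halves; CombineShapePair below undoes the split.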
SplitShapePair SplitShapeIntoHalves(const Shape& shape) {
SplitShapePair pair;
if (shape.rank() == 0) {
pair.half_shape = ShapeUtil::MakeShape(shape.element_type(), {1});
pair.concat_shape = ShapeUtil::MakeShape(shape.element_type(), {2});
pair.split_dim = 0;
pair.new_concat_dim = 0;
return pair;
}
pair.split_dim = -1;
for (int64_t i = 0; i < shape.rank(); ++i) {
if (shape.dimensions(i) % 2 == 0) {
pair.split_dim = i;
break;
}
}
if (pair.split_dim == -1) {
for (int64_t i = 0; i < shape.rank(); ++i) {
if (pair.split_dim == -1 ||
shape.dimensions(i) > shape.dimensions(pair.split_dim)) {
pair.split_dim = i;
}
}
}
  if (pair.split_dim < 0) {
    // Unreachable: any rank >= 1 picks a split dimension above. Crash rather
    // than continue with an invalid dimension index.
    LOG(FATAL) << "This point shouldn't have been reached.";
  }
std::vector<int64_t> half_shape_dims;
std::vector<int64_t> concat_shape_dims;
const auto rank = shape.rank();
half_shape_dims.reserve(rank + 1);
concat_shape_dims.reserve(rank + 1);
for (int64_t i = 0; i < rank; ++i) {
if (i == pair.split_dim) {
half_shape_dims.push_back(CeilOfRatio<int64_t>(shape.dimensions(i), 2));
half_shape_dims.push_back(1);
concat_shape_dims.push_back(half_shape_dims[i]);
concat_shape_dims.push_back(2);
} else {
half_shape_dims.push_back(shape.dimensions(i));
concat_shape_dims.push_back(shape.dimensions(i));
}
}
pair.new_concat_dim = pair.split_dim + 1;
pair.half_shape = ShapeUtil::MakeShape(shape.element_type(), half_shape_dims);
pair.concat_shape =
ShapeUtil::MakeShape(shape.element_type(), concat_shape_dims);
return pair;
}
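// Inverse of SplitShapeIntoHalves: concatenates the two half results,
// reshapes toward the original shape, and slices off the padding element
// when the split dimension had odd size.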
XlaOp CombineShapePair(absl::Span<const XlaOp> pair,
const SplitShapePair& shape_pair,
const Shape& original_shape) {
if (original_shape.rank() == 0) {
return Reshape(pair[0], {});
}
XlaBuilder* builder = pair[0].builder();
XlaOp result = ConcatInDim(builder, pair, shape_pair.new_concat_dim);
const int64_t pre_split_size =
original_shape.dimensions(shape_pair.split_dim);
std::vector<int64_t> reshape_dims(original_shape.dimensions().begin(),
original_shape.dimensions().end());
reshape_dims[shape_pair.split_dim] = RoundUpTo<int64_t>(pre_split_size, 2);
result = Reshape(result, reshape_dims);
if (reshape_dims[shape_pair.split_dim] != pre_split_size) {
result = Slice(result, std::vector<int64_t>(original_shape.rank(), 0),
original_shape.dimensions(),
std::vector<int64_t>(original_shape.rank(), 1));
}
return result;
}
RngOutput ThreeFryRngBit32(XlaOp key, XlaOp initial_state, const Shape& shape) {
auto shape_pair = SplitShapeIntoHalves(shape);
std::pair<ThreeFry2x32State, XlaOp> inputs_state =
GetThreeFryInputsAndUpdatedState(initial_state, shape_pair.half_shape);
ThreeFry2x32State inputs = inputs_state.first;
ThreeFry2x32State outputs = ThreeFry2x32(inputs, Uint64ToUint32s(key));
XlaOp result = CombineShapePair(outputs, shape_pair, shape);
return {result, inputs_state.second};
}
RngOutput ThreeFryRngBitNarrow(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
auto new_shape = shape;
new_shape.set_element_type(U32);
auto output = ThreeFryRngBit32(op_key, initial_state, new_shape);
output.value = ConvertElementType(
output.value, primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
return output;
}
RngOutput ThreeFryRngBit64(XlaOp key, XlaOp initial_state, const Shape& shape) {
std::pair<ThreeFry2x32State, XlaOp> inputs_state =
GetThreeFryInputsAndUpdatedState(initial_state, shape);
ThreeFry2x32State inputs = inputs_state.first;
ThreeFry2x32State outputs = ThreeFry2x32(inputs, Uint64ToUint32s(key));
XlaOp result = Uint32sToUint64(outputs);
return {result, inputs_state.second};
}
using Philox4x32Key = std::array<XlaOp, 2>;
using Philox4x32State = std::array<XlaOp, 4>;
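// Implements the 10-round Philox-4x32 counter-based PRNG from Salmon et al.,
// "Parallel Random Numbers: As Easy as 1, 2, 3" (SC 2011), using the
// standard multipliers and Weyl key-schedule constants.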
Philox4x32State Philox4x32(Philox4x32State state, Philox4x32Key key) {
static const uint32_t kPhiloxW32A = 0x9E3779B9;
static const uint32_t kPhiloxW32B = 0xBB67AE85;
static const uint32_t kPhiloxM4x32A = 0xD2511F53;
static const uint32_t kPhiloxM4x32B = 0xCD9E8D57;
struct HighLowPair {
XlaOp high;
XlaOp low;
};
auto mul_hi_low = [](XlaOp x, uint32_t k) {
auto product =
ConvertElementType(x, U64) * ConstantR0<uint64_t>(x.builder(), k);
auto low = ConvertElementType(product, U32);
auto high = ConvertElementType(
product >> ConstantR0<uint64_t>(x.builder(), 32), U32);
return HighLowPair{high, low};
};
auto philox_round = [&](Philox4x32State x, Philox4x32Key key) {
auto product0 = mul_hi_low(x[0], kPhiloxM4x32A);
auto product1 = mul_hi_low(x[2], kPhiloxM4x32B);
return Philox4x32State{product1.high ^ x[1] ^ key[0], product1.low,
product0.high ^ x[3] ^ key[1], product0.low};
};
auto raise_key = [](Philox4x32Key key) {
XlaBuilder* builder = key[0].builder();
return Philox4x32Key{key[0] + ConstantR0<uint32_t>(builder, kPhiloxW32A),
key[1] + ConstantR0<uint32_t>(builder, kPhiloxW32B)};
};
static const int kNumRounds = 10;
for (int round = 0; round < kNumRounds; ++round, key = raise_key(key)) {
state = philox_round(state, key);
}
return state;
}
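// Derives a fresh Philox (state, key) pair from a user-provided key by
// running one Philox block over the key bits under a fixed scrambling key.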
std::pair<Philox4x32State, Philox4x32Key> ScramblePhiloxKey(Philox4x32Key key) {
XlaBuilder* builder = key[0].builder();
XlaOp key0 = ConvertElementType(key[0], U64);
XlaOp key1 = ConvertElementType(key[1], U64);
Philox4x32State state = {
ConvertElementType(key0, U32),
ConvertElementType(key0 >> ScalarLike(key0, 32), U32),
ConvertElementType(key1, U32),
ConvertElementType(key1 >> ScalarLike(key1, 32), U32),
};
key = {ConstantR0<uint32_t>(builder, 0x3ec8f720),
ConstantR0<uint32_t>(builder, 0x02461e29)};
state = Philox4x32(state, key);
XlaOp zero = ConstantR0<uint32_t>(builder, 0);
return {Philox4x32State{zero, zero, state[2], state[3]},
Philox4x32Key{state[0], state[1]}};
}
std::array<XlaOp, 2> Uint128AddUint64(
const std::array<XlaOp, 2>& u128, XlaOp u64,
absl::Span<const int64_t> broadcast_sizes = {}) {
auto u128_low = u128[0];
auto u128_high = u128[1];
XlaOp new_u128_low = u128_low + u64;
XlaOp one = ConstantR0<uint64_t>(u128[0].builder(), 1);
XlaOp new_u128_high = Select(Lt(new_u128_low, u128_low),
Broadcast(u128_high + one, broadcast_sizes),
Broadcast(u128_high, broadcast_sizes));
return {new_u128_low, new_u128_high};
}
std::array<XlaOp, 2> Uint32sToUint128(const std::array<XlaOp, 4>& u32s) {
return {Uint32sToUint64({u32s[0], u32s[1]}),
Uint32sToUint64({u32s[2], u32s[3]})};
}
std::array<XlaOp, 4> Uint128ToUint32s(const std::array<XlaOp, 2>& u128) {
std::array<XlaOp, 2> u128_low_32s = Uint64ToUint32s(u128[0]);
std::array<XlaOp, 2> u128_high_32s = Uint64ToUint32s(u128[1]);
return {u128_low_32s[0], u128_low_32s[1], u128_high_32s[0], u128_high_32s[1]};
}
std::array<XlaOp, 2> Uint128FromOp(XlaOp op) {
auto u128_low = xla::Reshape(xla::Slice(op, {0}, {1}, {1}), {});
auto u128_high = xla::Reshape(xla::Slice(op, {1}, {2}, {1}), {});
return {u128_low, u128_high};
}
XlaOp Uint128ToOp(std::array<XlaOp, 2> u128) {
return ConcatScalars(u128[0].builder(), {u128[0], u128[1]});
}
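// Produces 'n' consecutive 128-bit Philox counters starting at 'state' and
// returns them together with the counter advanced by 'n'.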
std::pair<Philox4x32State, XlaOp> GetPhiloxInputsAndUpdatedState(
const Philox4x32State& state, int64_t n) {
XlaBuilder* builder = state[0].builder();
XlaOp iota = Iota(builder, U64, n);
auto state_u128 = Uint32sToUint128(state);
auto inputs = Uint128ToUint32s(Uint128AddUint64(state_u128, iota, {n}));
XlaOp new_state = Uint128ToOp(
Uint128AddUint64(state_u128, ConstantR0<uint64_t>(builder, n)));
return std::make_pair(inputs, new_state);
}
std::pair<Philox4x32State, XlaOp> GeneratePhiloxBits(int64_t num_elems,
XlaOp initial_state,
Philox4x32Key key) {
Philox4x32State state;
state = Uint128ToUint32s(Uint128FromOp(initial_state));
const int64_t num_vector4 = CeilOfRatio<int64_t>(num_elems, 4);
Philox4x32State inputs;
XlaOp new_state;
std::tie(inputs, new_state) =
GetPhiloxInputsAndUpdatedState(state, num_vector4);
auto outputs = Philox4x32(inputs, key);
return std::make_pair(outputs, new_state);
}
RngOutput PhiloxRngBit32(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
XlaBuilder* builder = op_key.builder();
const int64_t num_elems = ShapeUtil::ElementsIn(shape);
Philox4x32Key key = Uint64ToUint32s(op_key);
Philox4x32State bits;
XlaOp new_state;
std::tie(bits, new_state) = GeneratePhiloxBits(num_elems, initial_state, key);
int64_t bits_len = (num_elems + 3) / 4;
for (auto i = 0; i < 4; ++i) {
bits[i] = Reshape(bits[i], {bits_len, 1});
}
  XlaOp numbers = ConcatInDim(builder, {bits[0], bits[1], bits[2], bits[3]},
                              /*dimension=*/1);
  numbers = Reshape(numbers, {bits_len * 4});
  numbers = Slice(numbers, /*start_indices=*/{0},
                  /*limit_indices=*/{num_elems},
                  /*strides=*/{1});
return {Reshape(numbers, shape.dimensions()), new_state};
}
RngOutput PhiloxRngBitNarrow(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
auto new_shape = shape;
new_shape.set_element_type(U32);
auto output = PhiloxRngBit32(op_key, initial_state, new_shape);
output.value = ConvertElementType(
output.value, primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
return output;
}
RngOutput PhiloxRngBit64(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
XlaBuilder* builder = op_key.builder();
const int64_t num_elems = ShapeUtil::ElementsIn(shape);
Philox4x32Key key = Uint64ToUint32s(op_key);
Philox4x32State bits32;
XlaOp new_state;
std::tie(bits32, new_state) =
GeneratePhiloxBits(num_elems * 2, initial_state, key);
std::array<XlaOp, 2> bits64;
bits64[0] = Uint32sToUint64({bits32[0], bits32[1]});
bits64[1] = Uint32sToUint64({bits32[2], bits32[3]});
int64_t bits64_len = (num_elems + 1) / 2;
for (auto i = 0; i < 2; ++i) {
bits64[i] = Reshape(bits64[i], {bits64_len, 1});
}
  XlaOp numbers = ConcatInDim(builder, {bits64[0], bits64[1]},
                              /*dimension=*/1);
  numbers = Reshape(numbers, {bits64_len * 2});
  numbers = Slice(numbers, /*start_indices=*/{0},
                  /*limit_indices=*/{num_elems},
                  /*strides=*/{1});
return {Reshape(numbers, shape.dimensions()), new_state};
}
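// Maps raw random bits to floats uniform in [minval, maxval): the top
// mantissa-width bits are kept, scaled into [0, 1), and affinely mapped into
// the requested range. The F16/U16 path instead assembles a float in [1, 2)
// by bit-twiddling the exponent and subtracts 1.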
XlaOp ConvertRandomBitsToUniformFloatingPoint(XlaOp bits, XlaOp minval,
XlaOp maxval) {
XlaBuilder* builder = bits.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* minval_shape,
builder->GetShapePtr(minval));
TF_ASSIGN_OR_RETURN(const Shape* bits_shape, builder->GetShapePtr(bits));
PrimitiveType value_type = minval_shape->element_type();
PrimitiveType bit_type = bits_shape->element_type();
if (!primitive_util::IsFloatingPointType(value_type) ||
!primitive_util::IsIntegralType(bit_type)) {
return InvalidArgument(
"In ConvertRandomBitsToUniformFloatingPoint, value_type and bit_type "
"can only be (floating_type, integer_type). Got combination: (%s, "
"%s).",
primitive_util::LowercasePrimitiveTypeName(value_type),
primitive_util::LowercasePrimitiveTypeName(bit_type));
}
if (value_type == F16 && bit_type == U16) {
auto mantissa = bits & ScalarLike(bits, 0x3ffu);
auto exponent = ScalarLike(bits, static_cast<uint16_t>(15) << 10);
auto u16_result = exponent | mantissa;
auto result = BitcastConvertType(u16_result, F16);
return result - ScalarLike(result, 1.0);
} else {
int num_bits = primitive_util::BitWidth(bit_type);
int num_mantissa_bits = primitive_util::SignificandWidth(value_type) - 1;
if (num_mantissa_bits > num_bits) {
return InvalidArgument(
"%s bit type argument must have enough bits to cover the number of "
"mantissa bits of the result type %s",
primitive_util::LowercasePrimitiveTypeName(bit_type),
primitive_util::LowercasePrimitiveTypeName(value_type));
}
bits = ShiftRightLogical(bits,
ScalarLike(bits, num_bits - num_mantissa_bits));
XlaOp values = ConvertElementType(bits, value_type);
values = values * ScalarLike(values, std::ldexp(1., -num_mantissa_bits));
return values * (maxval - minval) + minval;
}
});
}
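// Maps raw random bits to integers uniform in [minval, maxval) by reducing
// modulo the range size; the remainder is added back to minval in two halves
// so each bitcast-to-signed addend stays in range. Note that the modulo step
// is slightly biased when the range does not divide 2^bit_width.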
XlaOp ConvertRandomBitsToUniformInt(XlaOp bits, XlaOp minval, XlaOp maxval,
PrimitiveType type,
PrimitiveType unsigned_type) {
XlaBuilder* builder = bits.builder();
XlaOp range = BitcastConvertType(maxval, unsigned_type) -
BitcastConvertType(minval, unsigned_type);
XlaOp dist = Rem(bits, range);
XlaOp dist_div_2 =
ShiftRightLogical(dist, ConstantR0WithType(builder, unsigned_type, 1));
return minval + BitcastConvertType(dist_div_2, type) +
BitcastConvertType(dist - dist_div_2, type);
}
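// Box-Muller transform: maps two independent U(0, 1] samples to two
// independent standard normals via sqrt(-2 ln u1) * (sin, cos)(2*pi*u2);
// x0 is clamped away from zero so the logarithm stays finite.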
std::pair<XlaOp, XlaOp> BoxMullerTransform(XlaOp x0, XlaOp x1) {
XlaOp u1 = Max(x0, ScalarLike(x0, 1.0e-7f));
XlaOp v1 = ScalarLike(x1, 2.0f * M_PI) * x1;
XlaOp u2 = Sqrt(ScalarLike(u1, -2.0f) * Log(u1));
return {Sin(v1) * u2, Cos(v1) * u2};
}
}
XlaOp PhiloxIncreaseCounter(XlaOp counter, XlaOp delta) {
return Uint128ToOp(Uint128AddUint64(Uint128FromOp(counter), delta));
}
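// Dispatches ThreeFry bit generation on the element bit-width: sub-32-bit
// types generate U32 bits and truncate, 32-bit types take one output word
// per element, and 64-bit types pack two output words per element.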
RngOutput ThreeFryBitGenerator(XlaOp key, XlaOp initial_state,
const Shape& shape) {
PrimitiveType type = shape.element_type();
return primitive_util::PrimitiveTypeSwitch<RngOutput>(
[&](auto primitive_type_constant) -> RngOutput {
if constexpr (primitive_util::IsArrayType(primitive_type_constant) &&
!primitive_util::IsComplexType(primitive_type_constant) &&
primitive_type_constant != PRED) {
const int kBits = primitive_util::BitWidth(primitive_type_constant);
if (kBits < 32) {
return ThreeFryRngBitNarrow(key, initial_state, shape);
}
if (kBits == 32) {
return ThreeFryRngBit32(key, initial_state, shape);
}
if (kBits == 64) {
return ThreeFryRngBit64(key, initial_state, shape);
}
}
return {
key.builder()->ReportError(Unimplemented(
"Types other than F16, F32, F64, U16, S16, U32, S32, U64 and "
"S64 are not implemented by ThreeFryBitGenerator; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
},
type);
}
RngOutput PhiloxBitGenerator(XlaOp key, XlaOp initial_state,
const Shape& shape) {
PrimitiveType type = shape.element_type();
return primitive_util::PrimitiveTypeSwitch<RngOutput>(
[&](auto primitive_type_constant) -> RngOutput {
if constexpr (primitive_util::IsArrayType(primitive_type_constant) &&
!primitive_util::IsComplexType(primitive_type_constant) &&
primitive_type_constant != PRED) {
const int kBits = primitive_util::BitWidth(primitive_type_constant);
if (kBits < 32) {
return PhiloxRngBitNarrow(key, initial_state, shape);
}
if (kBits == 32) {
return PhiloxRngBit32(key, initial_state, shape);
}
if (kBits == 64) {
return PhiloxRngBit64(key, initial_state, shape);
}
}
return {
key.builder()->ReportError(Unimplemented(
"Types other than F16, F32, F64, U16, S16, U32, S32, U64 and "
"S64 are not implemented by PhiloxBitGenerator; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
},
type);
}
std::pair<XlaOp, XlaOp> ScramblePhiloxKey(XlaOp key) {
Philox4x32Key pkey = Uint64ToUint32s(key);
auto state_key = ScramblePhiloxKey(pkey);
return std::make_pair(Uint128ToOp(Uint32sToUint128(state_key.first)),
Uint32sToUint64(state_key.second));
}
RngOutput UniformFloatingPointDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator,
XlaOp minval, XlaOp maxval,
const Shape& shape) {
RngOutput bits_state = bit_generator(key, initial_state, shape);
XlaOp bits = bits_state.value;
XlaOp new_state = bits_state.state;
return {ConvertRandomBitsToUniformFloatingPoint(bits, minval, maxval),
new_state};
}
RngOutput UniformIntDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator, XlaOp minval,
XlaOp maxval, const Shape& shape) {
RngOutput bits_state = bit_generator(key, initial_state, shape);
XlaOp bits = bits_state.value;
XlaOp new_state = bits_state.state;
PrimitiveType type = shape.element_type();
PrimitiveType unsigned_type;
if (type == U32 || type == S32) {
unsigned_type = U32;
} else if (type == U64 || type == S64) {
unsigned_type = U64;
} else {
return {key.builder()->ReportError(Unimplemented(
"Types other than U32, S32, U64 and S64 "
"are not implemented by UniformIntDistribution; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
}
return {
ConvertRandomBitsToUniformInt(bits, minval, maxval, type, unsigned_type),
new_state};
}
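// Samples standard normals by drawing uniforms over the shape split in half
// along one dimension, applying the Box-Muller transform to the two halves,
// and recombining them into the requested shape.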
RngOutput NormalFloatingPointDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator,
const Shape& shape) {
XlaBuilder* builder = key.builder();
PrimitiveType primitive_type = shape.element_type();
if (!(primitive_type == F16 || primitive_type == F32 ||
primitive_type == F64)) {
return {
builder->ReportError(Unimplemented(
"Types other than F16, F32 and F64 "
"are not implemented by NormalFloatingPointDistribution; got %s",
primitive_util::LowercasePrimitiveTypeName(primitive_type))),
initial_state};
}
auto shape_pair = SplitShapeIntoHalves(shape);
RngOutput bits_state = UniformFloatingPointDistribution(
key, initial_state, bit_generator,
xla::ConstantR0WithType(builder, primitive_type, 0.0),
xla::ConstantR0WithType(builder, primitive_type, 1.0),
shape_pair.concat_shape);
XlaOp bits_0 = Slice(bits_state.value,
std::vector<int64_t>(shape_pair.half_shape.rank(), 0),
shape_pair.half_shape.dimensions(),
std::vector<int64_t>(shape_pair.half_shape.rank(), 1));
std::vector<int64_t> bits_1_starts(shape_pair.half_shape.rank(), 0);
bits_1_starts[shape_pair.new_concat_dim] = 1;
XlaOp bits_1 = Slice(bits_state.value, bits_1_starts,
shape_pair.concat_shape.dimensions(),
std::vector<int64_t>(shape_pair.half_shape.rank(), 1));
std::tie(bits_0, bits_1) = BoxMullerTransform(bits_0, bits_1);
XlaOp normal = CombineShapePair({bits_0, bits_1}, shape_pair, shape);
return {normal, bits_state.state};
}
} | #include "xla/hlo/builder/lib/prng.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class PrngTest : public ClientLibraryTestBase {
public:
template <PrimitiveType value_type, PrimitiveType bit_type,
typename ValueT = typename primitive_util::PrimitiveTypeToNative<
value_type>::type,
typename BitT =
typename primitive_util::PrimitiveTypeToNative<bit_type>::type>
void TestConvertRandomBitsToUniformFloatingPoint(uint32_t bits, float minval,
float maxval) {
XlaBuilder builder("convert_random_bits_to_uniform_floating_point");
XlaOp bits_op = ConstantR0<BitT>(&builder, static_cast<BitT>(bits));
XlaOp minval_op = ConstantR0<ValueT>(&builder, static_cast<ValueT>(minval));
XlaOp maxval_op = ConstantR0<ValueT>(&builder, static_cast<ValueT>(maxval));
XlaOp seed = ConstantR0<uint64_t>(&builder, 42);
XlaOp initial_state = Zero(&builder, PrimitiveType::U64);
BitGeneratorTy bit_generator = [](XlaOp key, XlaOp state,
const Shape& shape) {
state = ConcatScalars(key.builder(), {key, state});
XlaOp result =
RngBitGenerator(RandomAlgorithm::RNG_DEFAULT, state, shape);
      return RngOutput{/*value=*/GetTupleElement(result, 1),
                       /*state=*/GetTupleElement(result, 0)};
};
const Shape rng_shape = builder.GetShape(bits_op).value();
EXPECT_EQ(rng_shape.element_type(), bit_type);
RngOutput rng_output = UniformFloatingPointDistribution(
seed, initial_state, bit_generator, minval_op, maxval_op, rng_shape);
if (rng_output.value.valid()) {
XlaOp result = rng_output.value;
EXPECT_EQ(builder.GetShape(result).value().element_type(), value_type);
XlaOp result_ge_min = Ge(result, minval_op);
XlaOp result_lt_max = Lt(result, maxval_op);
And(result_ge_min, result_lt_max);
ComputeAndCompareR0<bool>(&builder, true, {});
} else {
EXPECT_EQ(builder.first_error().code(),
absl::StatusCode::kInvalidArgument);
}
}
};
XLA_TEST_F(PrngTest, RandomBitsToUniformFloatingPointInvalidArguments) {
TestConvertRandomBitsToUniformFloatingPoint<PrimitiveType::F32,
PrimitiveType::U16>(0x1234, 0.0f,
1.0f);
TestConvertRandomBitsToUniformFloatingPoint<PrimitiveType::F16,
PrimitiveType::U8>(0x12, 0.0f,
1.0f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/prng.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/prng_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c721eaba-76e8-4a86-a444-76fea2575e91 | cpp | tensorflow/tensorflow | custom_call_target_registry | third_party/xla/xla/service/custom_call_target_registry.cc | third_party/xla/xla/service/custom_call_target_registry_test.cc | #include "xla/service/custom_call_target_registry.h"
#include <cstdlib>
#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
namespace xla {
CustomCallTargetRegistry* CustomCallTargetRegistry::Global() {
static auto* registry = new CustomCallTargetRegistry;
return registry;
}
void CustomCallTargetRegistry::Register(const std::string& symbol,
void* address,
const std::string& platform) {
std::lock_guard<std::mutex> lock(mu_);
const auto [it, inserted] =
registered_symbols_.insert({{symbol, platform}, address});
if (!inserted && it->second != address) {
std::cerr << "Duplicate custom call registration detected for symbol \""
<< symbol << "\" with different addresses " << address
<< "(current) and " << it->second << " (previous) on platform "
<< platform
<< "Rejecting the registration to avoid confusion about which "
"symbol would actually get used at runtime.\n";
std::exit(1);
}
}
void* CustomCallTargetRegistry::Lookup(const std::string& symbol,
const std::string& platform) const {
std::lock_guard<std::mutex> lock(mu_);
auto it = registered_symbols_.find(std::make_pair(symbol, platform));
return it == registered_symbols_.end() ? nullptr : it->second;
}
std::unordered_map<std::string, void*>
CustomCallTargetRegistry::registered_symbols(
const std::string& platform) const {
std::unordered_map<std::string, void*> calls;
std::lock_guard<std::mutex> lock(mu_);
for (const auto& [metadata, address] : registered_symbols_) {
if (metadata.second == platform) {
calls[metadata.first] = address;
}
}
return calls;
}
} | #include "xla/service/custom_call_target_registry.h"
#include "xla/service/custom_call_status.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::_;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
void custom_call(void*, const void**, XlaCustomCallStatus*) {}
void custom_call2(void*, const void**, XlaCustomCallStatus*) {}
TEST(CustomCallRegistryTest, Registers) {
CustomCallTargetRegistry registry;
EXPECT_EQ(registry.Lookup("custom_call", "Host"), nullptr);
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_EQ(custom_call, registry.Lookup("custom_call", "Host"));
registry.Register("custom_call2", reinterpret_cast<void*>(&custom_call),
"Host");
EXPECT_EQ(registry.Lookup("custom_call", "CUDA"), nullptr);
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"CUDA");
EXPECT_EQ(custom_call, registry.Lookup("custom_call", "CUDA"));
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_THAT(
registry.registered_symbols("Host"),
UnorderedElementsAre(Pair("custom_call", _), Pair("custom_call2", _)));
EXPECT_THAT(registry.registered_symbols("CUDA"),
UnorderedElementsAre(Pair("custom_call", _)));
}
TEST(CustomCallRegistryDeathTest, RejectsDuplicateRegistrations) {
CustomCallTargetRegistry registry;
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_DEATH(registry.Register("custom_call",
reinterpret_cast<void*>(custom_call2), "Host"),
"Duplicate custom call");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_target_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_target_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03576d5c-5806-40cf-9612-b9f8e63477e0 | cpp | tensorflow/tensorflow | multi_output_fusion | third_party/xla/xla/service/gpu/transforms/multi_output_fusion.cc | third_party/xla/xla/service/gpu/transforms/multi_output_fusion_test.cc | #include "xla/service/gpu/transforms/multi_output_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsProfitableOperand(HloInstruction* instr) {
return !ShapeUtil::IsEffectiveScalar(instr->shape());
}
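// If 'instr' reads 'parent' through exactly one Slice (possibly nested
// inside a fusion whose parameter has a single slicing user), returns that
// slice; otherwise returns nullptr.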
const HloSliceInstruction* FindUniqueSlice(const HloInstruction* parent,
const HloInstruction* instr) {
if (const auto* slice = DynCast<HloSliceInstruction>(instr)) {
return slice;
} else if (const auto* fusion = DynCast<HloFusionInstruction>(instr)) {
const HloSliceInstruction* result = nullptr;
for (size_t i = 0; i < fusion->operand_count(); ++i) {
if (fusion->operand(i) == parent) {
if (result) return nullptr;
auto* called_param = fusion->fused_parameter(i);
if (called_param->user_count() != 1) return nullptr;
result = FindUniqueSlice(called_param, called_param->users()[0]);
if (!result) return nullptr;
}
}
return result;
} else {
return nullptr;
}
}
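// Forbids sibling multi-output fusion when both candidates read the (large,
// non-tuple) 'parent' operand through single slices that provably do not
// overlap, i.e. the two consumers touch disjoint regions of the operand.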
FusionDecision ParameterSlicesAreNonOverlapping(const HloInstruction& instr1,
const HloInstruction& instr2,
const HloInstruction* parent) {
if (parent->shape().IsTuple()) return FusionDecision::Allow();
if (ShapeUtil::ByteSizeOfElements(parent->shape()) < 1024) {
return FusionDecision::Allow();
}
const HloSliceInstruction* slice1 = FindUniqueSlice(parent, &instr1);
const HloSliceInstruction* slice2 = FindUniqueSlice(parent, &instr2);
if (!slice1 || !slice2) return FusionDecision::Allow();
auto& starts1 = slice1->slice_starts();
auto& starts2 = slice2->slice_starts();
auto& limits1 = slice1->slice_limits();
auto& limits2 = slice2->slice_limits();
for (int64_t dim = 0; dim < parent->shape().rank(); ++dim) {
bool overlap = starts1[dim] < limits2[dim] && starts2[dim] < limits1[dim];
if (!overlap) {
return FusionDecision::Forbid("slices are non-overlapping");
}
}
return FusionDecision::Allow();
}
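// Refuses to combine two DynamicUpdateSlice fusion roots in one fusion and
// otherwise defers to the shared fusion budget check.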
FusionDecision LegalToFuse(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
FusionInfoCache* fusion_info_cache) {
CHECK(instr1.opcode() == HloOpcode::kFusion);
if (instr1.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice ||
(instr2.opcode() == HloOpcode::kFusion &&
instr2.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice)) {
return FusionDecision::Forbid("can't fuse multiple DUSs");
}
  return FusionFitsInBudget(instr1, instr2, device_info,
                            /*is_consumer_producer_fusion=*/false,
                            fusion_info_cache);
}
int FusionPriority(const HloInstruction* instr) {
if (instr->IsMultiOutputFusion()) {
return 2;
}
if (instr->opcode() == HloOpcode::kFusion) {
return 1;
}
return 0;
}
HloInstruction* SelectPreferredFusionCandidate(
const std::vector<HloInstruction*> candidates) {
if (candidates.empty()) {
return nullptr;
}
return *std::max_element(
candidates.begin(), candidates.end(),
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) < FusionPriority(b);
});
}
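// Checks that multi-output-fusing 'producer' into 'consumer' cannot create a
// cycle, i.e. that no operand of 'consumer' is reachable from 'producer'.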
FusionDecision OperandReachableFromProducer(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability) {
  for (const auto* operand : consumer.operands()) {
    // If a get-tuple-element is not in the reachability map, it was created
    // by fusion in this pass; look through it to its operand, which is in
    // the map.
    if (!reachability.IsPresent(operand) &&
        operand->opcode() == HloOpcode::kGetTupleElement) {
      operand = operand->operand(0);
    }
CHECK(reachability.IsPresent(operand) && reachability.IsPresent(&producer))
<< "Reachability map is incomplete. This should never "
"happen.";
if (&producer != operand && reachability.IsReachable(&producer, operand)) {
return FusionDecision::Forbid(
absl::StrCat(producer.name(), " would introduce a cycle when fused"));
}
}
return FusionDecision::Allow();
}
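// Checks legality and profitability of fusing `producer` into `consumer` as
// a multi-output fusion: the consumer must be a valid MOF root, shapes must
// be compatible, no cycle may be introduced, the result must fit in the
// fusion and IR-size budgets, and the performance model must not predict a
// slowdown.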
FusionDecision ProducerCandidateIsFusible(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability, FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info,
GpuHloCostAnalysis* cost_analysis) {
if (!IsFusibleAsMultiOutputFusionRoot(consumer)) {
return FusionDecision::Forbid(
"consumer not eligible as multi-output fusion root.");
}
RETURN_IF_NOT_FUSIBLE(
ShapesCompatibleForMultiOutputFusion(consumer, producer));
RETURN_IF_NOT_FUSIBLE(
OperandReachableFromProducer(producer, consumer, reachability));
  RETURN_IF_NOT_FUSIBLE(FusionFitsInBudget(
      producer, consumer, device_info,
      /*is_consumer_producer_fusion=*/false, fusion_info_cache));
if (cost_analysis->ProducerConsumerMergedTooLarge(producer, consumer)) {
return FusionDecision::Forbid("will generate too large IR");
}
  GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
      &producer, device_info, cost_analysis,
      GpuPerformanceModelOptions::Default(),
      /*fused_consumers=*/{&consumer},
      /*multi_output=*/true);
if (t.time_fused > t.time_unfused) {
return FusionDecision::Forbid("will execute slower if fused");
}
return FusionDecision::Allow();
}
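// Collects the users of `producer` that can be fused with it into a
// multi-output fusion.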
std::vector<HloInstruction*> GetProducerConsumerMultiOutputFusionCandidates(
const HloInstruction* producer, const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info,
GpuHloCostAnalysis* cost_analysis) {
std::vector<HloInstruction*> fusion_candidates;
const HloComputation* computation = producer->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
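  // If the producer is not eligible for multi-output fusion at all, none of
  // its users need to be checked.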
if (!IsProducerMultiOutputFusible(*producer)) {
return fusion_candidates;
}
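  // If there is only one user, and it is not a multi-output fusion node,
  // this fusion possibility was already considered and rejected by the
  // FusionMerger pass. No need to try again.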
if (producer->user_count() == 1 &&
!producer->users()[0]->IsMultiOutputFusion()) {
return fusion_candidates;
}
for (HloInstruction* consumer : producer->users()) {
VLOG(3) << "Looking at producer " << producer->name()
<< " and its consumer " << consumer->name();
if (auto decision = ProducerCandidateIsFusible(
*producer, *consumer, reachability, fusion_info_cache, device_info,
cost_analysis)) {
fusion_candidates.push_back(consumer);
} else if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not considering fusion of producer |", producer->name(),
"| into consumer |", consumer->name(),
"| due to: ", decision.Explain()),
*consumer, producer);
}
}
return fusion_candidates;
}
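// A sibling is a fusion candidate if it has users, can serve as a
// multi-output fusion root, and is not a nestable variadic reduction. An
// existing multi-output fusion additionally must be consumed only through
// get-tuple-element ops, which is what the fusion rewrite expects.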
bool IsSiblingFusionCandidate(const HloInstruction* instr) {
if (instr->users().empty() || !IsFusibleAsMultiOutputFusionRoot(*instr) ||
IsNestableVariadicReduction(*instr)) {
return false;
}
return (!instr->IsMultiOutputFusion() ||
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return user->opcode() == HloOpcode::kGetTupleElement;
}));
}
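// Two sibling consumers of `common_producer` are fusible if they are not
// connected (which would create a cycle), have compatible shapes, read
// overlapping regions of the shared operand, and fit in the fusion budget.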
FusionDecision CanFuseSiblings(const HloInstruction& sibling_consumer_1,
const HloInstruction& sibling_consumer_2,
const HloInstruction& common_producer,
const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info) {
if (reachability.IsConnected(&sibling_consumer_1, &sibling_consumer_2)) {
return FusionDecision::Forbid(
absl::StrCat(sibling_consumer_1.name(), " and ",
sibling_consumer_2.name(), " are connected"));
}
RETURN_IF_NOT_FUSIBLE(ShapesCompatibleForMultiOutputFusion(
sibling_consumer_1, sibling_consumer_2));
RETURN_IF_NOT_FUSIBLE(ParameterSlicesAreNonOverlapping(
sibling_consumer_1, sibling_consumer_2, &common_producer));
RETURN_IF_NOT_FUSIBLE(LegalToFuse(sibling_consumer_1, sibling_consumer_2,
device_info, fusion_info_cache));
return FusionDecision::Allow();
}
}  // namespace
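// Fusion mutates the graph, so the reachability map must be rebuilt after
// every successful fusion step.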
void MultiOutputFusion::RecomputeReachability() {
reachability_ = HloDfsReachability::Build(computation_);
}
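// Fuses sibling consumers of `parent` into multi-output fusions: each
// candidate pair consists of an existing fusion and another sibling that is
// merged into it. Returns true if anything was fused.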
bool MultiOutputFusion::FuseSiblings(HloInstruction* parent,
FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis) {
const HloComputation* computation = parent->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
if (!IsProfitableOperand(parent)) {
VLOG(3) << "Operand " << parent->ToShortString() << " is not profitable";
return false;
}
bool changed = false;
std::vector<HloInstruction*> siblings;
absl::c_copy_if(parent->users(), std::back_inserter(siblings),
IsSiblingFusionCandidate);
absl::c_stable_sort(siblings,
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) > FusionPriority(b);
});
for (auto i = siblings.begin(); i != siblings.end(); ++i) {
VLOG(3) << "Considering " << (*i)->name();
if ((*i)->opcode() != HloOpcode::kFusion) {
continue;
}
for (auto j = i + 1; j != siblings.end();) {
VLOG(3) << "Considering " << (*i)->name() << " and " << (*j)->name();
if (auto fusible = CanFuseSiblings(**i, **j, *parent, *reachability_,
fusion_info_cache, device_info_);
!fusible) {
if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not fusing siblings |", (**i).name(), "| and |",
(**j).name(), "| due to: ", fusible.Explain()),
            // Arbitrarily pick one sibling as the "consumer" for the dump.
            /*consumer=*/**i,
            /*producer=*/parent);
}
++j;
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing siblings %s and %s.",
(*i)->name(), (*j)->name());
})) {
++j;
continue;
}
VLOG(2) << "Fuse siblings " << (*i)->name() << " and " << (*j)->name();
fusion_info_cache->Invalidate(*i);
fusion_info_cache->Invalidate(*j);
HloInstruction* remaining = *i;
HloInstruction* fused = *j;
TF_CHECK_OK(cost_analysis->RemoveInstruction(remaining));
TF_CHECK_OK(cost_analysis->RemoveInstruction(fused));
DumpFusionState(*remaining,
absl::StrCat("About to fuse sibling |", fused->name(),
"| into sibling |", remaining->name(),
"| inside multi-output fusion"),
fused);
if (fused->opcode() == HloOpcode::kFusion) {
remaining->MergeFusionInstructionIntoMultiOutput(fused);
if (fused->IsInputFusion()) {
remaining->set_fusion_kind(HloInstruction::FusionKind::kInput);
}
} else {
remaining->FuseInstructionIntoMultiOutput(fused);
CHECK_EQ(0, fused->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(fused));
}
DumpFusionState(*remaining,
absl::StrCat("Fused into |", remaining->name(),
"| inside multi-output fusion"));
TF_CHECK_OK(cost_analysis->RevisitInstruction(remaining));
changed = true;
      // erase() invalidates iterators at and after `j`; continue the scan
      // from the element that followed the fused sibling.
      j = siblings.erase(j);
RecomputeReachability();
}
}
return changed;
}
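// Performs multi-output fusion on computation_: for each producer (visited
// uses-before-defs), first tries to fuse its sibling consumers, then tries
// to fuse the producer itself into one of its consumers. Returns true if
// the computation changed.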
absl::StatusOr<bool> MultiOutputFusion::DoMultiOutputFusion() {
bool changed = false;
RecomputeReachability();
  GpuHloCostAnalysis cost_analysis({shape_size_function_,
                                    /*per_second_rates=*/{},
                                    /*min_latencies_seconds=*/{},
                                    /*count_multiple_input_accesses=*/true},
                                   device_info_);
TF_RETURN_IF_ERROR(computation_->Accept(&cost_analysis));
std::vector<HloInstruction*> defs_before_uses =
computation_->MakeInstructionPostOrder();
FusionInfoCache fusion_info_cache;
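  // Traverse the HLO in uses-before-defs order.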
for (auto it = defs_before_uses.rbegin(); it != defs_before_uses.rend();
++it) {
auto* producer = *it;
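    // Never multi-output fuse constants. To the extent that we want to fuse
    // constants, that should be handled by the regular fusion pass.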
if (producer->opcode() == HloOpcode::kConstant) {
VLOG(3) << producer->name() << " is a constant.";
continue;
}
if (producer->IsCustomFusion()) {
continue;
}
if (FuseSiblings(producer, &fusion_info_cache, &cost_analysis)) {
changed = true;
}
const auto candidates = GetProducerConsumerMultiOutputFusionCandidates(
producer, *reachability_, &fusion_info_cache, device_info_,
&cost_analysis);
auto* consumer_for_fusion = SelectPreferredFusionCandidate(candidates);
if (consumer_for_fusion == nullptr) {
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing %s and %s.", producer->name(),
consumer_for_fusion->name());
})) {
continue;
}
changed = true;
fusion_info_cache.Invalidate(producer);
fusion_info_cache.Invalidate(consumer_for_fusion);
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(consumer_for_fusion));
HloInstruction* input_fusion;
if (consumer_for_fusion->opcode() == HloOpcode::kFusion) {
input_fusion = consumer_for_fusion;
VLOG(2) << "Fuse producer " << producer->name() << " into its consumer "
<< consumer_for_fusion->name();
} else {
input_fusion = computation_->AddInstruction(HloInstruction::CreateFusion(
consumer_for_fusion->shape(),
ChooseFusionKind(*producer, *consumer_for_fusion),
consumer_for_fusion));
VLOG(2) << "Fuse producer " << producer->name() << " and its consumer "
<< consumer_for_fusion->name() << " into "
<< input_fusion->name();
TF_CHECK_OK(
computation_->ReplaceInstruction(consumer_for_fusion, input_fusion));
}
DumpFusionState(*input_fusion,
absl::StrCat("About to fuse producer |", producer->name(),
"| into consumer |", input_fusion->name(),
"| inside multi-output fusion"),
producer);
if (producer->opcode() == HloOpcode::kFusion) {
input_fusion->MergeFusionInstructionIntoMultiOutput(producer);
} else {
input_fusion->FuseInstructionIntoMultiOutput(producer);
CHECK_EQ(0, producer->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(producer));
}
TF_RETURN_IF_ERROR(cost_analysis.RevisitInstruction(input_fusion));
DumpFusionState(*input_fusion,
absl::StrCat("Fused into |", input_fusion->name(),
"| inside multi-output fusion"));
RecomputeReachability();
}
return changed;
}
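// Registers the current fusion state for visualization when
// --xla_dump_fusion_visualization is enabled.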
void MultiOutputFusion::DumpFusionState(const HloInstruction& consumer,
absl::string_view label,
const HloInstruction* producer) {
if (consumer.GetModule()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {
RegisterFusionState(*computation_, label, consumer, producer);
}
}
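// Runs multi-output fusion on every fusible computation of the module.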
absl::StatusOr<bool> MultiOutputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : GetFusibleComputations(*module, execution_threads)) {
computation_ = computation;
TF_ASSIGN_OR_RETURN(bool computation_changed, DoMultiOutputFusion());
changed |= computation_changed;
}
return changed;
}
}  // namespace gpu
}  // namespace xla

#include "xla/service/gpu/transforms/multi_output_fusion.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace m = ::xla::match;
class MultiOutputFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
MultiOutputFusion mof_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
void CheckMultiOutputFusion(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
MultiOutputFusion{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()},
expected);
}
};
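// Module prelude shared by the HLO snippets below; provides scalar add and
// multiply reducers.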
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
static int64_t CountMultiOutputFusions(const HloModule* module) {
int multi_output_fusion_count = 0;
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instr : computation->instructions()) {
if (instr->IsMultiOutputFusion()) {
multi_output_fusion_count++;
}
}
}
return multi_output_fusion_count;
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(1)
fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation
reduce.2 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion, reduce.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[6400]{0} parameter(1)
mul = f32[6400]{0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[6400]{0} parameter(1)
r1 = f32[64,100]{0,1} reshape(p1.2)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[] reduce(r1, const.2), dimensions={1,0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[6400]{0} parameter(1)
fusion.1 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[]) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ReduceMofDifferentTypes) {
const char* hlo = R"(
HloModule module
scalar_add_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT add.1 = f32[] add(scalar_lhs.1, scalar_rhs.1)
}
scalar_add_computation_f16 {
scalar_lhs.0 = f16[] parameter(0)
scalar_rhs.0 = f16[] parameter(1)
ROOT add.0 = f16[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
param_0.2 = f32[128,512,28,28]{3,2,1,0} parameter(0)
c.1 = f16[128,512,28,28]{3,2,1,0} convert(param_0.2)
const.0 = f16[] constant(0)
ROOT reduce.0 = f16[512]{0} reduce(c.1, const.0), dimensions={0,2,3}, to_apply=scalar_add_computation_f16
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(0)
reduce.1 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
fusion = f16[512]{0} fusion(p1), kind=kInput, calls=fused_computation
ROOT root = (f32[512]{0}, f16[512]{0}) tuple(reduce.1, fusion)
})";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0,1}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10]{0} reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1.3 = f32[10,10]{1,0} parameter(1)
fusion.1 = f32[] fusion(p0, p1.3), kind=kInput, calls=fused_computation_1
p2 = f32[] parameter(2)
fusion.2 = f32[10]{0} fusion(p2, p1.3), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[10]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[512]{0} reduce(p1.2, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
fusion.1 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionNoSiblingFusionForCommonScalar) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
param_0.87 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1.4620 = s32[] parameter(1)
constant_3949 = s32[] constant(0)
compare.1026 = pred[] compare(param_1.4620, constant_3949), direction=LT
constant_5437 = s32[] constant(32)
add.6859 = s32[] add(param_1.4620, constant_5437)
select.1599 = s32[] select(compare.1026, add.6859, param_1.4620)
dynamic-slice.59 = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0.87, select.1599, constant_3949, constant_3949), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41089 = bf16[4096,16384]{1,0} bitcast(dynamic-slice.59)
}
fused_computation_2 {
param_0 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1 = s32[] parameter(1)
constant = s32[] constant(0)
compare = pred[] compare(param_1, constant), direction=LT
constant.32 = s32[] constant(32)
add = s32[] add(param_1, constant.32)
select = s32[] select(compare, add, param_1)
dynamic-slice = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0, select, constant, constant), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41087 = bf16[4096,16384]{1,0} bitcast(dynamic-slice)
}
ENTRY entry {
p0 = s32[] parameter(0)
p1 = bf16[32,4096,16384]{2,1,0} parameter(1)
p2 = bf16[32,4096,16384]{2,1,0} parameter(2)
fusion.1 = bf16[4096,16384]{1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation_1
fusion.2 = bf16[4096,16384]{1,0} fusion(p2, p0), kind=kLoop, calls=fused_computation_2
ROOT root = (bf16[4096,16384]{1,0}, bf16[4096,16384]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingReduceAndReduceMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation (p0: f32[128,512,28,28]) -> (f32[512], f32[512]) {
const.1 = f32[] constant(1)
p0.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(f32[128,512,28,28]{3,2,1,0} p0.1, f32[128,512,28,28]{3,2,1,0} p0.1)
reduce.1 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} mul, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
reduce.2 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} p0.1, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT tuple = (f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} reduce.1, f32[512]{0} reduce.2)
}
ENTRY entry (p0: f32[128,512,28,28]) -> (f32[512], f32[512], f32[512]) {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
const = f32[] constant(1)
fusion = (f32[512]{0}, f32[512]{0}) fusion(f32[128,512,28,28]{3,2,1,0} p0), kind=kInput, calls=fused_computation
get-tuple-element = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=0
get-tuple-element.1 = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=1
reduce.3 = f32[512]{0} reduce(p0, const), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} get-tuple-element, f32[512]{0} get-tuple-element.1, f32[512]{0} reduce.3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingFusionCheckAgainstReduceOperand) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
reduce.1 = f32[] reduce(p1.1, const.1), dimensions={0,1}, to_apply=scalar_add_computation
ROOT tuple = (f32[10,10], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10] reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[10,10]{1,0} parameter(1)
p2 = f32[] parameter(2)
fusion.1 = (f32[10,10], f32[]) fusion(p0, p1), kind=kInput, calls=fused_computation_1
get-tuple-element.1 = f32[10,10] get-tuple-element((f32[10,10], f32[]) fusion.1), index=0
get-tuple-element.2 = f32[] get-tuple-element((f32[10,10], f32[]) fusion.1), index=1
fusion.2 = f32[10] fusion(p2, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[10,10], f32[], f32[10]) tuple(get-tuple-element.1, get-tuple-element.2, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, LoopVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})"))
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, InputVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.1117 {
param_0.2433 = f32[] parameter(0)
param_1.2571 = f32[] parameter(1)
compare.1770 = pred[] compare(param_0.2433, param_1.2571), direction=LE
select.682 = f32[] select(compare.1770, param_0.2433, param_1.2571)
compare.1303.clone.1 = pred[] compare(param_0.2433, param_1.2571), direction=EQ
param_2.6460 = s32[] parameter(2)
param_3.6755 = s32[] parameter(3)
minimum.633.clone.1 = s32[] minimum(param_2.6460, param_3.6755)
select.398.clone.1 = s32[] select(compare.1770, param_2.6460, param_3.6755)
select.397.clone.1 = s32[] select(compare.1303.clone.1, minimum.633.clone.1, select.398.clone.1)
ROOT tuple.151 = (f32[], s32[]) tuple(select.682, select.397.clone.1)
}
minmax_func.223 {
lhs_value.224 = f32[] parameter(0)
rhs_value.226 = f32[] parameter(2)
lhs_index.225 = s32[] parameter(1)
rhs_index.227 = s32[] parameter(3)
ROOT fusion.1117 = (f32[], s32[]) fusion(lhs_value.224, rhs_value.226, lhs_index.225, rhs_index.227), kind=kLoop, calls=fused_computation.1117
}
fused_computation.73 {
bitcast.86661 = f32[3,1024,300]{2,1,0} parameter(0)
iota.734 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97555 = s32[3,1024,300]{2,1,0} bitcast(iota.734)
constant_3917 = f32[] constant(inf)
constant_3918 = s32[] constant(0)
ROOT reduce.1069 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86661, bitcast.97555, constant_3917, constant_3918), dimensions={2}, to_apply=minmax_func.223
}
fused_computation.84 {
bitcast.86676 = f32[3,1024,300]{2,1,0} parameter(0)
iota.732 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97553 = s32[3,1024,300]{2,1,0} bitcast(iota.732)
constant_3915 = f32[] constant(inf)
constant_3916 = s32[] constant(0)
ROOT reduce.1070 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86676, bitcast.97553, constant_3915, constant_3916), dimensions={2}, to_apply=minmax_func.223
}
ENTRY e {
p0 = f32[3,1024,300]{2,1,0} parameter(0)
fusion.84 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.84
gte.391 = s32[3,1024]{1,0} get-tuple-element(fusion.84), index=1
fusion.73 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.73
gte.393 = s32[3,1024]{1,0} get-tuple-element(fusion.73), index=1
ROOT r = s32[3,1024]{1,0} add(gte.391, gte.393)
})"))
.value();
EXPECT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->parameter_instruction(0)->user_count(),
1);
const HloInstruction* fusion =
module->entry_computation()->parameter_instruction(0)->users()[0];
EXPECT_THAT(fusion, GmockMatch(m::Fusion()));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionTwoLoops) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopsDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
ROOT mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2), dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2),
dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingMultiOutputLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,16]{1,0} parameter(0)
mul = f32[8,16]{1,0} multiply(p0.1, p0.1)
exp = f32[8,16]{1,0} exponential(p0.1)
ROOT tuple = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,16]{1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,16]{1,0} broadcast(const.2),
dimensions={}
add = f32[8,16]{1,0} add(p0.2, broadcast)
ROOT tuple.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(add, broadcast)
}
ENTRY entry {
p0 = f32[8,16]{1,0} parameter(0)
fusion.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=1
gte2 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=0
gte3 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=1
ROOT root = (f32[8,16]{1,0}, f32[8,16]{1,0}, f32[8,16]{1,0},
f32[8,16]{1,0})
tuple(gte0, gte1, gte2, gte3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add(), m::Broadcast())));
}
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingLoopAndMultiOutputLoopDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2),
dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, SiblingFusionBitcastAndLoopFusionNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation_1 {
p0.1 = f32[2048,16000]{1,0} parameter(0)
bitcast = f32[2048,1,16000]{2,1,0} bitcast(p0.1)
ROOT exp = f32[2048,1,16000]{2,1,0} exponential(bitcast)
}
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
fusion = f32[2048,1,16000]{2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation_1
bitcast = f32[16000,1,2048]{2,1,0} bitcast(param_0)
ROOT tuple.143 = (f32[16000,1,2048]{2,1,0}, f32[2048,1,16000]{2,1,0}) tuple(bitcast, fusion)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionBitcastAndElementwiseNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
convert = bf16[2048,16000]{1,0} convert(param_0)
bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert)
ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionElementwiseAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Exp())));
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Add())));
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32,32]{2,1,0})
tuple(gte1, gte1, select)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Select())));
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2,
f32[2,2,2]{2,1,0} p0.2)
broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise
fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce
ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionFp16LoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f16[32,32,32]{2,1,0} parameter(1)
c0 = f16[] constant(0)
broadcast = f16[32,32,32]{2,1,0} broadcast(f16[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f16[32,32,32]{2,1,0} p1.1,
f16[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f16[32,32,32]{2,1,0} parameter(0)
ROOT select = f16[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f16[32,32,32]{2,1,0} p0.1, f16[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f16[32,32,32]{2,1,0} parameter(0)
convert = f32[32,32,32]{2,1,0} convert(p0.2)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(convert, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(convert, convert)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f16[32,32,32]{2,1,0} parameter(0)
p1 = f16[32,32,32]{2,1,0} parameter(1)
select = f16[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f16[32,32,32]{2,1,0})
tuple(gte1, gte1, select)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Select())));
}
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionReduceUnfriendlyLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
transpose = f16[128,32,32,1024]{3,2,1,0} transpose(p1.1), dimensions={0,2,3,1}
c0 = f16[] constant(0)
broadcast = f16[128,32,32,1024]{3,2,1,0} broadcast(c0), dimensions={}
greater-than = pred[128,32,32,1024]{3,2,1,0} compare(transpose, broadcast), direction=GT
ROOT root = f16[128,32,32,1024]{3,2,1,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
convert = f32[128,32,32,1024]{3,2,1,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,1,2}, to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,32,32,1024]{3,2,1,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,32,32,1024]{3,2,1,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionAvoidsCycles) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0, p1)
}
fused_mul {
p2 = f32[64,64,64]{2,1,0} parameter(0)
p3 = f32[64,64,64]{2,1,0} parameter(1)
ROOT multiply = f32[64,64,64]{2,1,0} multiply(p2, p3)
}
fused_reduce_1 {
p4 = f32[32,32,32]{2,1,0} parameter(0)
p5 = f32[64,64,64]{2,1,0} parameter(1)
slice = f32[32,32,32]{2,1,0} slice(p5), slice={[0:32], [0:32], [0:32]}
add = f32[32,32,32]{2,1,0} add(p4, slice)
c0 = f32[] constant(0)
ROOT r1 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
}
fused_reduce_2 {
p6 = f32[32,32,32]{2,1,0} parameter(0)
p7 = f32[64,64,64]{2,1,0} parameter(1)
c0 = f32[] constant(0)
pad = f32[64,64,64]{2,1,0} pad(p6, c0), padding=16_16x16_16x16_16
mul = f32[64,64,64]{2,1,0} multiply(pad, p7)
ROOT r1 = f32[64,64]{1,0} reduce(mul, c0), dimensions={2},
to_apply=scalar_add_computation
}
ENTRY reduce {
p8 = f32[32,32,32]{2,1,0} parameter(0)
p9 = f32[64,64,64]{2,1,0} parameter(1)
add = f32[32,32,32]{2,1,0} fusion(p8, p8), kind=kLoop, calls=fused_add
mul = f32[64,64,64]{2,1,0} fusion(p9, p9), kind=kLoop, calls=fused_mul
reduce1 = f32[32,32]{1,0} fusion(add, mul), kind=kInput,
calls=fused_reduce_1
reduce2 = f32[64,64]{1,0} fusion(add, mul), kind=kInput,
calls=fused_reduce_2
ROOT root = (f32[32,32,32]{2,1,0}, f32[32,32]{1,0}, f32[64,64]{1,0},
f32[64,64,64]{2,1,0}) tuple(add, reduce1, reduce2, mul)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
EXPECT_EQ(1, CountMultiOutputFusions(module.get()));
}
TEST_F(MultiOutputFusionTest, PreferFuseProducerIntoFusionConsumer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0, p1)
}
fused_reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[64,64,64]{2,1,0} parameter(1)
slice = f32[32,32,32]{2,1,0} slice(p1), slice={[0:32], [0:32], [0:32]}
add = f32[32,32,32]{2,1,0} add(p0, slice)
c0 = f32[] constant(0)
ROOT r1 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[64,64,64]{2,1,0} parameter(1)
add = f32[32,32,32]{2,1,0} fusion(p0, p0), kind=kLoop, calls=fused_add
c0 = f32[] constant(0)
reduce2 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
reduce = f32[32,32]{1,0} fusion(add, p1), kind=kInput, calls=fused_reduce
ROOT root = (f32[32,32,32]{2,1,0}, f32[32,32]{1,0}, f32[32,32]{1,0})
tuple(add, reduce, reduce2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
  EXPECT_EQ(1, CountMultiOutputFusions(module.get()));
}
TEST_F(MultiOutputFusionTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = 200;
ASSERT_GT(kNumParams, MaxOperandsAndOutputsPerFusion());
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> params;
for (int64_t i = 0; i < kNumParams; ++i) {
params.push_back(
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
}
auto make_fusion = [&](HloInstruction* x, HloInstruction* y) {
HloComputation::Builder sub_builder("subcomp");
auto* p0 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p"));
auto* p1 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p"));
sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloComputation* subcomp =
module->AddEmbeddedComputation(sub_builder.Build());
return HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {x, y}, subcomp);
};
auto* sum = b.AddInstruction(make_fusion(params[0], params[1]));
for (int64_t i = 2; i < kNumParams; ++i) {
sum = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, sum,
b.AddInstruction(make_fusion(params[i - 1], params[i]))));
}
auto computation = module->AddEntryComputation(b.Build());
EXPECT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
for (const HloInstruction* instr : computation->instructions()) {
EXPECT_LE(instr->operand_count() + ShapeUtil::SubshapeCount(instr->shape()),
MaxOperandsAndOutputsPerFusion())
<< instr->ToString();
}
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDUS) {
auto module = ParseAndReturnVerifiedModule(R"(HloModule dus_mof
fusion.1 {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
fusion.2 {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
ENTRY entry {
p.00 = f16[50,96,1024]{2,1,0} parameter(0)
p.01 = f16[50,96,1024]{2,1,0} parameter(1)
p.1 = f16[1,96,1024]{2,1,0} parameter(2)
f1 = f16[50,96,1024] fusion(p.00, p.1), kind=kLoop, calls=fusion.1
f2 = f16[50,96,1024] fusion(p.01, p.1), kind=kLoop, calls=fusion.2
ROOT tuple = (f16[50,96,1024],f16[50,96,1024]) tuple(f1, f2)
})")
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, SharedMemoryBudget) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation1 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation2 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation3 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation4 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation5 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation6 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation7 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation8 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation9 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
ENTRY computation {
zero = f32[] constant(0)
param0 = f32[64,64] parameter(0)
param1 = f32[64,64] parameter(1)
param2 = f32[64,64] parameter(2)
param3 = f32[64,64] parameter(3)
param4 = f32[64,64] parameter(4)
param5 = f32[64,64] parameter(5)
param6 = f32[64,64] parameter(6)
param7 = f32[64,64] parameter(7)
param8 = f32[64,64] parameter(8)
param9 = f32[64,64] parameter(9)
out0 = f32[64] fusion(param0, param1, zero), kind=kInput, calls=fused_computation0
out1 = f32[64] fusion(param1, param2, zero), kind=kInput, calls=fused_computation1
out2 = f32[64] fusion(param2, param3, zero), kind=kInput, calls=fused_computation2
out3 = f32[64] fusion(param3, param4, zero), kind=kInput, calls=fused_computation3
out4 = f32[64] fusion(param4, param5, zero), kind=kInput, calls=fused_computation4
out5 = f32[64] fusion(param5, param6, zero), kind=kInput, calls=fused_computation5
out6 = f32[64] fusion(param6, param7, zero), kind=kInput, calls=fused_computation6
out7 = f32[64] fusion(param7, param8, zero), kind=kInput, calls=fused_computation7
out8 = f32[64] fusion(param8, param9, zero), kind=kInput, calls=fused_computation8
out9 = f32[64] fusion(param9, param0, zero), kind=kInput, calls=fused_computation9
ROOT out = (f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64]) tuple(f32[64] out0, f32[64] out1, f32[64] out2, f32[64] out3, f32[64] out4, f32[64] out5, f32[64] out6, f32[64] out7, f32[64] out8, f32[64] out9)
}
)"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(5, CountMultiOutputFusions(module.get()));
}
TEST_F(MultiOutputFusionTest, DoNotGroupTooManyReductions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation1 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation2 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation3 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation4 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation5 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation6 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation7 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation8 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation9 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
ENTRY computation {
zero = f32[] constant(0)
param0 = f32[64,64] parameter(0)
param1 = f32[64,64] parameter(1)
param2 = f32[64,64] parameter(2)
param3 = f32[64,64] parameter(3)
param4 = f32[64,64] parameter(4)
param5 = f32[64,64] parameter(5)
param6 = f32[64,64] parameter(6)
param7 = f32[64,64] parameter(7)
param8 = f32[64,64] parameter(8)
param9 = f32[64,64] parameter(9)
out0 = f32[64] fusion(param0, param1, zero), kind=kInput, calls=fused_computation0
out1 = f32[64] fusion(param1, param2, zero), kind=kInput, calls=fused_computation1
out2 = f32[64] fusion(param2, param3, zero), kind=kInput, calls=fused_computation2
out3 = f32[64] fusion(param3, param4, zero), kind=kInput, calls=fused_computation3
out4 = f32[64] fusion(param4, param5, zero), kind=kInput, calls=fused_computation4
out5 = f32[64] fusion(param5, param6, zero), kind=kInput, calls=fused_computation5
out6 = f32[64] fusion(param6, param7, zero), kind=kInput, calls=fused_computation6
out7 = f32[64] fusion(param7, param8, zero), kind=kInput, calls=fused_computation7
out8 = f32[64] fusion(param8, param9, zero), kind=kInput, calls=fused_computation8
out9 = f32[64] fusion(param9, param0, zero), kind=kInput, calls=fused_computation9
ROOT out = (f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64]) tuple(f32[64] out0, f32[64] out1, f32[64] out2, f32[64] out3, f32[64] out4, f32[64] out5, f32[64] out6, f32[64] out7, f32[64] out8, f32[64] out9)
}
)"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(2, CountMultiOutputFusions(module.get()));
}
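// Fusing the two f64 reduction fusions below would require more shared
// memory than a single kernel may use, so the pass must leave them apart.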
TEST_F(MultiOutputFusionTest, NoFusionToAvoidUsingTooMuchSharedMemory) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule xla_computation_update_step.10931
%scalar_add_computation.1 (scalar_lhs.1: f64[], scalar_rhs.1: f64[]) -> f64[] {
%scalar_lhs.1 = f64[] parameter(0)
%scalar_rhs.1 = f64[] parameter(1)
ROOT %add.1257 = f64[] add(f64[] %scalar_lhs.1, f64[] %scalar_rhs.1)
}
%fused_computation.1 (param_0.8: f64[64,64], param_1.11: f64[64,64], param_2.9: f64[64,64]) -> (f64[64], f64[64]) {
%param_0.8 = f64[64,64]{1,0} parameter(0)
%param_1.11 = f64[64,64]{1,0} parameter(1)
%multiply.2 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_0.8, f64[64,64]{1,0} %param_1.11)
%constant_5217.3 = f64[] constant(0)
%broadcast.1 = f64[64,64]{1,0} broadcast(f64[] %constant_5217.3), dimensions={}
%multiply.0 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %multiply.2, f64[64,64]{1,0} %broadcast.1)
%reduce.0 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.0, f64[] %constant_5217.3), dimensions={0}, to_apply=%scalar_add_computation.1
%param_2.9 = f64[64,64]{1,0} parameter(2)
%multiply.1514.clone.0.clone.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_2.9, f64[64,64]{1,0} %param_1.11)
%constant_5217.1.clone.1 = f64[] constant(0)
%broadcast.0.clone.1 = f64[64,64]{1,0} broadcast(f64[] %constant_5217.1.clone.1), dimensions={}
%multiply.1341.clone.0.clone.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %multiply.1514.clone.0.clone.1, f64[64,64]{1,0} %broadcast.0.clone.1)
%reduce.630.clone.0.clone.1 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.1341.clone.0.clone.1, f64[] %constant_5217.1.clone.1), dimensions={0}, to_apply=%scalar_add_computation.1
ROOT %tuple = (f64[64]{0}, f64[64]{0}) tuple(f64[64]{0} %reduce.0, f64[64]{0} %reduce.630.clone.0.clone.1)
}
%primitive_computation_add__1.6426 (parameter.6427: f64[], parameter.6428: f64[]) -> f64[] {
%parameter.6427 = f64[] parameter(0)
%parameter.6428 = f64[] parameter(1)
ROOT %add.6429 = f64[] add(f64[] %parameter.6427, f64[] %parameter.6428)
}
%fused_computation.2 (param_0.7: f64[64,64], param_1.9: f64[64,64]) -> f64[64] {
%param_0.7 = f64[64,64]{1,0} parameter(0)
%param_1.9 = f64[64,64]{1,0} parameter(1)
%multiply.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_0.7, f64[64,64]{1,0} %param_1.9)
%constant_5217.2 = f64[] constant(0)
ROOT %reduce.740.clone.0 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.1, f64[] %constant_5217.2), dimensions={0}, to_apply=%primitive_computation_add__1.6426
}
ENTRY %reproducer (param_0.1090: f64[64,64], param_1.1377: f64[64,64], param_2.1948: f64[64,64]) -> (f64[64], f64[64], f64[64]) {
%param_0.1090 = f64[64,64]{1,0} parameter(0)
%param_1.1377 = f64[64,64]{1,0} parameter(1)
%param_2.1948 = f64[64,64]{1,0} parameter(2)
%fusion.1 = (f64[64]{0}, f64[64]{0}) fusion(f64[64,64]{1,0} %param_0.1090, f64[64,64]{1,0} %param_1.1377, f64[64,64]{1,0} %param_2.1948), kind=kInput, calls=%fused_computation.1
%get-tuple-element = f64[64]{0} get-tuple-element((f64[64]{0}, f64[64]{0}) %fusion.1), index=0
%fusion.2 = f64[64]{0} fusion(f64[64,64]{1,0} %param_0.1090, f64[64,64]{1,0} %param_1.1377), kind=kInput, calls=%fused_computation.2
%get-tuple-element.1 = f64[64]{0} get-tuple-element((f64[64]{0}, f64[64]{0}) %fusion.1), index=1
ROOT %tuple.428 = (f64[64]{0}, f64[64]{0}, f64[64]{0}) tuple(f64[64]{0} %get-tuple-element, f64[64]{0} %fusion.2, f64[64]{0} %get-tuple-element.1)
}
)")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, NoFusionToAvoidCodeDuplication) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
and.reduce_sub_computation {
x = pred[] parameter(0)
y = pred[] parameter(1)
ROOT and = pred[] and(x, y)
}
fused_computation.1 {
param_4.658 = f32[2,20,256]{2,0,1} parameter(4)
slice.1385 = f32[2,1,256]{2,0,1} slice(param_4.658), slice={[0:2], [11:12], [0:256]}
constant.6847 = s32[] constant(0)
broadcast.4823 = s32[3]{0} broadcast(constant.6847), dimensions={}
param_9.415 = s32[3]{0} parameter(9)
compare.700 = pred[3]{0} compare(broadcast.4823, param_9.415), direction=LE
constant.6846 = pred[] constant(true)
reduce.221 = pred[] reduce(compare.700, constant.6846), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2933 = pred[2,1,256]{2,0,1} broadcast(reduce.221), dimensions={}
param_5.528 = f32[2,512]{1,0} parameter(5)
slice.1384 = f32[2,256]{1,0} slice(param_5.528), slice={[0:2], [0:256]}
bitcast.341 = f32[2,1,256]{2,0,1} bitcast(slice.1384)
constant.5418 = f32[] constant(0)
broadcast.3227 = f32[2,1,256]{2,0,1} broadcast(constant.5418), dimensions={}
select.173 = f32[2,1,256]{2,0,1} select(broadcast.2933, bitcast.341, broadcast.3227)
add.573 = f32[2,1,256]{2,0,1} add(slice.1385, select.173)
param_0.299 = s32[] parameter(0)
constant.5157 = s32[] constant(11)
dynamic-update-slice.189 = f32[2,20,256]{2,0,1} dynamic-update-slice(param_4.658, add.573, param_0.299, constant.5157, param_0.299)
slice.1383 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.189), slice={[0:2], [10:11], [0:256]}
constant.6800 = s32[] constant(0)
broadcast.4803 = s32[3]{0} broadcast(constant.6800), dimensions={}
param_8.484 = s32[3]{0} parameter(8)
compare.681 = pred[3]{0} compare(broadcast.4803, param_8.484), direction=LE
constant.6798 = pred[] constant(true)
reduce.203 = pred[] reduce(compare.681, constant.6798), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2932 = pred[2,1,256]{2,0,1} broadcast(reduce.203), dimensions={}
param_3.1169 = f32[2,512]{1,0} parameter(3)
slice.1382 = f32[2,256]{1,0} slice(param_3.1169), slice={[0:2], [0:256]}
bitcast.340 = f32[2,1,256]{2,0,1} bitcast(slice.1382)
select.172 = f32[2,1,256]{2,0,1} select(broadcast.2932, bitcast.340, broadcast.3227)
add.572 = f32[2,1,256]{2,0,1} add(slice.1383, select.172)
constant.5154 = s32[] constant(10)
dynamic-update-slice.188 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.189, add.572, param_0.299, constant.5154, param_0.299)
slice.1381 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.188), slice={[0:2], [9:10], [0:256]}
constant.6794 = s32[] constant(0)
broadcast.4801 = s32[3]{0} broadcast(constant.6794), dimensions={}
param_7.478 = s32[3]{0} parameter(7)
compare.679 = pred[3]{0} compare(broadcast.4801, param_7.478), direction=LE
constant.6793 = pred[] constant(true)
reduce.201 = pred[] reduce(compare.679, constant.6793), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2930 = pred[2,1,256]{2,0,1} broadcast(reduce.201), dimensions={}
param_2.1685 = f32[2,512]{1,0} parameter(2)
slice.1380 = f32[2,256]{1,0} slice(param_2.1685), slice={[0:2], [0:256]}
bitcast.339 = f32[2,1,256]{2,0,1} bitcast(slice.1380)
select.171 = f32[2,1,256]{2,0,1} select(broadcast.2930, bitcast.339, broadcast.3227)
add.571 = f32[2,1,256]{2,0,1} add(slice.1381, select.171)
constant.5153 = s32[] constant(9)
dynamic-update-slice.187 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.188, add.571, param_0.299, constant.5153, param_0.299)
slice.1379 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.187), slice={[0:2], [8:9], [0:256]}
constant.6788 = s32[] constant(0)
broadcast.4799 = s32[3]{0} broadcast(constant.6788), dimensions={}
param_6.495 = s32[3]{0} parameter(6)
compare.677 = pred[3]{0} compare(broadcast.4799, param_6.495), direction=LE
constant.6786 = pred[] constant(true)
reduce.199 = pred[] reduce(compare.677, constant.6786), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2929 = pred[2,1,256]{2,0,1} broadcast(reduce.199), dimensions={}
param_1.1408 = f32[2,512]{1,0} parameter(1)
slice.1378 = f32[2,256]{1,0} slice(param_1.1408), slice={[0:2], [0:256]}
bitcast.338 = f32[2,1,256]{2,0,1} bitcast(slice.1378)
select.170 = f32[2,1,256]{2,0,1} select(broadcast.2929, bitcast.338, broadcast.3227)
add.570 = f32[2,1,256]{2,0,1} add(slice.1379, select.170)
constant.5152 = s32[] constant(8)
ROOT dynamic-update-slice.186 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.187, add.570, param_0.299, constant.5152, param_0.299)
}
fused_computation.2 {
param_4.655 = f32[2,20,256]{2,0,1} parameter(4)
slice.1369 = f32[2,1,256]{2,0,1} slice(param_4.655), slice={[0:2], [7:8], [0:256]}
param_6.483 = pred[] parameter(6)
broadcast.2927 = pred[2,1,256]{2,0,1} broadcast(param_6.483), dimensions={}
param_5.525 = f32[2,512]{1,0} parameter(5)
slice.1368 = f32[2,256]{1,0} slice(param_5.525), slice={[0:2], [0:256]}
bitcast.333 = f32[2,1,256]{2,0,1} bitcast(slice.1368)
constant.5415 = f32[] constant(0)
broadcast.3225 = f32[2,1,256]{2,0,1} broadcast(constant.5415), dimensions={}
select.161 = f32[2,1,256]{2,0,1} select(broadcast.2927, bitcast.333, broadcast.3225)
add.549 = f32[2,1,256]{2,0,1} add(slice.1369, select.161)
param_0.265 = s32[] parameter(0)
constant.5151 = s32[] constant(7)
dynamic-update-slice.185 = f32[2,20,256]{2,0,1} dynamic-update-slice(param_4.655, add.549, param_0.265, constant.5151, param_0.265)
slice.1367 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.185), slice={[0:2], [6:7], [0:256]}
constant.6782 = s32[] constant(0)
broadcast.4797 = s32[3]{0} broadcast(constant.6782), dimensions={}
param_9.391 = s32[3]{0} parameter(9)
compare.675 = pred[3]{0} compare(broadcast.4797, param_9.391), direction=LE
constant.6781 = pred[] constant(true)
reduce.197 = pred[] reduce(compare.675, constant.6781), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2926 = pred[2,1,256]{2,0,1} broadcast(reduce.197), dimensions={}
param_3.1167 = f32[2,512]{1,0} parameter(3)
slice.1366 = f32[2,256]{1,0} slice(param_3.1167), slice={[0:2], [0:256]}
bitcast.332 = f32[2,1,256]{2,0,1} bitcast(slice.1366)
select.160 = f32[2,1,256]{2,0,1} select(broadcast.2926, bitcast.332, broadcast.3225)
add.548 = f32[2,1,256]{2,0,1} add(slice.1367, select.160)
constant.5150 = s32[] constant(6)
dynamic-update-slice.184 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.185, add.548, param_0.265, constant.5150, param_0.265)
slice.1365 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.184), slice={[0:2], [5:6], [0:256]}
constant.6776 = s32[] constant(0)
broadcast.4794 = s32[3]{0} broadcast(constant.6776), dimensions={}
param_8.464 = s32[3]{0} parameter(8)
compare.673 = pred[3]{0} compare(broadcast.4794, param_8.464), direction=LE
constant.6775 = pred[] constant(true)
reduce.195 = pred[] reduce(compare.673, constant.6775), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2925 = pred[2,1,256]{2,0,1} broadcast(reduce.195), dimensions={}
param_2.1684 = f32[2,512]{1,0} parameter(2)
slice.1364 = f32[2,256]{1,0} slice(param_2.1684), slice={[0:2], [0:256]}
bitcast.331 = f32[2,1,256]{2,0,1} bitcast(slice.1364)
select.159 = f32[2,1,256]{2,0,1} select(broadcast.2925, bitcast.331, broadcast.3225)
add.547 = f32[2,1,256]{2,0,1} add(slice.1365, select.159)
constant.5149 = s32[] constant(5)
dynamic-update-slice.183 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.184, add.547, param_0.265, constant.5149, param_0.265)
slice.1363 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.183), slice={[0:2], [4:5], [0:256]}
constant.6770 = s32[] constant(0)
broadcast.4792 = s32[3]{0} broadcast(constant.6770), dimensions={}
param_7.458 = s32[3]{0} parameter(7)
compare.671 = pred[3]{0} compare(broadcast.4792, param_7.458), direction=LE
constant.6769 = pred[] constant(true)
reduce.193 = pred[] reduce(compare.671, constant.6769), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2924 = pred[2,1,256]{2,0,1} broadcast(reduce.193), dimensions={}
param_1.1405 = f32[2,512]{1,0} parameter(1)
slice.1362 = f32[2,256]{1,0} slice(param_1.1405), slice={[0:2], [0:256]}
bitcast.330 = f32[2,1,256]{2,0,1} bitcast(slice.1362)
select.158 = f32[2,1,256]{2,0,1} select(broadcast.2924, bitcast.330, broadcast.3225)
add.546 = f32[2,1,256]{2,0,1} add(slice.1363, select.158)
constant.5148 = s32[] constant(4)
ROOT dynamic-update-slice.182 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.183, add.546, param_0.265, constant.5148, param_0.265)
}
ENTRY main {
param_0.0 = s32[] parameter(0)
param_1.0 = f32[2,512]{1,0} parameter(1)
param_2.0 = f32[2,512]{1,0} parameter(2)
param_3.0 = f32[2,512]{1,0} parameter(3)
param_4.0 = f32[2,20,256]{2,1,0} parameter(4)
param_5.0 = f32[2,512]{1,0} parameter(5)
param_6.0 = s32[3]{0} parameter(6)
param_7.0 = s32[3]{0} parameter(7)
param_8.0 = s32[3]{0} parameter(8)
param_9.0 = s32[3]{0} parameter(9)
fusion.1 = f32[2,20,256]{2,0,1} fusion(param_0.0, param_1.0, param_2.0, param_3.0, param_4.0, param_5.0, param_6.0, param_7.0, param_8.0, param_9.0), kind=kLoop, calls=fused_computation.1
param_10 = pred[] parameter(10)
fusion.2 = f32[2,20,256]{2,0,1} fusion(param_0.0, param_1.0, param_2.0, param_3.0, fusion.1, param_5.0, param_10, param_7.0, param_8.0, param_9.0), kind=kLoop, calls=fused_computation.2
ROOT root = (f32[2,20,256]{2,0,1}, f32[2,20,256]{2,0,1}) tuple(fusion.1, fusion.2)
}
)")
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
EXPECT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, DoNotFuseRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
no_op {
arg_empty_tuple = () parameter(0)
ROOT tuple = () tuple()
}
fused_computation {
param_0 = f32[] parameter(0)
ROOT convert = s32[] convert(param_0)
}
ENTRY main {
param_0 = f32[] parameter(0)
fusion = s32[] fusion(param_0), kind=kLoop, calls=fused_computation
tuple = () tuple()
conditional = () conditional(fusion, tuple, tuple), branch_computations={no_op, no_op}
constant = f32[] constant(1)
ROOT root = f32[] add(param_0, constant)
}
)")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
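// fusion.29 feeds both fusion.15 and fusion.4; duplicating its long
// reduce-window chain into a merged fusion is rejected by the cost model.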
TEST_F(MultiOutputFusionTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
region_3.63 {
Arg_0.64 = f32[] parameter(0)
Arg_1.65 = f32[] parameter(1)
ROOT add.66 = f32[] add(Arg_0.64, Arg_1.65)
}
fused_computation.29 {
param_0.161 = f32[5,32,32,1]{3,2,1,0} parameter(0)
multiply.208 = f32[5,32,32,1]{3,2,1,0} multiply(param_0.161, param_0.161)
bitcast.67 = f32[5,32,32]{2,1,0} bitcast(multiply.208)
constant.265 = f32[] constant(0)
reduce-window.81 = f32[5,30,31]{2,1,0} reduce-window(bitcast.67, constant.265), window={size=1x3x2}, to_apply=region_3.63
constant.264 = f32[] constant(0.166666672)
broadcast.204 = f32[5,30,31]{2,1,0} broadcast(constant.264), dimensions={}
multiply.205 = f32[5,30,31]{2,1,0} multiply(reduce-window.81, broadcast.204)
constant.263 = f32[] constant(0)
reduce-window.80 = f32[5,30,31]{2,1,0} reduce-window(multiply.205, constant.263), window={size=1x2x3 pad=0_0x0_1x1_1}, to_apply=region_3.63
constant.262 = f32[] constant(0.0138888899)
broadcast.201 = f32[5,30,31]{2,1,0} broadcast(constant.262), dimensions={}
multiply.204 = f32[5,30,31]{2,1,0} multiply(reduce-window.80, broadcast.201)
constant.261 = f32[] constant(0)
reduce-window.78 = f32[5,30,31]{2,1,0} reduce-window(multiply.204, constant.261), window={size=1x1x2 pad=0_0x0_0x0_1}, to_apply=region_3.63
constant.113 = f32[] constant(0.5)
broadcast.137 = f32[5,30,31]{2,1,0} broadcast(constant.113), dimensions={}
multiply.125 = f32[5,30,31]{2,1,0} multiply(reduce-window.78, broadcast.137)
constant.114 = f32[] constant(0)
ROOT reduce-window.17 = f32[5,30,31]{2,1,0} reduce-window(multiply.125, constant.114), window={size=1x2x1 pad=0_0x0_1x0_0}, to_apply=region_3.63
}
fused_computation.15 {
constant.108 = f32[] constant(0.5)
broadcast.105 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.108), dimensions={}
param_3.126 = f32[5,30,31]{2,1,0} parameter(3)
constant.295 = f32[] constant(0.25)
broadcast.234 = f32[5,30,31]{2,1,0} broadcast(constant.295), dimensions={}
multiply.242 = f32[5,30,31]{2,1,0} multiply(param_3.126, broadcast.234)
broadcast.233 = f32[5,5,30,31]{3,2,1,0} broadcast(multiply.242), dimensions={0,2,3}
param_2.154 = f32[5,30,31]{2,1,0} parameter(2)
multiply.241 = f32[5,30,31]{2,1,0} multiply(param_2.154, broadcast.234)
broadcast.232 = f32[5,5,30,31]{3,2,1,0} broadcast(multiply.241), dimensions={1,2,3}
multiply.240 = f32[5,5,30,31]{3,2,1,0} multiply(broadcast.233, broadcast.232)
param_1.188 = f32[5,5,30,31]{3,2,1,0} parameter(1)
constant.294 = f32[] constant(0.159154937)
broadcast.231 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.294), dimensions={}
multiply.239 = f32[5,5,30,31]{3,2,1,0} multiply(param_1.188, broadcast.231)
param_0.164 = f32[5,5,30,31]{3,2,1,0} parameter(0)
add.19 = f32[5,5,30,31]{3,2,1,0} add(multiply.239, param_0.164)
constant.293 = f32[] constant(0)
reduce-window.90 = f32[5,5,30,31]{3,2,1,0} reduce-window(add.19, constant.293), window={size=1x1x1x2 pad=0_0x0_0x0_0x0_1}, to_apply=region_3.63
constant.292 = f32[] constant(0.5)
broadcast.230 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.292), dimensions={}
multiply.238 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.90, broadcast.230)
constant.291 = f32[] constant(0)
reduce-window.89 = f32[5,5,30,31]{3,2,1,0} reduce-window(multiply.238, constant.291), window={size=1x1x2x1 pad=0_0x0_0x0_1x0_0}, to_apply=region_3.63
constant.290 = f32[] constant(0.25)
broadcast.229 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.290), dimensions={}
multiply.237 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.89, broadcast.229)
multiply.236 = f32[5,5,30,31]{3,2,1,0} multiply(multiply.237, multiply.237)
subtract.10 = f32[5,5,30,31]{3,2,1,0} subtract(multiply.240, multiply.236)
constant.289 = f32[] constant(0)
broadcast.228 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.289), dimensions={}
maximum.6 = f32[5,5,30,31]{3,2,1,0} maximum(subtract.10, broadcast.228)
sqrt.6 = f32[5,5,30,31]{3,2,1,0} sqrt(maximum.6)
constant.110 = f32[] constant(0)
broadcast.107 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.110), dimensions={}
compare.4 = pred[5,5,30,31]{3,2,1,0} compare(sqrt.6, broadcast.107), direction=EQ
constant.243 = f32[] constant(0.159154937)
broadcast.193 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.243), dimensions={}
multiply.194 = f32[5,5,30,31]{3,2,1,0} multiply(param_1.188, broadcast.193)
add.15 = f32[5,5,30,31]{3,2,1,0} add(multiply.194, param_0.164)
constant.242 = f32[] constant(0)
reduce-window.66 = f32[5,5,30,31]{3,2,1,0} reduce-window(add.15, constant.242), window={size=1x1x1x2 pad=0_0x0_0x0_0x0_1}, to_apply=region_3.63
constant.241 = f32[] constant(0.5)
broadcast.192 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.241), dimensions={}
multiply.193 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.66, broadcast.192)
constant.240 = f32[] constant(0)
reduce-window.65 = f32[5,5,30,31]{3,2,1,0} reduce-window(multiply.193, constant.240), window={size=1x1x2x1 pad=0_0x0_0x0_1x0_0}, to_apply=region_3.63
constant.239 = f32[] constant(0.25)
broadcast.191 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.239), dimensions={}
multiply.192 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.65, broadcast.191)
compare.3 = pred[5,5,30,31]{3,2,1,0} compare(multiply.192, broadcast.107), direction=EQ
and.1 = pred[5,5,30,31]{3,2,1,0} and(compare.4, compare.3)
constant.109 = f32[] constant(1.57079637)
broadcast.104 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.109), dimensions={}
atan2.1 = f32[5,5,30,31]{3,2,1,0} atan2(sqrt.6, multiply.192)
select.4 = f32[5,5,30,31]{3,2,1,0} select(and.1, broadcast.104, atan2.1)
constant.107 = f32[] constant(0.159154937)
broadcast.106 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.107), dimensions={}
multiply.100 = f32[5,5,30,31]{3,2,1,0} multiply(select.4, broadcast.106)
ROOT subtract.3 = f32[5,5,30,31]{3,2,1,0} subtract(broadcast.105, multiply.100)
}
fused_computation.4 {
param_0.172 = f32[5,30,31]{2,1,0} parameter(0)
constant.315 = f32[] constant(0.125)
broadcast.242 = f32[5,30,31]{2,1,0} broadcast(constant.315), dimensions={}
multiply.250 = f32[5,30,31]{2,1,0} multiply(param_0.172, broadcast.242)
constant.314 = f32[] constant(0)
reduce-window.100 = f32[5,30,31]{2,1,0} reduce-window(multiply.250, constant.314), window={size=1x3x3 pad=0_0x1_1x1_1}, to_apply=region_3.63
constant.79 = f32[] constant(0.055555556)
broadcast.85 = f32[5,30,31]{2,1,0} broadcast(constant.79), dimensions={}
multiply.80 = f32[5,30,31]{2,1,0} multiply(reduce-window.100, broadcast.85)
constant.81 = f32[] constant(0)
reduce-window.1 = f32[5,30,31]{2,1,0} reduce-window(multiply.80, constant.81), window={size=1x3x3 pad=0_0x1_1x1_1}, to_apply=region_3.63
constant.80 = f32[] constant(0.111111112)
broadcast.86 = f32[5,30,31]{2,1,0} broadcast(constant.80), dimensions={}
multiply.79 = f32[5,30,31]{2,1,0} multiply(reduce-window.1, broadcast.86)
bitcast.26 = f32[5,930]{1,0} bitcast(multiply.79)
ROOT reduce.8 = f32[5]{0} reduce(bitcast.26, constant.81), dimensions={1}, to_apply=region_3.63
}
ENTRY e {
Arg_0.1 = f32[5,32,32,1]{3,2,1,0} parameter(0)
p1 = f32[5,5,30,31]{3,2,1,0} parameter(1)
p2 = f32[5,5,30,31]{3,2,1,0} parameter(2)
p3 = f32[5,30,31]{2,1,0} parameter(3)
fusion.29 = f32[5,30,31]{2,1,0} fusion(Arg_0.1), kind=kLoop, calls=fused_computation.29
fusion.15 = f32[5,5,30,31]{3,2,1,0} fusion(p2, p1, p3, fusion.29), kind=kLoop, calls=fused_computation.15
ROOT fusion.4 = f32[5]{0} fusion(fusion.29), kind=kInput, calls=fused_computation.4
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
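// The two fusions slice disjoint halves of p0 ([0:100] vs. [100:200] in the
// minor dimension), so fusing them would not let any reads be shared.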
TEST_F(MultiOutputFusionTest, NoOverlappingRead) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation_1 {
p0.1 = f32[100,200]{1,0} parameter(0)
slice.0 = f32[50,100]{1,0} slice(p0.1), slice={[0:50],[0:100]}
mul = f32[50,100]{1,0} multiply(slice.0, slice.0)
exp = f32[50,100]{1,0} exponential(slice.0)
ROOT tuple = (f32[50,100]{1,0}, f32[50,100]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[100,200]{1,0} parameter(0)
slice.1 = f32[50,100]{1,0} slice(p0.2), slice={[0:50],[100:200]}
const.2 = f32[] constant(0)
broadcast = f32[50,100]{1,0} broadcast(const.2), dimensions={}
ROOT add = f32[50,100]{1,0} add(slice.1, broadcast)
}
ENTRY entry {
p0 = f32[100,200]{1,0} parameter(0)
fusion.1 = (f32[50,100]{1,0}, f32[50,100]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
gte0 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=1
fusion.2 = f32[50,100]{1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
ROOT root = (f32[50,100]{1,0}, f32[50,100]{1,0}, f32[50,100]{1,0})
tuple(gte0, gte1, fusion.2)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
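// Here the slices of p0 overlap ([50:150] vs. [20:120] in the minor
// dimension), so multi-output fusion can share the read and is performed.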
TEST_F(MultiOutputFusionTest, OverlappingRead) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation_1 {
p0.1 = f32[100,200]{1,0} parameter(0)
slice.0 = f32[50,100]{1,0} slice(p0.1), slice={[0:50],[50:150]}
mul = f32[50,100]{1,0} multiply(slice.0, slice.0)
exp = f32[50,100]{1,0} exponential(slice.0)
ROOT tuple = (f32[50,100]{1,0}, f32[50,100]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[100,200]{1,0} parameter(0)
slice.1 = f32[50,100]{1,0} slice(p0.2), slice={[30:80],[20:120]}
const.2 = f32[] constant(0)
broadcast = f32[50,100]{1,0} broadcast(const.2), dimensions={}
ROOT add = f32[50,100]{1,0} add(slice.1, broadcast)
}
ENTRY entry {
p0 = f32[100,200]{1,0} parameter(0)
fusion.1 = (f32[50,100]{1,0}, f32[50,100]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
gte0 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=1
fusion.2 = f32[50,100]{1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
ROOT root = (f32[50,100]{1,0}, f32[50,100]{1,0}, f32[50,100]{1,0})
tuple(gte0, gte1, fusion.2)
})")
.value();
EXPECT_TRUE(mof_.Run(module.get()).value());
}
class TransposeMultiOutputFusionTest : public MultiOutputFusionTest {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
MultiOutputFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_mlir_emitter_level(3);
return debug_options;
}
};
TEST_F(TransposeMultiOutputFusionTest, MultipleTransposes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
t1 = f32[32,16]{1,0} transpose(p), dimensions={1,0}
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(fusion, t1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, MultipleTransposesDifferentTypes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f16[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} convert(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f16[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
t1 = f16[32,16]{1,0} transpose(p), dimensions={1,0}
ROOT t = (f32[32,16]{1,0}, f16[32,16]{1,0}) tuple(fusion, t1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, TiledReduceTranspose) {
const char* hlo = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = add(lhs, rhs)
}
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
z = f32[] constant(0)
r1 = f32[32]{0} reduce(p, z), dimensions={0}, to_apply=add
ROOT t = (f32[32,16]{1,0}, f32[32]{0}) tuple(fusion, r1)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, IncompatibleTransposes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[18,16,32]{2,1,0} parameter(0)
param_1.1 = f32[32,16,18]{2,1,0} parameter(1)
s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0}
sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1)
exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1)
ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.1)
}
fused_computation.2 {
param_0.2 = f32[18,16,32]{2,1,0} parameter(0)
s.2 = f32[18,16,32]{2,1,0} sqrt(param_0.2)
ROOT t.2 = f32[18,32,16]{2,1,0} transpose(s.2), dimensions={0,2,1}
}
ENTRY main {
p = f32[18,16,32]{2,1,0} parameter(0)
p2 = f32[32,16,18]{2,1,0} parameter(1)
fusion = f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation
fusion2 = f32[18,32,16]{2,1,0} fusion(p), kind=kInput, calls=fused_computation.2
ROOT t = (f32[32,16,18]{2,1,0}, f32[18,32,16]{2,1,0}) tuple(fusion, fusion2)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, TransposesNoCSE) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[18,16,32]{2,1,0} parameter(0)
param_1.1 = f32[32,16,18]{2,1,0} parameter(1)
s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0}
sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1)
exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1)
exp.2 = f32[32,16,18]{2,1,0} exponential(sub.1)
ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.2)
}
fused_computation.2 {
param_0.2 = f32[18,16,32]{2,1,0} parameter(0)
s.2 = f32[18,16,32]{2,1,0} sqrt(param_0.2)
ROOT t.2 = f32[18,32,16]{2,1,0} transpose(s.2), dimensions={0,2,1}
}
ENTRY main {
p = f32[18,16,32]{2,1,0} parameter(0)
p2 = f32[32,16,18]{2,1,0} parameter(1)
fusion = f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation
fusion2 = f32[18,32,16]{2,1,0} fusion(p), kind=kInput, calls=fused_computation.2
ROOT t = (f32[32,16,18]{2,1,0}, f32[18,32,16]{2,1,0}) tuple(fusion, fusion2)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, TransposeAndInput) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
c1 = f32[16,32]{1,0} exponential(p)
ROOT t = (f32[32,16]{1,0}, f32[16,32]{1,0}) tuple(fusion, c1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, TransposeAndInputEpilogueFusion) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[1,16,32]{2,1,0} parameter(0)
s.1 = f32[1,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[1,32,16]{2,1,0} transpose(s.1), dimensions={0,2,1}
ROOT out = f32[32,16,1]{2,1,0} bitcast(t.1)
}
ENTRY main {
p = f32[1,16,32]{2,1,0} parameter(0)
fusion = f32[32,16,1]{2,1,0} fusion(p), kind=kInput, calls=fused_computation
c1 = f32[1,16,32]{2,1,0} exponential(p)
ROOT t = (f32[32,16,1]{2,1,0}, f32[1,16,32]{2,1,0}) tuple(fusion, c1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
class ReduceMultiOutputFusionTest : public MultiOutputFusionTest {};
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoop) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a, b)
}
fused_reduction {
p = f32[200] parameter(0)
z = f32[] constant(0)
e = f32[200] exponential(p)
ROOT r = f32[] reduce(e, z), dimensions={0}, to_apply=add
}
fused_elementwise {
p = f32[200] parameter(0)
ROOT r = f32[200] sqrt(p)
}
ENTRY computation {
p = f32[200] parameter(0)
o1 = f32[200] fusion(p), kind=kLoop, calls=fused_elementwise
o2 = f32[] fusion(p), kind=kInput, calls=fused_reduction
ROOT out = (f32[200], f32[]) tuple(o1, o2)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoopDifferentShape) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a, b)
}
fused_reduction {
p = f32[10,20] parameter(0)
z = f32[] constant(0)
e = f32[10,20] exponential(p)
b = f32[200] bitcast(e)
ROOT r = f32[] reduce(b, z), dimensions={0}, to_apply=add
}
fused_elementwise {
p = f32[10,20] parameter(0)
ROOT r = f32[10,20] sqrt(p)
}
ENTRY computation {
p = f32[10,20] parameter(0)
o1 = f32[10,20] fusion(p), kind=kLoop, calls=fused_elementwise
o2 = f32[] fusion(p), kind=kInput, calls=fused_reduction
ROOT out = (f32[10,20], f32[]) tuple(o1, o2)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoopDifferentShapeDifferentType) {
const char* hlo = R"(
HloModule module, entry_computation_layout={(f16[100,200]{1,0},f32[],f32[])->(f16[100,200]{1,0}, f32[])}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
fused_computation {
one_5 = f32[] constant(1)
one_b.5 = f32[100,200]{1,0} broadcast(one_5), dimensions={}
param_1.15 = f16[100,200]{1,0} parameter(1)
c.6 = f32[100,200]{1,0} convert(param_1.15)
param_0.11 = f32[] parameter(0)
b.6 = f32[100,200]{1,0} broadcast(param_0.11), dimensions={}
d.5 = f32[100,200]{1,0} divide(c.6, b.6)
a.6 = f32[100,200]{1,0} add(one_b.5, d.5)
bitcast.1 = f32[20000]{0} bitcast(a.6)
z_1 = f32[] constant(0)
ROOT r.1 = f32[] reduce(bitcast.1, z_1), dimensions={0}, to_apply=max
}
fused_computation.1 {
one_3 = f32[] constant(1)
one_b.3 = f32[100,200]{1,0} broadcast(one_3), dimensions={}
param_2.7 = f16[100,200]{1,0} parameter(2)
c.4 = f32[100,200]{1,0} convert(param_2.7)
param_1.10 = f32[] parameter(1)
b.4 = f32[100,200]{1,0} broadcast(param_1.10), dimensions={}
d.3 = f32[100,200]{1,0} divide(c.4, b.4)
a.4 = f32[100,200]{1,0} add(one_b.3, d.3)
param_0.8 = f32[] parameter(0)
output_scale_broadcast.1 = f32[100,200]{1,0} broadcast(param_0.8), dimensions={}
a_scaled.1 = f32[100,200]{1,0} multiply(a.4, output_scale_broadcast.1)
ROOT a_scaled_converted.1 = f16[100,200]{1,0} convert(a_scaled.1)
}
ENTRY computation {
output_scale = f32[] parameter(2)
input_scale = f32[] parameter(1)
p = f16[100,200]{1,0} parameter(0)
fusion.1 = f16[100,200]{1,0} fusion(output_scale, input_scale, p), kind=kLoop, calls=fused_computation.1
fusion = f32[] fusion(input_scale, p), kind=kInput, calls=fused_computation
ROOT out = (f16[100,200]{1,0}, f32[]) tuple(fusion.1, fusion)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, GetTupleElementMakeTupleSequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
ROOT tuple.30 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
}
ENTRY entry {
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT address_computation.7.0 = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/multi_output_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/multi_output_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e6e6d503-beb5-4872-bfe4-475bd4b96eb3 | cpp | tensorflow/tensorflow | broadcast_canonicalizer | third_party/xla/xla/service/broadcast_canonicalizer.cc | third_party/xla/xla/service/broadcast_canonicalizer_test.cc | #include "xla/service/broadcast_canonicalizer.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
BroadcastCanonicalizer::BroadcastCanonicalizer() {}
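// Broadcasts with an unsorted `dimensions` attribute are rewritten as a
// broadcast with sorted dimensions followed by a transpose that restores
// the original output order.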
absl::StatusOr<bool> BroadcastCanonicalizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto& computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kBroadcast) {
continue;
}
if (absl::c_is_sorted(hlo->dimensions())) {
continue;
}
std::vector<int64_t> new_dims(hlo->dimensions().begin(),
hlo->dimensions().end());
std::vector<int64_t> original_dims(hlo->dimensions().begin(),
hlo->dimensions().end());
std::vector<int64_t> new_broadcast_dims(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
absl::c_sort(new_dims);
const int64_t rank = hlo->shape().rank();
for (int i = 0; i < new_dims.size(); ++i) {
new_broadcast_dims[new_dims[i]] =
hlo->operand(0)->shape().dimensions(i);
}
auto new_broadcast = MakeBroadcastHlo(hlo->mutable_operand(0), new_dims,
new_broadcast_dims);
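// Build the permutation for the transpose: start from the identity and
// remap each sorted broadcast dimension back to its original position.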
std::vector<int64_t> transpose_dims(rank);
absl::c_iota(transpose_dims, 0);
for (int i = 0; i < new_dims.size(); ++i) {
transpose_dims[new_dims[i]] = new_dims[std::distance(
original_dims.begin(), absl::c_find(original_dims, new_dims[i]))];
}
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(hlo->shape(), new_broadcast,
transpose_dims)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/broadcast_canonicalizer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class BroadcastCanonicalizerTest : public HloTestBase {};
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast) {
const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
parameter.2 = f32[2,3,2]{2,1,0} parameter(0)
%broadcast.399 = f32[3,2,8,2]{3,2,1,0} broadcast(%parameter.2), dimensions={1,0,3}
ROOT %reshape.43 = f32[3,16,1,2]{3,2,1,0} reshape(f32[3,2,8,2]{3,2,1,0} %broadcast.399)
}
)";
RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast22) {
const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
parameter.2 = f32[5,6,7]{2,1,0} parameter(0)
%broadcast.399 = f32[8,7,9,5,6]{4,3,2,1,0} broadcast(%parameter.2), dimensions={3,4,1}
ROOT %reshape.43 = f32[8,7,45,1,6]{4,3,2,1,0} reshape(%broadcast.399)
}
)";
RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/broadcast_canonicalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/broadcast_canonicalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37cd1887-c85b-4f59-ac67-475cfab01dc1 | cpp | tensorflow/tensorflow | all_reduce_contiguous | third_party/xla/xla/service/all_reduce_contiguous.cc | third_party/xla/xla/service/all_reduce_contiguous_test.cc | #include "xla/service/all_reduce_contiguous.h"
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
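// Replaces a variadic all-reduce with an all-reduce over a single
// contiguous buffer: bitcast every operand to 1D, concatenate, reduce, then
// slice and bitcast the result back into the original operand shapes.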
absl::Status ReplaceWithContiguousAllReduce(
HloAllReduceInstruction* all_reduce) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
int64_t total_size = 0;
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
total_size += num_elements;
}
Shape concat_shape = ShapeUtil::MakeShape(element_type, {total_size});
HloInstruction* concatenated =
computation.AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, flat_operands, /*dimension=*/0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
concat_shape, {concatenated}, all_reduce->to_apply(),
all_reduce->device_list(),
/*constrain_layout=*/false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
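// Slice the reduced buffer apart and restore each operand's shape.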
std::vector<HloInstruction*> outputs;
outputs.reserve(all_reduce->operand_count());
int64_t offset = 0;
for (int64_t i = 0; i < all_reduce->operand_count(); ++i) {
const Shape& flat_shape = flat_operands[i]->shape();
int64_t end = offset + flat_shape.dimensions(0);
HloInstruction* sliced = computation.AddInstruction(
HloInstruction::CreateSlice(flat_shape, new_all_reduce,
/*start_indices=*/{offset},
/*limit_indices=*/{end},
/*strides=*/{1}));
outputs.push_back(computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), sliced)));
offset = end;
}
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
all_reduce, HloInstruction::CreateTuple(outputs)));
return absl::OkStatus();
}
}
absl::StatusOr<bool> AllReduceContiguous::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceContiguous";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceContiguous because the module contains all-reduce "
"with constrained layouts";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce &&
instruction->operand_count() > 1) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_RETURN_IF_ERROR(ReplaceWithContiguousAllReduce(all_reduce));
}
return !all_reduces.empty();
}
} | #include "xla/service/all_reduce_contiguous.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllReduceContiguousTest = HloTestBase;
TEST_F(AllReduceContiguousTest, Simple) {
const absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[4,4] parameter(1)
ROOT crs = (f32[128], f32[4,4]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceContiguous pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
auto crs =
AllOf(op::Shape("f32[144]"),
op::AllReduce(op::Concatenate(op::Bitcast(op::Parameter(0)),
op::Bitcast(op::Parameter(1)))));
ASSERT_THAT(
root,
op::Tuple(AllOf(op::Shape("f32[128]"), op::Bitcast(op::Slice(crs))),
AllOf(op::Shape("f32[4,4]"), op::Bitcast(op::Slice(crs)))));
EXPECT_EQ(root->operand(0)->operand(0)->slice_starts(0), 0);
EXPECT_EQ(root->operand(0)->operand(0)->slice_limits(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_starts(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_limits(0), 128 + 4 * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_contiguous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_contiguous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
748cb136-f55b-4f82-b4cb-af329ea655c6 | cpp | tensorflow/tensorflow | sharding_propagation | third_party/xla/xla/service/sharding_propagation.cc | third_party/xla/xla/service/sharding_propagation_test.cc | #include "xla/service/sharding_propagation.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/spmd/shard_barrier_partitioner.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/sharding_op_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
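// Returns a sharding that is strictly better for `instruction` than its
// current one (if any), or std::nullopt when `sharding` is no improvement.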
std::optional<HloSharding> ReturnImprovedSharding(
HloSharding sharding, HloInstruction* instruction,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding),
instruction->has_sharding() ? &instruction->sharding() : nullptr,
instruction->shape(), may_combine_partial_sharding,
allow_aggressive_resharding);
}
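// Same as above, but only considers the sub-sharding at `index` of a
// (possibly tuple-shaped) instruction.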
std::optional<HloSharding> ReturnImprovedSubSharding(
HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (instruction->has_sharding()) {
const HloSharding to_improved =
instruction->sharding().GetSubSharding(instruction->shape(), index);
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding), &to_improved,
ShapeUtil::GetSubshape(instruction->shape(), index),
may_combine_partial_sharding, allow_aggressive_resharding);
} else {
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding), nullptr,
ShapeUtil::GetSubshape(instruction->shape(), index),
may_combine_partial_sharding, allow_aggressive_resharding);
}
}
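// Sets the sharding of `instruction` if an improved sharding is found.
// Returns true when the sharding changed.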
bool MaybeImproveInstructionSharding(HloSharding sharding,
HloInstruction* instruction,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (auto new_sharding = ReturnImprovedSharding(
std::move(sharding), instruction, may_combine_partial_sharding,
allow_aggressive_resharding)) {
instruction->set_sharding(std::move(*new_sharding));
return true;
}
return false;
}
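// Tuple-aware variant of the above: improves only the element sharding at
// `index`, rebuilding the instruction's tuple sharding around it. For
// non-tuple shapes `index` must be {0}.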
bool MaybeImproveInstructionSubSharding(
HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (instruction->shape().IsTuple()) {
if (auto new_sub_sharding = ReturnImprovedSubSharding(
std::move(sharding), instruction, index,
may_combine_partial_sharding, allow_aggressive_resharding)) {
HloSharding new_sharding =
instruction->has_sharding()
? instruction->sharding()
: HloSharding::Single(instruction->shape(),
HloSharding::Replicate());
ShapeTree<HloSharding> sharding_shape_tree =
new_sharding.GetAsShapeTree(instruction->shape());
*sharding_shape_tree.mutable_element(index) = new_sub_sharding.value();
instruction->set_sharding(HloSharding::Tuple(sharding_shape_tree));
return true;
} else {
return false;
}
}
CHECK(index.size() == 1 && index[0] == 0);
return MaybeImproveInstructionSharding(std::move(sharding), instruction,
may_combine_partial_sharding,
allow_aggressive_resharding);
}
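// Heuristic: returns true when the convolution kernel is small relative to
// the output in every spatial dimension, in which case shardings may be
// propagated across the spatial dimensions.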
bool IsConvolutionKernelSmall(const HloInstruction* instruction) {
CHECK_EQ(instruction->opcode(), HloOpcode::kConvolution);
const HloInstruction* rhs = instruction->operand(1);
const auto& dnums = instruction->convolution_dimension_numbers();
int64_t kernel_dim_prod = 1;
int64_t output_dim_prod = 1;
for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {
int64_t kernel_dim =
rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i));
kernel_dim_prod *= kernel_dim;
int64_t output_dim =
instruction->shape().dimensions(dnums.output_spatial_dimensions(i));
output_dim_prod *= output_dim;
if (kernel_dim >= output_dim &&
(i < 2 || kernel_dim > 3 || kernel_dim_prod >= output_dim_prod)) {
return false;
}
}
return true;
}
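// Custom calls that simply forward their operand for sharding purposes;
// the operand's sharding may be passed through to the result.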
bool IsPassthroughCustomOps(const HloInstruction* hlo) {
if (hlo->IsCustomCall({"Sharding", "X64Combine", "LayoutConstraint"})) {
return true;
}
if (hlo->operand_count() != 1 || !hlo->shape().IsArray() ||
!hlo->operand(0)->shape().IsArray() ||
hlo->operand(0)->shape().rank() != hlo->shape().rank()) {
return false;
}
return hlo->IsCustomCall(
{"ResizeNearest", "ResizeBilinear", "ResizeNearestGrad",
"ResizeBilinearGrad", "Cholesky",
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget,
host_memory_offload_annotations::kMoveToHostCustomCallTarget});
}
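// Picks the operand whose sharding should be used to shard the output: the
// data operand for ops such as pad, slice, and shifts, or the operand with
// the most specific sharding for elementwise-style ops. Returns nullptr if
// no suitable operand has a sharding.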
const HloInstruction* PickRepresentativeOperand(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kMap:
case HloOpcode::kPad:
case HloOpcode::kPower:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
if (instruction->operand(0)->has_sharding()) {
return instruction->operand(0);
}
return nullptr;
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kDivide:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRemainder:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kTopK:
case HloOpcode::kSort:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kStochasticConvert:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kWhile:
case HloOpcode::kXor: {
const HloInstruction* best_operand = nullptr;
for (const HloInstruction* operand : instruction->operands()) {
if (operand->has_sharding() &&
(best_operand == nullptr ||
hlo_sharding_util::IsShardingMoreSpecific(
operand->sharding(), best_operand->sharding()))) {
best_operand = operand;
}
}
return best_operand;
}
case HloOpcode::kCustomCall: {
if (IsPassthroughCustomOps(instruction)) {
return instruction->operand(0);
}
return nullptr;
}
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kCholesky:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kDynamicReshape:
case HloOpcode::kFft:
case HloOpcode::kFusion:
case HloOpcode::kGather:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kPartitionId:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kReplicaId:
case HloOpcode::kReshape:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kTranspose:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
return nullptr;
}
}
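// Returns true if `instruction` is a candidate for receiving a spatially
// partitioned sharding, given the pass configuration and whether the
// instruction is an entry parameter or the entry root.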
bool SupportSpatialPartitioning(
const HloInstruction* instruction,
const ShardingPropagation::ComputationMap& computation_map, bool is_spmd,
bool allow_spmd_sharding_propagation_to_output,
bool allow_spmd_sharding_propagation_to_parameters,
const CustomCallShardingHelper* sharding_helper) {
const bool is_entry_root = instruction->parent()
->parent()
->entry_computation()
->root_instruction() == instruction;
if (instruction->parent()->root_instruction() == instruction &&
computation_map.find(instruction->parent()) == computation_map.end() &&
!(is_entry_root && allow_spmd_sharding_propagation_to_output)) {
return false;
}
if (instruction->IsElementwise() &&
(instruction->opcode() != HloOpcode::kRng || is_spmd)) {
return true;
}
switch (instruction->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kReduce:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
return true;
case HloOpcode::kParameter:
return allow_spmd_sharding_propagation_to_parameters ||
computation_map.find(instruction->parent()) !=
computation_map.end();
case HloOpcode::kReverse:
return is_spmd;
case HloOpcode::kCustomCall:
if (!is_spmd) {
return false;
}
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target())) {
return partitioner->IsCustomCallShardable(instruction);
}
return (IsPassthroughCustomOps(instruction) ||
sharding_helper->IsCustomCallShardable(instruction));
default:
return false;
}
}
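// Looks ahead through a chain of single-user instructions until one with a
// sharding is found, then propagates that sharding backwards through the
// chain to produce a sharding hint for `instr`.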
std::optional<HloSharding> LookaheadUserSharding(HloInstruction* instr,
bool is_spmd,
const CallGraph& call_graph) {
if (instr->user_count() != 1) {
return std::nullopt;
}
HloInstruction* current_user = instr->users()[0];
std::optional<HloSharding> sharding;
std::vector<HloInstruction*> users_chain = {instr, current_user};
while (!current_user->has_sharding()) {
if (current_user->users().size() != 1) {
users_chain.clear();
break;
}
current_user = current_user->users()[0];
users_chain.push_back(current_user);
}
if (users_chain.empty()) {
return std::nullopt;
}
for (int i = users_chain.size() - 1; i >= 1; --i) {
HloInstruction* user = users_chain[i];
HloInstruction* current = users_chain[i - 1];
CHECK(user->has_sharding());
sharding = ShardingPropagation::GetShardingFromUser(
*current, *user, /*aggressiveness=*/INT64_MAX, is_spmd, call_graph,
/*sharding_helper=*/nullptr);
if (sharding.has_value() && i != 1) {
current->set_sharding(*sharding);
continue;
}
break;
}
for (int i = 1; i < users_chain.size() - 1; ++i) {
users_chain[i]->clear_sharding();
}
return sharding;
}
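// Infers an output sharding for a gather from the shardings of its operand
// and start indices along the detected parallel (batch-like) dimensions.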
bool InferGatherParallelShardingFromOperands(
HloInstruction* instruction,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
bool may_combine_partial_sharding) {
CHECK(DynCast<HloGatherInstruction>(instruction));
bool changed = false;
auto output_parallel_dims = hlo_sharding_util::GetGatherParallelOutputDims(
*instruction, parallel_dims);
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
instruction->operand(0)->sharding(), instruction->shape(),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims),
absl::MakeConstSpan(output_parallel_dims)),
instruction, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
instruction->operand(1)->sharding(), instruction->shape(),
absl::MakeConstSpan(parallel_dims.indices_parallel_dims),
absl::MakeConstSpan(output_parallel_dims)),
instruction, may_combine_partial_sharding);
}
return changed;
}
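// Infers a sharding for each scatter result from the shardings of the
// scatter operands, indices, and updates along the parallel dimensions.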
bool InferScatterParallelShardingFromOperands(
HloInstruction* instruction,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
bool may_combine_partial_sharding) {
HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(instruction);
CHECK(scatter);
const int64_t operand_count = scatter->scatter_operand_count();
auto scatter_operands = scatter->scatter_operands();
auto scatter_indices = scatter->scatter_indices();
auto scatter_updates = scatter->scatter_updates();
bool changed = false;
auto update_parallel_dims = hlo_sharding_util::GetScatterParallelUpdateDims(
*instruction, parallel_dims);
Shape shape = operand_count == 1
? instruction->shape()
: ShapeUtil::GetSubshape(instruction->shape(), {0});
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {
changed |= MaybeImproveInstructionSubSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_operands[i]->sharding(), shape,
absl::MakeConstSpan(parallel_dims.operand_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),
instruction, {i}, may_combine_partial_sharding);
}
}
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)) {
auto parallel_sharding_from_indices = hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_indices->sharding(), shape,
absl::MakeConstSpan(parallel_dims.indices_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims));
for (int64_t i = 0; i != operand_count; ++i) {
changed |= MaybeImproveInstructionSubSharding(
parallel_sharding_from_indices, instruction, {i},
may_combine_partial_sharding);
}
}
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
changed |= MaybeImproveInstructionSubSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_updates[i]->sharding(), shape,
absl::MakeConstSpan(update_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),
instruction, {i}, may_combine_partial_sharding);
}
}
return changed;
}
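// Returns whether sharding may be propagated through `inst` at the given
// aggressiveness level: level 0 only allows a conservative allowlist of ops,
// and broadcasts additionally require level 2 or higher.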
bool CanPropagateThroughAtAggressiveLevel(const HloInstruction& inst,
int64_t aggressiveness) {
if (aggressiveness < 1 &&
!(inst.IsElementwise() || inst.IsCustomCall("Sharding")) &&
inst.opcode() != HloOpcode::kTranspose &&
inst.opcode() != HloOpcode::kReshape &&
inst.opcode() != HloOpcode::kTuple &&
inst.opcode() != HloOpcode::kGetTupleElement &&
inst.opcode() != HloOpcode::kWhile &&
inst.opcode() != HloOpcode::kDynamicSlice &&
inst.opcode() != HloOpcode::kDynamicUpdateSlice &&
inst.opcode() != HloOpcode::kOptimizationBarrier &&
inst.opcode() != HloOpcode::kConcatenate &&
inst.opcode() != HloOpcode::kCall && inst.opcode() != HloOpcode::kCopy) {
return false;
}
if (aggressiveness < 2 && inst.opcode() == HloOpcode::kBroadcast) {
return false;
}
return true;
}
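// Checks whether two shardings, already known to be equal as shardings (see
// the DCHECK below), also carry the same OpMetadata.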
bool SameShardingMetadata(const HloSharding& a, const HloSharding& b) {
DCHECK_EQ(a, b);
auto same_metadata = [](absl::Span<const OpMetadata> a,
absl::Span<const OpMetadata> b) {
if (a.size() != b.size()) return false;
for (int i = 0, e = a.size(); i < e; ++i) {
if (!protobuf_util::ProtobufEquals(a[i], b[i])) {
return false;
}
}
return true;
};
if (a.IsTuple()) {
for (int i = 0, e = a.tuple_elements().size(); i < e; ++i) {
if (!same_metadata(a.tuple_elements()[i].metadata(),
b.tuple_elements()[i].metadata())) {
return false;
}
}
return true;
} else {
return same_metadata(a.metadata(), b.metadata());
}
}
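// Attaches each instruction's OpMetadata to its sharding when the sharding
// does not already carry metadata. Returns true if any sharding changed.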
bool AssignShardingMetadata(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
const auto& metadata = instruction->metadata();
if (!instruction->has_sharding() || metadata.ByteSizeLong() == 0) {
continue;
}
HloSharding sharding_with_metadata =
          instruction->sharding().WithMetadata({metadata},
                                               /*overwrite=*/false);
if (!SameShardingMetadata(instruction->sharding(),
sharding_with_metadata)) {
instruction->set_sharding(std::move(sharding_with_metadata));
changed = true;
}
}
}
return changed;
}
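// Strips all metadata from instruction shardings. Returns true if any
// sharding changed.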
bool RemoveShardingMetadata(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (!instruction->has_sharding()) {
continue;
}
HloSharding sharding_no_metadata =
instruction->sharding().WithoutMetadata();
if (!SameShardingMetadata(instruction->sharding(),
sharding_no_metadata)) {
instruction->set_sharding(std::move(sharding_no_metadata));
changed = true;
}
}
}
return changed;
}
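// If the while body contains a device-assigned channel instruction (a
// non-host send/recv, or an all-reduce/reduce-scatter with a channel id),
// verifies that every other device-assigned instruction in the body uses the
// same device and assigns that device to the body root if it has none.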
absl::Status CheckAndUpdateDeviceAssignmentsInWhileBody(
HloInstruction* while_instruction) {
auto bad_status = [](HloInstruction* instruction, int64_t device,
HloInstruction* channel_instruction,
int64_t correct_device) {
return FailedPrecondition(
"Instruction: %s is on device: %d, which conflicts with device: %d "
"of channel instruction: %s",
instruction->name(), device, correct_device,
channel_instruction->name());
};
CHECK_EQ(while_instruction->opcode(), HloOpcode::kWhile);
HloComputation* while_body = while_instruction->while_body();
std::map<int64_t, HloInstruction*> devices_to_instructions;
std::optional<int64_t> unique_device = std::nullopt;
HloInstruction* channel_instruction = nullptr;
for (HloInstruction* instruction : while_body->instructions()) {
if (instruction->sharding_unique_device()) {
auto opcode = instruction->opcode();
int64_t device = *instruction->sharding_unique_device();
if (unique_device.has_value()) {
if (*unique_device != device) {
return bad_status(instruction, device, channel_instruction,
*unique_device);
}
      } else if (((opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv) &&
                  !Cast<HloSendRecvInstruction>(instruction)
                       ->is_host_transfer()) ||
                 ((opcode == HloOpcode::kAllReduce ||
                   opcode == HloOpcode::kReduceScatter) &&
                  instruction->channel_id())) {
channel_instruction = instruction;
unique_device = device;
if (!devices_to_instructions.empty()) {
for (auto it = devices_to_instructions.begin();
it != devices_to_instructions.end(); ++it) {
if (*unique_device != it->first) {
return bad_status(it->second, it->first, channel_instruction,
*unique_device);
}
}
}
} else {
devices_to_instructions[device] = instruction;
}
}
}
if (unique_device.has_value()) {
auto while_device = while_instruction->sharding_unique_device();
if (while_device.has_value() && *unique_device != *while_device) {
return bad_status(while_instruction, *while_device, channel_instruction,
*unique_device);
}
auto body_root = while_body->root_instruction();
auto root_device = body_root->sharding_unique_device();
if (!root_device.has_value()) {
body_root->set_device_sharding(*unique_device);
} else if (*unique_device != *root_device) {
return bad_status(body_root, *root_device, channel_instruction,
*unique_device);
}
}
return absl::OkStatus();
}
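// Refines an auto/manual sharding pair using the auto sharding `to_merge`
// along `unspecified_dims`. Returns true if the pair was refined.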
bool RefineManualAutoShardingFromAuto(
const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims,
HloSharding* auto_sharding, HloSharding* manual_sharding) {
if (!manual_sharding->IsManualSubgroup() ||
auto_sharding->IsManualSubgroup() ||
!manual_sharding->HasPartialReplication() ||
manual_sharding->subgroup_types().size() != 2) {
return false;
}
HloSharding partial_rep =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
to_merge, unspecified_dims);
if (partial_rep.IsTileMaximal()) {
return false;
}
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,
auto_sharding)) {
return false;
}
const int64_t data_rank = partial_rep.TiledDataRank();
std::vector<int64_t> partial_manual_shape(
partial_rep.tile_assignment().dimensions().begin(),
partial_rep.tile_assignment().dimensions().end());
partial_manual_shape.insert(partial_manual_shape.begin() + data_rank, 1);
auto partial_tiling_for_manual =
partial_rep.tile_assignment().Reshape(partial_manual_shape);
HloSharding partial_rep_for_manual = HloSharding::PartialTile(
partial_tiling_for_manual, partial_rep.metadata());
auto man_tiling = manual_sharding->tile_assignment();
if (manual_sharding->subgroup_types().back() != OpSharding::REPLICATED) {
std::vector<int> transposed_dims(man_tiling.num_dimensions());
absl::c_iota(transposed_dims, 0);
std::swap(transposed_dims.back(), transposed_dims[data_rank]);
man_tiling = man_tiling.Transpose(transposed_dims);
}
HloSharding tmp_sharding_for_merging = HloSharding::PartialTile(
std::move(man_tiling), manual_sharding->metadata());
if (!hlo_sharding_util::MergeShardingIfCompatible(
partial_rep_for_manual, &tmp_sharding_for_merging)) {
return false;
}
std::vector<OpSharding::Type> subgroup_types;
subgroup_types.push_back(OpSharding::MANUAL);
if (tmp_sharding_for_merging.HasPartialReplication()) {
subgroup_types.push_back(OpSharding::REPLICATED);
}
*manual_sharding = HloSharding::Subgroup(
tmp_sharding_for_merging.tile_assignment(), subgroup_types,
tmp_sharding_for_merging.metadata());
return true;
}
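// Refines an auto/manual sharding pair using the manual sharding `to_merge`
// along `unspecified_dims`. Returns true if the pair was refined.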
bool RefineManualAutoShardingFromManual(
const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims,
HloSharding* auto_sharding, HloSharding* manual_sharding) {
if (!to_merge.IsManualSubgroup() || !manual_sharding->IsManualSubgroup() ||
!manual_sharding->HasPartialReplication() ||
auto_sharding->IsManualSubgroup() ||
manual_sharding->subgroup_types().size() != 2) {
return false;
}
HloSharding partial_rep =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
to_merge, unspecified_dims);
if (partial_rep.IsTileMaximal()) {
return false;
}
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,
manual_sharding)) {
return false;
}
HloSharding partial_rep_for_auto = HloSharding::Subgroup(
partial_rep.tile_assignment(),
std::vector<OpSharding::Type>(partial_rep.subgroup_types().size(),
OpSharding::REPLICATED),
partial_rep.metadata());
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep_for_auto,
auto_sharding)) {
return false;
}
return true;
}
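// Propagates the operand's sharding into the unspecified dimensions of a
// sharding annotation (a Sharding custom-call or a copy), handling an
// optional SPMDFullToShardShape/SPMDShardToFullShape user. Returns true if
// any sharding was updated.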
bool InferUnspecifiedDimsFromOperand(HloInstruction* annotate_op,
absl::Span<const int64_t> unspecified_dims,
HloInstruction** man_conversion_op_after) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (!hlo_sharding_util::IsSpatiallyPartitioned(annotate_op->operand(0))) {
return false;
}
const HloSharding& operand_sharding = annotate_op->operand(0)->sharding();
if (!operand_sharding.IsTiled()) {
return false;
}
HloInstruction* man_conversion_op = nullptr;
if (annotate_op->user_count() == 1) {
HloInstruction* user = annotate_op->users()[0];
if (user->IsCustomCall("SPMDFullToShardShape") ||
user->IsCustomCall("SPMDShardToFullShape")) {
std::vector<int64_t> user_unspec_dims;
if (!sharding_op_util::ParseAttributes(
Cast<HloCustomCallInstruction>(user)->opaque(),
&user_unspec_dims)
.ok()) {
return false;
}
absl::c_sort(user_unspec_dims);
if (unspecified_dims != user_unspec_dims) {
return false;
}
man_conversion_op = user;
}
}
*man_conversion_op_after = man_conversion_op;
if (man_conversion_op == nullptr) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
operand_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
return false;
}
annotate_op->set_sharding(sharding);
return true;
}
if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) {
HloSharding auto_sharding = annotate_op->sharding();
HloSharding manual_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromAuto(operand_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(auto_sharding);
man_conversion_op->set_sharding(manual_sharding);
return true;
}
CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape"));
HloSharding manual_sharding = annotate_op->sharding();
HloSharding auto_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromManual(operand_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(manual_sharding);
man_conversion_op->set_sharding(auto_sharding);
return true;
}
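// Propagates a single user's sharding into the unspecified dimensions of a
// sharding annotation. Returns true if any sharding was updated.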
bool InferUnspecifiedDimsFromOneUser(HloInstruction* annotate_op,
const HloInstruction* user,
int64_t aggressiveness, bool is_spmd,
absl::Span<const int64_t> unspecified_dims,
HloInstruction* man_conversion_op,
const CallGraph& call_graph) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (!user->has_sharding() || !user->sharding().IsTiled()) {
return false;
}
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(
man_conversion_op == nullptr ? *annotate_op : *man_conversion_op,
*user, aggressiveness, is_spmd, call_graph,
          /*sharding_helper=*/nullptr);
if (!user_sharding.has_value() || user_sharding->IsTileMaximal()) {
return false;
}
if (man_conversion_op == nullptr) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
*user_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
return false;
}
annotate_op->set_sharding(sharding);
return true;
}
if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) {
HloSharding auto_sharding = annotate_op->sharding();
HloSharding manual_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromManual(*user_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(auto_sharding);
man_conversion_op->set_sharding(manual_sharding);
return true;
}
CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape"));
HloSharding manual_sharding = annotate_op->sharding();
HloSharding auto_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromAuto(*user_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(manual_sharding);
man_conversion_op->set_sharding(auto_sharding);
return true;
}
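// Propagates users' shardings into the unspecified dimensions of a sharding
// annotation, handling an optional manual-conversion user. Returns true if
// any sharding was updated.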
bool InferUnspecifiedDimsFromUsers(HloInstruction* annotate_op,
absl::Span<const int64_t> unspecified_dims,
int64_t aggressiveness, bool is_spmd,
HloInstruction** man_conversion_op_after,
const CallGraph& call_graph) {
HloInstruction* man_conversion_op = nullptr;
if (annotate_op->user_count() == 1) {
HloInstruction* user = annotate_op->users()[0];
if (user->IsCustomCall("SPMDFullToShardShape") ||
user->IsCustomCall("SPMDShardToFullShape")) {
      std::vector<int64_t> user_unspec_dims;
      if (!sharding_op_util::ParseAttributes(
               Cast<HloCustomCallInstruction>(user)->opaque(),
               &user_unspec_dims)
               .ok()) {
        return false;
      }
      // Sort after parsing so the comparison against the already-sorted
      // `unspecified_dims` is order-insensitive; sorting the still-empty
      // vector before parsing had no effect.
      absl::c_sort(user_unspec_dims);
      if (unspecified_dims != user_unspec_dims) {
        return false;
      }
man_conversion_op = user;
}
}
*man_conversion_op_after = man_conversion_op;
HloInstruction* op_for_users =
man_conversion_op == nullptr ? annotate_op : man_conversion_op;
bool changed = false;
for (HloInstruction* user : op_for_users->users()) {
changed |= InferUnspecifiedDimsFromOneUser(
annotate_op, user, aggressiveness, is_spmd, unspecified_dims,
man_conversion_op, call_graph);
}
return changed;
}
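// Propagates shardings from other members of the same shard group into the
// unspecified dimensions of a sharding annotation. Returns true if any
// sharding was updated.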
bool InferUnspecifiedDimsFromShardGroup(
HloInstruction* annotate_op, absl::Span<const int64_t> unspecified_dims,
const absl::flat_hash_set<HloInstruction*>& shard_group) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (annotate_op->IsCustomCall(spmd::kShardBarrierTo)) {
return false;
}
bool changed = false;
for (const HloInstruction* member : shard_group) {
if (member == annotate_op) {
continue;
}
if (member->IsCustomCall(spmd::kShardBarrierFrom)) {
continue;
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(member)) {
continue;
}
const HloSharding& member_sharding = member->sharding();
if (!member_sharding.IsTiled()) {
continue;
}
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
member_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
continue;
}
annotate_op->set_sharding(sharding);
    changed = true;
}
return changed;
}
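// Returns whether an instruction is a target for CSE prevention, i.e. a
// broadcast of a scalar.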
bool IsCSEPreventionTarget(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBroadcast &&
instruction->operand(0)->shape().rank() == 0;
}
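// Marks a sharding as CSE-prevention sharding by attaching a sentinel
// metadata op name.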
HloSharding SetCSEPreventionSharding(const HloSharding& sharding) {
OpMetadata metadata;
metadata.set_op_name("_sharding_propagation_cse_prevention");
  return sharding.WithMetadata({metadata}, /*overwrite=*/true);
}
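// Returns whether the sharding carries the CSE-prevention sentinel metadata.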
bool IsCSEPreventionSharding(const HloSharding& sharding) {
if (sharding.metadata().size() != 1) {
return false;
}
return sharding.metadata()[0].op_name() ==
"_sharding_propagation_cse_prevention";
}
}
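// Infers the sharding of a dot (or a convolution interpreted as a dot) from
// its operands, using a look-ahead through the users to decide which
// operand's sharding takes priority when both are available. Returns true if
// the sharding was set or improved.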
bool InferDotShardingFromOperands(
HloInstruction* instruction, const CallGraph& call_graph,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool may_combine_partial_sharding, bool is_spmd) {
auto from_operand = [&](int64_t operand_index) {
auto operand = instruction->operand(operand_index);
const HloSharding& operand_sharding = operand->sharding();
if (operand_sharding.IsTileMaximal()) {
return operand_sharding;
}
std::vector<int64_t> contracting_dims;
contracting_dims.reserve(dnums.contracting_dims.size());
for (const auto& dim : dnums.contracting_dims) {
contracting_dims.push_back(operand_index == 0 ? dim.lhs : dim.rhs);
}
for (const auto& dim : operand_index == 0
? dnums.rhs_non_contracting_dims
: dnums.lhs_non_contracting_dims) {
int64_t d = operand_index == 0 ? dim.lhs : dim.rhs;
if (d >= 0) {
contracting_dims.push_back(d);
}
}
auto replicate_contracting_dims =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand_sharding, contracting_dims);
std::vector<int64_t> out_dims_to_op_perm(instruction->shape().rank(), -1);
std::vector<int64_t> op_dims_to_output_perm(operand->shape().rank(), -1);
for (const auto& dim : dnums.batch_dims) {
out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs;
op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
for (const auto& dim : operand_index == 0
? dnums.lhs_non_contracting_dims
: dnums.rhs_non_contracting_dims) {
out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs;
op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
return *hlo_sharding_util::TransposeShardingWithCollapsedDims(
replicate_contracting_dims, op_dims_to_output_perm,
out_dims_to_op_perm);
};
std::optional<HloSharding> improved_operand_0;
std::optional<HloSharding> improved_operand_1;
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
improved_operand_0 = ReturnImprovedSharding(
        from_operand(0), instruction, may_combine_partial_sharding,
        /*allow_aggressive_resharding=*/false);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
improved_operand_1 = ReturnImprovedSharding(
        from_operand(1), instruction, may_combine_partial_sharding,
        /*allow_aggressive_resharding=*/false);
}
if (!improved_operand_0.has_value() && !improved_operand_1.has_value()) {
return false;
}
if (improved_operand_0.has_value() && !improved_operand_1.has_value()) {
instruction->set_sharding(*improved_operand_0);
return true;
}
if (!improved_operand_0.has_value() && improved_operand_1.has_value()) {
instruction->set_sharding(*improved_operand_1);
return true;
}
CHECK(improved_operand_0.has_value() && improved_operand_1.has_value());
std::optional<HloSharding> lookahead_sharding =
LookaheadUserSharding(instruction, is_spmd, call_graph);
std::array<HloSharding, 2> sharding_priority = {*improved_operand_0,
*improved_operand_1};
bool priority_defined_with_lookahead = false;
if (lookahead_sharding.has_value()) {
const bool operand_0_is_lookahead_subtiling =
hlo_sharding_util::IsSubTilingOrEqualSharding(
instruction->shape(), *lookahead_sharding, *improved_operand_0);
const bool operand_1_is_lookahead_subtiling =
hlo_sharding_util::IsSubTilingOrEqualSharding(
instruction->shape(), *lookahead_sharding, *improved_operand_1);
if (operand_0_is_lookahead_subtiling && !operand_1_is_lookahead_subtiling) {
priority_defined_with_lookahead = true;
}
if (!operand_0_is_lookahead_subtiling && operand_1_is_lookahead_subtiling) {
instruction->set_sharding(*improved_operand_1);
std::swap(sharding_priority[0], sharding_priority[1]);
priority_defined_with_lookahead = true;
}
}
if (!priority_defined_with_lookahead &&
ShapeUtil::ByteSizeOf(instruction->operand(0)->shape()) <
ShapeUtil::ByteSizeOf(instruction->operand(1)->shape())) {
std::swap(sharding_priority[0], sharding_priority[1]);
}
instruction->set_sharding(sharding_priority[0]);
MaybeImproveInstructionSharding(sharding_priority[1], instruction,
may_combine_partial_sharding);
return true;
}
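// Infers the sharding of a convolution from its operands. Convolutions that
// reduce to dots are delegated to InferDotShardingFromOperands; otherwise
// the output sharding is derived from the LHS sharding.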
bool InferConvolutionShardingFromOperands(HloInstruction* instruction,
const CallGraph& call_graph,
int64_t aggressiveness,
bool may_combine_partial_sharding,
bool is_spmd) {
auto get_partitions_for_dims =
[&](const HloInstruction* inst,
absl::Span<
const dot_as_convolution_util::DotConvolutionDimsInfo::DimNums>
dims,
int lhs_or_rhs) {
int64_t partitions = 1;
if (!inst->has_sharding()) {
return partitions;
}
const auto& sharding = inst->sharding();
if (sharding.IsTileMaximal()) {
return partitions;
}
for (const auto& dim : dims) {
if (lhs_or_rhs == 0) {
partitions *= sharding.tile_assignment().dim(dim.lhs);
} else {
CHECK_EQ(lhs_or_rhs, 1);
partitions *= sharding.tile_assignment().dim(dim.rhs);
}
}
return partitions;
};
auto dot_dims =
dot_as_convolution_util::ParseConvolutionDimsInfo(instruction);
const int64_t lhs_conv_spatial_partitions = get_partitions_for_dims(
instruction->operand(0), dot_dims.conv_spatial_dims, 0);
const int64_t rhs_conv_spatial_partitions = get_partitions_for_dims(
instruction->operand(1), dot_dims.conv_spatial_dims, 1);
if (dot_dims.conv_spatial_dims.empty() ||
(lhs_conv_spatial_partitions == 1 && rhs_conv_spatial_partitions == 1 &&
instruction->batch_group_count() == 1 &&
instruction->feature_group_count() == 1)) {
return InferDotShardingFromOperands(instruction, call_graph, dot_dims,
may_combine_partial_sharding, is_spmd);
}
const auto& dnums = instruction->convolution_dimension_numbers();
const HloInstruction* lhs = instruction->operand(0);
auto get_tiled_sharding_based_on_lhs = [&] {
CHECK(!lhs->sharding().IsTileMaximal());
std::vector<int64_t> output_to_lhs_indices(instruction->shape().rank());
output_to_lhs_indices[dnums.output_batch_dimension()] =
dnums.input_batch_dimension();
output_to_lhs_indices[dnums.output_feature_dimension()] =
dnums.input_feature_dimension();
for (int64_t i = 0; i < dnums.input_spatial_dimensions_size(); ++i) {
output_to_lhs_indices[dnums.output_spatial_dimensions(i)] =
dnums.input_spatial_dimensions(i);
}
return hlo_sharding_util::TransposeSharding(lhs->sharding(),
output_to_lhs_indices);
};
if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {
return false;
}
if (lhs->sharding().IsTileMaximal()) {
return MaybeImproveInstructionSharding(lhs->sharding(), instruction,
may_combine_partial_sharding);
}
if (IsConvolutionKernelSmall(instruction)) {
const auto& tile_assignment = lhs->sharding().tile_assignment();
if (tile_assignment.dim(dnums.input_feature_dimension()) > 1) {
return false;
}
return MaybeImproveInstructionSharding(get_tiled_sharding_based_on_lhs(),
instruction,
may_combine_partial_sharding);
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
lhs->sharding(), {dnums.input_batch_dimension()}),
instruction, may_combine_partial_sharding);
}
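// Infers the sharding for the operand of a broadcast from the broadcast's
// output sharding, or returns std::nullopt if doing so would require
// replication that is only valid in SPMD mode.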
std::optional<HloSharding> InferBroadcastOperandSharding(
const HloInstruction& instruction, bool is_spmd) {
if (instruction.sharding().IsReplicated() ||
instruction.sharding().IsManual()) {
return instruction.sharding();
}
std::vector<int64_t> dims_to_replicate;
bool needs_replication = false;
for (int64_t i = 0; i < instruction.shape().rank(); ++i) {
if (absl::c_count(instruction.dimensions(), i) == 0) {
dims_to_replicate.push_back(i);
if (instruction.sharding().tile_assignment().dim(i) > 1) {
needs_replication = true;
}
}
}
if (!is_spmd && needs_replication) {
return std::nullopt;
}
return hlo_sharding_util::RemoveShapeDimensions(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
instruction.sharding(), dims_to_replicate),
dims_to_replicate);
}
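// Infers the sharding of a reduce from its input operands: the reduced
// dimensions are partially replicated and then removed from the sharding.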
bool InferReduceShardingFromOperand(HloInstruction* instruction,
bool may_combine_partial_sharding,
bool is_spmd) {
auto get_maybe_tuple_sharding = [&](HloSharding sharding) {
if (instruction->shape().IsArray()) {
return sharding;
}
std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(),
std::move(sharding));
return HloSharding::Tuple(instruction->shape(), tuple);
};
auto* reduce = Cast<HloReduceInstruction>(instruction);
bool changed = false;
for (int64_t i = 0; i != reduce->inputs().size(); ++i) {
HloInstruction* operand = reduce->inputs()[i];
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
continue;
}
if (operand->sharding().IsManual()) {
changed |= MaybeImproveInstructionSubSharding(
operand->sharding(), reduce, {i}, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
if (operand->sharding().IsReplicated() ||
(!is_spmd &&
absl::c_any_of(instruction->dimensions(), [operand](int64_t dim) {
return operand->sharding().tile_assignment().dim(dim) > 1;
}))) {
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(
hlo_sharding_util::ReplicateAllDataDims(operand->sharding())),
reduce, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
auto after_partial_replication =
operand->sharding().IsReplicated()
? operand->sharding()
: hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand->sharding(), reduce->dimensions());
if (after_partial_replication.IsReplicated()) {
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(after_partial_replication), reduce,
may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
HloSharding new_sharding =
get_maybe_tuple_sharding(hlo_sharding_util::RemoveShapeDimensions(
after_partial_replication, reduce->dimensions()));
changed |= MaybeImproveInstructionSharding(
std::move(new_sharding), reduce, may_combine_partial_sharding,
ComputeNonRootUsers(reduce) == 1);
}
return changed;
}
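// Canonicalizes sharding annotations in `module`: optionally replaces
// Sharding custom-calls with copies, records annotations with unspecified
// dimensions as well as shard_as/shard_like groups, and saves the entry
// computation's root and parameter shardings. Returns true if the module
// changed.
//
// A minimal usage sketch (hypothetical call site; assumes `module` is an
// already-built std::unique_ptr<HloModule> and only the unspecified-dims
// output is collected):
//
//   absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>> dims;
//   TF_ASSIGN_OR_RETURN(
//       bool changed,
//       ProcessShardingInstruction(
//           module.get(), /*execution_threads=*/{},
//           /*replace_sharding_with_copy=*/true, &dims,
//           /*saved_root_shardings=*/nullptr,
//           /*saved_parameter_shardings=*/nullptr,
//           /*instruction_to_shard_group_id=*/nullptr,
//           /*shard_group_id_to_shard_as_group=*/nullptr,
//           /*shard_group_id_to_shard_like_group=*/nullptr,
//           /*allow_spmd_sharding_propagation_to_parameters_vector=*/nullptr,
//           /*remove_unknown_shardings=*/false));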
absl::StatusOr<bool> ProcessShardingInstruction(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
bool replace_sharding_with_copy,
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>*
unspecified_dims,
std::vector<HloSharding>* saved_root_shardings,
absl::flat_hash_map<int64_t, HloSharding>* saved_parameter_shardings,
absl::flat_hash_map<HloInstruction*, int64_t>*
instruction_to_shard_group_id,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_as_group,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_like_group,
const std::vector<bool>*
allow_spmd_sharding_propagation_to_parameters_vector,
bool remove_unknown_shardings) {
bool changed = false;
const bool use_shard_group = instruction_to_shard_group_id &&
shard_group_id_to_shard_as_group &&
shard_group_id_to_shard_like_group;
auto process_shard_group_instruction =
[&](HloInstruction* instruction,
bool replaced_with_copy) -> absl::StatusOr<bool> {
if (replace_sharding_with_copy) {
if (use_shard_group && instruction->has_sharding() &&
instruction->sharding().IsShardGroup()) {
if (instruction->IsCustomCall("Sharding")) {
CHECK(instruction->operand(0)->opcode() != HloOpcode::kParameter ||
(allow_spmd_sharding_propagation_to_parameters_vector &&
allow_spmd_sharding_propagation_to_parameters_vector->size() ==
module->entry_computation()->num_parameters() &&
allow_spmd_sharding_propagation_to_parameters_vector->at(
instruction->operand(0)->parameter_number())));
}
if (instruction->IsCustomCall("Sharding") && !replaced_with_copy) {
HloSharding operand_sharding =
instruction->operand(0)->has_sharding()
? instruction->operand(0)->sharding()
: HloSharding::Unknown();
operand_sharding.SetShardGroup(
instruction->sharding().GetShardGroup());
instruction->mutable_operand(0)->set_sharding(
std::move(operand_sharding));
return true;
} else {
const int64_t shard_group_id =
instruction->sharding().GetShardGroup().shard_group_id;
(*instruction_to_shard_group_id)[instruction] = shard_group_id;
if (instruction->sharding().IsShardAs()) {
auto& shard_as_group =
(*shard_group_id_to_shard_as_group)[shard_group_id];
if (!shard_as_group.empty()) {
CHECK(ShapeUtil::SameDimensions(
instruction->shape(), (*shard_as_group.begin())->shape()))
<< "Instruction: " << instruction->ToString()
<< " has different shape from the shapes of the other "
"instructions within the same shard_as group: "
<< (*shard_as_group.begin())->shape().ToString();
}
shard_as_group.insert(instruction);
} else {
auto& shard_like_group =
(*shard_group_id_to_shard_like_group)[shard_group_id];
if (!shard_like_group.empty()) {
CHECK(ShapeUtil::SameDimensions(
instruction->shape(), (*shard_like_group.begin())->shape()))
<< "Instruction: " << instruction->ToString()
<< " has different shape from the shapes of the other "
"instructions within the same shard_like group: "
<< (*shard_like_group.begin())->shape().ToString();
}
shard_like_group.insert(instruction);
}
HloSharding sharding = instruction->sharding();
sharding.ClearShardGroup();
instruction->set_sharding(std::move(sharding));
}
}
}
return false;
};
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
HloInstruction* instruction = *it;
if (instruction->IsCustomCall("Sharding")) {
TF_RET_CHECK(instruction->has_sharding())
<< "Sharding instruction must have a sharding attribute";
VLOG(3) << "ProcessShardingInstruction: " << instruction->ToString();
HloSharding original_sharding = instruction->sharding();
std::vector<int64_t> unspec_dims;
TF_RETURN_IF_ERROR(sharding_op_util::ParseAttributes(
Cast<HloCustomCallInstruction>(instruction)->opaque(),
&unspec_dims));
bool replaced_with_copy =
replace_sharding_with_copy &&
(!original_sharding.IsUnknown() || remove_unknown_shardings ||
instruction->operand(0)->opcode() == HloOpcode::kParameter);
if (replaced_with_copy) {
auto copy = computation->AddInstruction(HloInstruction::CreateUnary(
instruction->shape(), HloOpcode::kCopy,
instruction->mutable_operand(0)));
TF_ASSIGN_OR_RETURN(
              std::ignore, computation->ReplaceInstruction(
                               instruction, copy, /*preserve_sharding=*/false,
                               /*relay_control_dependency=*/false,
                               /*remove_unused_operands=*/false));
copy->set_sharding(std::move(original_sharding));
instruction = copy;
changed = true;
}
TF_ASSIGN_OR_RETURN(
bool shard_group_remove_instruction,
process_shard_group_instruction(instruction, replaced_with_copy));
if (!unspec_dims.empty()) {
absl::c_sort(unspec_dims);
unspecified_dims->emplace(instruction, std::move(unspec_dims));
} else if (!instruction->operand(0)->has_sharding()) {
instruction->mutable_operand(0)->set_sharding(
instruction->sharding());
}
if (shard_group_remove_instruction) {
TF_ASSIGN_OR_RETURN(std::ignore,
computation->ReplaceInstruction(
instruction, instruction->mutable_operand(0),
                                  /*preserve_sharding=*/false,
                                  /*relay_control_dependency=*/false,
                                  /*remove_unused_operands=*/false));
}
} else {
TF_ASSIGN_OR_RETURN(std::ignore,
process_shard_group_instruction(
                                instruction, /*replaced_with_copy=*/false));
}
}
}
HloInstruction* root_instr = module->entry_computation()->root_instruction();
if (saved_root_shardings != nullptr && root_instr->shape().IsTuple() &&
root_instr->has_sharding()) {
saved_root_shardings->reserve(
root_instr->sharding().tuple_elements().size());
for (const HloSharding& sharding :
root_instr->sharding().tuple_elements()) {
saved_root_shardings->push_back(sharding);
}
}
if (saved_parameter_shardings != nullptr) {
auto params = module->entry_computation()->parameter_instructions();
for (int64_t i = 0; i < params.size(); ++i) {
if (params[i]->has_sharding()) {
saved_parameter_shardings->insert({i, params[i]->sharding()});
}
}
}
return changed;
}
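// Returns the number of users of `instr` that are not the root of its
// computation.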
int64_t ComputeNonRootUsers(const HloInstruction* instr) {
int64_t non_root_users = instr->users().size();
for (int i = 0; i < instr->users().size(); ++i) {
if (instr->users()[i] == instr->parent()->root_instruction()) {
--non_root_users;
}
}
return non_root_users;
}
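// Normalizes a sharding domain: if the domain metadata carries a spatially
// partitioned sharding, applies it to the operands of the exit domains;
// otherwise defers to the default ShardingMetadata normalization.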
absl::Status ShardingPropagation::NormalizeDomain(
const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
if (metadata != nullptr) {
TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
ShardingMetadata::ToShardingMetadata(metadata));
const auto& sharding = sharding_metadata->sharding();
if (sharding != nullptr) {
bool is_spatially_partitioned = !sharding->HasUniqueDevice();
if (sharding->IsTuple()) {
is_spatially_partitioned = absl::c_any_of(
sharding->tuple_elements(),
[](const HloSharding& s) { return !s.HasUniqueDevice(); });
}
if (is_spatially_partitioned) {
for (HloInstruction* d : domain.exit_domains) {
HloInstruction* operand = d->mutable_operand(0);
if (!operand->has_sharding() || operand->sharding() != *sharding) {
HloSharding operand_sharding = *sharding;
if (operand->shape().IsTuple() && !sharding->IsTuple()) {
operand_sharding =
HloSharding::SingleTuple(operand->shape(), *sharding);
}
operand->set_sharding(std::move(operand_sharding));
}
}
return absl::OkStatus();
}
}
}
return ShardingMetadata::NormalizeShardingDomain(domain, metadata);
}
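// Returns the sharding that should be propagated from `user` to
// `instruction`, or std::nullopt if nothing can be inferred.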
std::optional<HloSharding> ShardingPropagation::GetShardingFromUser(
const HloInstruction& instruction, const HloInstruction& user,
int64_t aggressiveness, bool is_spmd, const CallGraph& call_graph,
const CustomCallShardingHelper* sharding_helper) {
if (!CanPropagateThroughAtAggressiveLevel(user, aggressiveness)) {
return std::nullopt;
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(&user)) {
return std::nullopt;
}
const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
switch (user.opcode()) {
case HloOpcode::kBroadcast: {
return InferBroadcastOperandSharding(user, is_spmd);
}
case HloOpcode::kConcatenate: {
if (aggressiveness == 0) {
return std::nullopt;
}
if (user.sharding().IsReplicated()) {
return user.sharding();
}
const int64_t cdim = user.concatenate_dimension();
auto& tile_assignment = user.sharding().tile_assignment();
if (tile_assignment.dim(cdim) == 1) {
return user.sharding();
}
if (is_spmd) {
return user.sharding();
}
int64_t start_offset = 0;
for (HloInstruction* op : user.operands()) {
if (op == &instruction) {
break;
}
start_offset += op->shape().dimensions(cdim);
}
const int64_t tile_shape = CeilOfRatio(
user.shape().dimensions(cdim), tile_assignment.dimensions()[cdim]);
std::vector<int64_t> start_indices(tile_assignment.num_dimensions());
std::vector<int64_t> end_indices(tile_assignment.dimensions().begin(),
tile_assignment.dimensions().end());
start_indices[cdim] = start_offset / tile_shape;
end_indices[cdim] = CeilOfRatio(
start_offset + instruction.shape().dimensions(cdim), tile_shape);
auto new_tile_assignment =
tile_assignment.array().Slice(start_indices, end_indices);
if (new_tile_assignment.num_elements() == 1) {
return HloSharding::AssignDevice(*new_tile_assignment.begin(),
user.sharding().metadata());
}
return HloSharding::Tile(std::move(new_tile_assignment),
user.sharding().metadata());
}
case HloOpcode::kConvolution: {
auto dot_dims = dot_as_convolution_util::ParseConvolutionDimsInfo(&user);
if (dot_dims.conv_spatial_dims.empty()) {
int64_t op_idx = user.operand_index(&instruction);
return hlo_sharding_util::InferDotOperandSharding(
            &user, op_idx, dot_dims, /*consider_other_operand=*/true,
            may_combine_partial_sharding);
}
return std::nullopt;
}
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice: {
if (aggressiveness == 0) {
return std::nullopt;
}
if (user.sharding().IsReplicated()) {
return user.sharding();
}
if (user.opcode() == HloOpcode::kDynamicUpdateSlice &&
&instruction == user.operand(0)) {
return user.sharding();
}
const HloInstruction* operand = user.opcode() == HloOpcode::kDynamicSlice
? user.operand(0)
: user.operand(1);
if (&instruction != operand) {
return std::nullopt;
}
std::vector<int64_t> slice_dims;
for (int64_t i = 0; i < user.shape().rank(); ++i) {
if (user.shape().dimensions(i) != operand->shape().dimensions(i)) {
slice_dims.push_back(i);
}
}
return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
user.sharding(), slice_dims);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(&user);
if (!absl::c_linear_search(reduce_window->inputs(), &instruction)) {
return std::nullopt;
}
if (reduce_window->shape().IsTuple()) {
auto sub_sharding = reduce_window->sharding().GetSubSharding(
reduce_window->shape(),
{reduce_window->operand_index(&instruction)});
return sub_sharding;
}
return reduce_window->sharding();
}
case HloOpcode::kReshape: {
return hlo_sharding_util::PropagateShardingThroughReshape(
user.shape(), instruction.shape(), user.sharding());
}
case HloOpcode::kPad: {
if (&instruction != user.operand(0)) {
return std::nullopt;
}
return user.sharding();
}
case HloOpcode::kSlice: {
return user.sharding();
}
case HloOpcode::kTranspose: {
std::vector<int64_t> reverse_dimensions(user.dimensions().size());
for (int64_t i = 0; i < user.dimensions().size(); ++i) {
reverse_dimensions[user.dimensions(i)] = i;
}
return hlo_sharding_util::TransposeSharding(user.sharding(),
reverse_dimensions);
}
case HloOpcode::kTuple: {
auto sub_sharding = user.sharding().GetSubSharding(
user.shape(), {user.operand_index(&instruction)});
for (int64_t i = 0; i < user.shape().tuple_shapes_size(); ++i) {
if (user.operand(i) == &instruction) {
HloSharding alternative_sub_sharding =
user.sharding().GetSubSharding(user.shape(), {i});
if (hlo_sharding_util::IsShardingMoreSpecific(
alternative_sub_sharding, sub_sharding)) {
sub_sharding = alternative_sub_sharding;
}
}
}
return sub_sharding;
}
case HloOpcode::kGetTupleElement: {
int64_t sharding_index = 0;
for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {
if (i == user.tuple_index()) {
break;
}
if (instruction.shape().tuple_shapes(i).IsArray()) {
sharding_index += 1;
} else {
sharding_index +=
ShapeUtil::GetLeafCount(instruction.shape().tuple_shapes(i));
}
}
auto base_instruction_sharding = [&](const HloSharding& user_sharding) {
if (instruction.has_sharding()) {
return instruction.sharding();
} else {
std::vector<HloSharding> shardings;
ShapeUtil::ForEachSubshape(
instruction.shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
if (ShapeUtil::IsLeafIndex(instruction.shape(), index)) {
shardings.push_back(hlo_sharding_util::ReplicateAllDataDims(
user_sharding, sub_shape.dimensions_size()));
}
});
return HloSharding::Tuple(instruction.shape(), shardings);
}
};
if (user.shape().IsArray()) {
HloSharding new_sharding = base_instruction_sharding(user.sharding());
new_sharding.tuple_elements()[sharding_index] = user.sharding();
return new_sharding;
} else {
if (user.sharding().tuple_elements().empty()) {
return std::nullopt;
}
HloSharding new_sharding =
base_instruction_sharding(user.sharding().tuple_elements()[0]);
for (int64_t i = 0; i < user.sharding().tuple_elements().size(); ++i) {
new_sharding.tuple_elements()[sharding_index + i] =
user.sharding().tuple_elements()[i];
}
return new_sharding;
}
}
case HloOpcode::kDot: {
int64_t op_idx = user.operand_index(&instruction);
auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(&user);
return hlo_sharding_util::InferDotOperandSharding(
          &user, op_idx, dnums, /*consider_other_operand=*/true,
          may_combine_partial_sharding);
}
case HloOpcode::kReduce: {
if (instruction.shape().rank() == 0) {
return std::nullopt;
}
auto user_sharding =
user.shape().IsTuple()
? user.sharding().GetSubSharding(
user.shape(), {user.operand_index(&instruction)})
: user.sharding();
if (!user_sharding.IsTileMaximal()) {
std::vector<int64_t> target_tile_assignment_dimensions(
instruction.shape().rank() +
(user_sharding.ReplicateOnLastTileDim() ? 1 : 0) +
user_sharding.subgroup_types().size());
const auto& dimensions = user.dimensions();
int64_t next_output_dim = 0;
for (int64_t i = 0; i < target_tile_assignment_dimensions.size(); ++i) {
if (absl::c_find(dimensions, i) == dimensions.end()) {
target_tile_assignment_dimensions[i] =
user_sharding.tile_assignment().dim(next_output_dim++);
} else {
target_tile_assignment_dimensions[i] = 1;
}
}
auto tile_assignment = user_sharding.tile_assignment().Reshape(
target_tile_assignment_dimensions);
user_sharding =
user_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
user_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
user_sharding.subgroup_types(),
user_sharding.metadata());
}
const auto* reduce = Cast<const HloReduceInstruction>(&user);
for (const HloInstruction* operand : reduce->inputs()) {
if (operand != &instruction && operand->has_sharding()) {
hlo_sharding_util::MergeShardingIfCompatible(operand->sharding(),
&user_sharding);
}
}
return user_sharding;
}
case HloOpcode::kSort: {
HloSharding user_sharding = user.sharding();
if (user_sharding.IsTuple()) {
return user_sharding.GetSubSharding(user.shape(),
{user.operand_index(&instruction)});
}
return user_sharding;
}
case HloOpcode::kReverse: {
return hlo_sharding_util::ReverseSharding(user.sharding(),
user.dimensions());
}
case HloOpcode::kOutfeed: {
if (&instruction != user.operand(0)) {
return std::nullopt;
}
std::vector<Shape> operand_shapes(user.operand_count());
for (int i = 0; i < user.operand_count(); ++i) {
operand_shapes[i] = user.operand(i)->shape();
}
return user.sharding().GetSubSharding(
ShapeUtil::MakeTupleShape(operand_shapes), {0});
}
case HloOpcode::kGather: {
if (&instruction == user.operand(1)) {
return hlo_sharding_util::
GatherIndexShardingFromOutputIndexPassthroughDimensions(
user.sharding(), &user);
}
if (is_spmd) {
return hlo_sharding_util::GatherOperandShardingFromOutput(
user.sharding(), user, call_graph);
}
return std::nullopt;
}
case HloOpcode::kScatter: {
auto& scatter_user = *Cast<HloScatterInstruction>(&user);
const int64_t operand_count = scatter_user.scatter_operand_count();
auto scatter_operands = scatter_user.scatter_operands();
auto scatter_indices = scatter_user.scatter_indices();
auto scatter_updates = scatter_user.scatter_updates();
const int64_t operand_index =
absl::c_find(scatter_operands, &instruction) -
scatter_operands.cbegin();
if (operand_index < operand_count) {
return user.sharding().IsTuple() ? user.sharding().GetSubSharding(
user.shape(), {operand_index})
: user.sharding();
}
if (&instruction == scatter_indices) {
std::vector<const HloInstruction*> partitioned_updates;
for (const HloInstruction* update : scatter_updates) {
if (hlo_sharding_util::IsSpatiallyPartitioned(update)) {
partitioned_updates.push_back(update);
}
}
if (partitioned_updates.empty()) {
return std::nullopt;
}
std::vector<HloSharding> shardings;
absl::c_transform(
partitioned_updates, std::back_inserter(shardings),
[&scatter_user](const HloInstruction* update) {
return hlo_sharding_util::
ScatterIndexShardingFromUpdateIndexPassthroughDimensions(
update->sharding(), &scatter_user);
});
return hlo_sharding_util::FindCommonSharding(shardings);
}
const int64_t update_index = absl::c_find(scatter_updates, &instruction) -
scatter_updates.cbegin();
CHECK_LE(update_index, operand_count);
auto from_indices =
hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)
? hlo_sharding_util::
ScatterUpdateShardingFromIndexIndexPassthroughDimensions(
scatter_indices->sharding(), &scatter_user)
: HloSharding::Replicate();
if (is_spmd) {
auto from_output = hlo_sharding_util::ScatterUpdateShardingFromOutput(
user.sharding().IsTuple()
? user.sharding().GetSubSharding(user.shape(), {update_index})
: user.sharding(),
scatter_user, call_graph);
if (from_output.has_value()) {
hlo_sharding_util::MergeShardingIfCompatible(from_indices,
&*from_output);
if (!from_output->IsTileMaximal()) {
return from_output;
}
}
}
if (!from_indices.IsTileMaximal()) {
return from_indices;
}
return std::nullopt;
}
case HloOpcode::kCustomCall: {
bool compatible_shapes = ShapeUtil::CompatibleIgnoringElementType(
instruction.shape(), user.shape());
if (!compatible_shapes) {
return std::nullopt;
}
if (!sharding_helper) {
return user.sharding();
}
if (sharding_helper->CanPropagateShardingToOperands(&user)) {
return user.sharding();
}
return std::nullopt;
}
default: {
if (ShapeUtil::CompatibleIgnoringElementType(instruction.shape(),
user.shape())) {
return user.sharding();
}
return std::nullopt;
}
}
}
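// Returns whether a concatenate operand's sharding may pass through during
// the most aggressive iteration: the operand must be tiled across more than
// one device and be a reshape of a parameter or get-tuple-element.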
bool AggressiveConcatOperandShardingCanPassThrough(
const HloInstruction* concat_operand) {
return (
hlo_sharding_util::IsSpatiallyPartitioned(concat_operand) &&
(concat_operand->has_sharding() &&
concat_operand->sharding().NumTiles() > 1) &&
concat_operand->opcode() == HloOpcode::kReshape &&
(concat_operand->operand(0)->opcode() == HloOpcode::kParameter ||
concat_operand->operand(0)->opcode() == HloOpcode::kGetTupleElement));
}
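// Infers the sharding of a dynamic-slice from its input, or of a
// dynamic-update-slice from its update, partially replicating the dimensions
// along which slicing occurs.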
bool InferDynamicUpdateSliceShardingFromOperand1(
HloInstruction* instruction, bool may_combine_partial_sharding) {
CHECK(instruction->opcode() == HloOpcode::kDynamicSlice ||
instruction->opcode() == HloOpcode::kDynamicUpdateSlice);
const HloInstruction* operand =
instruction->opcode() == HloOpcode::kDynamicSlice
? instruction->operand(0)
: instruction->operand(1);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
CHECK(!operand->sharding().IsManual());
std::vector<int64_t> slice_dims;
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
if (instruction->shape().dimensions(i) != operand->shape().dimensions(i)) {
slice_dims.push_back(i);
}
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand->sharding(), slice_dims),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
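// Infers the sharding of a dynamic-update-slice from its input (operand 0),
// whose shape matches the output.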
bool InferDynamicUpdateSliceShardingFromOperand0(
HloInstruction* instruction, bool may_combine_partial_sharding) {
CHECK_EQ(instruction->opcode(), HloOpcode::kDynamicUpdateSlice);
if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
return false;
}
return MaybeImproveInstructionSharding(instruction->operand(0)->sharding(),
instruction,
may_combine_partial_sharding);
}
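// Tries to improve the sharding of `instruction` using the shardings of the
// other members of its shard group. Returns true if the sharding changed.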
bool ShardingPropagation::InferShardingFromShardGroup(
HloInstruction* instruction, int64_t aggressiveness,
const absl::flat_hash_set<HloInstruction*>& shard_group) {
if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
if (instruction->IsCustomCall(spmd::kShardBarrierTo)) {
return false;
}
if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) {
for (const HloInstruction* member : shard_group) {
if (!member->has_sharding() || !member->sharding().IsManual() ||
member == instruction) {
continue;
}
instruction->set_sharding(member->sharding());
return true;
}
}
const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;
bool changed = false;
for (const HloInstruction* member : shard_group) {
if (member == instruction ||
member->IsCustomCall(spmd::kShardBarrierFrom)) {
continue;
}
changed |= MaybeImproveInstructionSharding(member->sharding(), instruction,
may_combine_partial_sharding);
}
return changed;
}
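// Tries to improve the sharding of `instruction` using the shardings of its
// operands. Returns true if the sharding changed.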
bool ShardingPropagation::InferShardingFromOperands(
HloInstruction* instruction, const ComputationMap& computation_map,
int64_t aggressiveness, const CallGraph& call_graph,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
const bool custom_call_condition =
instruction->opcode() == HloOpcode::kCustomCall &&
instruction->shape().IsTuple();
const bool async_instr_condition =
instruction->IsAsynchronous() &&
!HloInstruction::IsThreadIncluded(instruction->async_execution_thread(),
execution_threads);
if ((!instruction->has_sharding() ||
instruction->sharding().IsTileMaximal()) &&
(instruction->shape().IsArray() ||
instruction->opcode() == HloOpcode::kReduce ||
instruction->opcode() == HloOpcode::kSort ||
instruction->opcode() == HloOpcode::kReduceWindow ||
custom_call_condition || async_instr_condition)) {
for (const HloInstruction* op : instruction->operands()) {
if (!op->has_sharding() || !op->sharding().IsManual()) continue;
if (instruction->IsCustomCall("SPMDShardToFullShape")) {
return false;
}
if (aggressiveness == 0 &&
(instruction->opcode() == HloOpcode::kConcatenate ||
instruction->opcode() == HloOpcode::kDynamicSlice)) {
return false;
}
instruction->set_sharding(
HloSharding::Manual(op->sharding().metadata())
.NormalizeTupleSharding(instruction->shape()));
return true;
}
}
const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;
if (!SupportSpatialPartitioning(
instruction, computation_map, is_spmd_,
allow_spmd_sharding_propagation_to_output_,
          /*allow_spmd_sharding_propagation_to_parameters=*/false,
sharding_helper_.get())) {
if (instruction->shape().IsTuple() || instruction->operand_count() == 0 ||
instruction == instruction->parent()->root_instruction() ||
instruction->HasSideEffect()) {
return false;
}
for (const HloInstruction* op : instruction->operands()) {
if (op->has_sharding() && op->sharding().IsTileMaximal() &&
!op->sharding().HasUniqueDevice()) {
return MaybeImproveInstructionSharding(op->sharding(), instruction,
may_combine_partial_sharding);
}
}
return false;
}
auto get_maybe_tuple_sharding = [&](HloSharding sharding) {
if (instruction->shape().IsArray()) {
return sharding;
}
std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(),
std::move(sharding));
return HloSharding::Tuple(instruction->shape(), tuple);
};
switch (instruction->opcode()) {
case HloOpcode::kGetTupleElement: {
const HloInstruction* operand = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
HloSharding new_sharding = operand->sharding().GetSubSharding(
operand->shape(), {instruction->tuple_index()});
if (new_sharding.IsManual()) {
instruction->set_sharding(std::move(new_sharding));
return true;
}
return MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
case HloOpcode::kTuple: {
if (absl::c_none_of(
instruction->operands(), [](const HloInstruction* hlo) {
return hlo_sharding_util::IsSpatiallyPartitioned(hlo);
})) {
return false;
}
const Shape& shape = instruction->shape();
std::vector<HloSharding> sub_shardings;
if (instruction->has_sharding()) {
sub_shardings = instruction->sharding().tuple_elements();
} else {
sub_shardings.assign(HloSharding::RequiredLeaves(shape),
HloSharding::Replicate());
}
auto is_more_specific = [instruction](const HloSharding& operand_sharding,
const HloSharding& existing) {
return !instruction->has_sharding() ||
hlo_sharding_util::IsShardingMoreSpecific(operand_sharding,
existing);
};
int64_t sub_sharding_index = 0;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const HloInstruction* operand = instruction->operand(i);
if (operand->has_sharding()) {
if (operand->shape().IsTuple()) {
for (int64_t j = 0, e = ShapeUtil::GetLeafCount(operand->shape());
j < e; ++j) {
if (is_more_specific(operand->sharding().tuple_elements()[j],
sub_shardings[sub_sharding_index + j])) {
sub_shardings[sub_sharding_index + j] =
operand->sharding().tuple_elements()[j];
}
}
} else {
std::optional<HloSharding> op_sharding =
hlo_sharding_util::GetOutputSharding(operand);
CHECK(op_sharding.has_value())
<< "Expected sharding for " << operand->ToString();
if (is_more_specific(op_sharding.value(),
sub_shardings[sub_sharding_index])) {
sub_shardings[sub_sharding_index] = op_sharding.value();
}
}
}
sub_sharding_index += ShapeUtil::GetLeafCount(operand->shape());
}
HloSharding new_sharding = HloSharding::Tuple(shape, sub_shardings);
if (!instruction->has_sharding() ||
new_sharding != instruction->sharding()) {
instruction->set_sharding(std::move(new_sharding));
return true;
}
return false;
}
case HloOpcode::kReduce: {
return InferReduceShardingFromOperand(
instruction, may_combine_partial_sharding, is_spmd_);
}
case HloOpcode::kBroadcast: {
if (aggressiveness < 3) {
return false;
}
const HloInstruction* op = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(op) ||
op->sharding().IsReplicated()) {
return false;
}
std::vector<int64_t> target_tile_assignment_dimensions;
const auto& dimensions = instruction->dimensions();
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
auto it = absl::c_find(dimensions, i);
if (it == dimensions.end()) {
target_tile_assignment_dimensions.push_back(1);
} else {
const int64_t source_dim = std::distance(dimensions.begin(), it);
target_tile_assignment_dimensions.push_back(
op->sharding().tile_assignment().dim(source_dim));
}
}
for (int64_t i = op->sharding().TiledDataRank();
i < op->sharding().tile_assignment().num_dimensions(); ++i) {
target_tile_assignment_dimensions.push_back(
op->sharding().tile_assignment().dim(i));
}
auto new_tile_assignment = op->sharding().tile_assignment().Reshape(
target_tile_assignment_dimensions);
HloSharding new_sharding =
op->sharding().ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile_assignment,
op->sharding().metadata())
: HloSharding::Subgroup(new_tile_assignment,
op->sharding().subgroup_types(),
op->sharding().metadata());
return MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kConcatenate: {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
if (aggressiveness == 0) {
for (const HloInstruction* concat_operand : instruction->operands()) {
if (!AggressiveConcatOperandShardingCanPassThrough(concat_operand)) {
return false;
}
const auto& tile_assignment =
concat_operand->sharding().tile_assignment();
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
if (absl::c_linear_search(instruction->dimensions(), i) &&
tile_assignment.dim(i) > 1) {
return false;
}
}
}
}
return MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kConvolution:
return InferConvolutionShardingFromOperands(
instruction, call_graph, aggressiveness, may_combine_partial_sharding,
is_spmd_);
case HloOpcode::kTranspose: {
const HloInstruction* input = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(input)) {
return false;
}
HloSharding sharding = hlo_sharding_util::TransposeSharding(
input->sharding(), instruction->dimensions());
return MaybeImproveInstructionSharding(
std::move(sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(instruction);
auto has_dilation = [](const WindowDimension& dimensions) {
return dimensions.base_dilation() > 1 ||
dimensions.window_dilation() > 1;
};
if (absl::c_any_of(instruction->window().dimensions(), has_dilation)) {
VLOG(2) << "Not applying sharding to reduce window because dilatation "
"isn't supported yet: "
<< reduce_window->ToString();
return false;
}
bool changed = false;
for (HloInstruction* operand : reduce_window->inputs()) {
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
continue;
}
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(operand->sharding()), reduce_window,
may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
return changed;
}
case HloOpcode::kSelectAndScatter: {
const HloInstruction* lhs = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {
return false;
}
auto has_base_dilation = [](const WindowDimension& dimensions) {
return dimensions.base_dilation() > 1;
};
if (absl::c_any_of(instruction->window().dimensions(),
has_base_dilation)) {
VLOG(2) << "Not applying sharding to select-and-scatter because "
"base dilation isn't supported yet: "
<< instruction->ToString();
return false;
}
return MaybeImproveInstructionSharding(
lhs->sharding(), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReshape: {
if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
return false;
}
HloSharding new_sharding =
hlo_sharding_util::PropagateShardingThroughReshape(
instruction->operand(0)->shape(), instruction->shape(),
instruction->operand(0)->sharding());
      return MaybeImproveInstructionSharding(
          std::move(new_sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReverse: {
const HloInstruction* operand = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::ReverseSharding(operand->sharding(),
instruction->dimensions()),
instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
}
case HloOpcode::kDot: {
const auto& dnums =
dot_as_convolution_util::ParseDotGeneralFromDot(instruction);
return InferDotShardingFromOperands(instruction, call_graph, dnums,
may_combine_partial_sharding,
is_spmd_);
}
case HloOpcode::kParameter: {
auto parent_it = computation_map.find(instruction->parent());
if (parent_it == computation_map.end()) {
return false;
}
const HloInstruction* parent = parent_it->second;
switch (parent->opcode()) {
case HloOpcode::kConditional: {
for (int64_t i = 1; i < parent->operand_count(); ++i) {
if (parent->called_computations()[i - 1] == instruction->parent()) {
if (parent->operand(i)->has_sharding()) {
return MaybeImproveInstructionSharding(
parent->operand(i)->sharding(), instruction,
may_combine_partial_sharding);
}
return false;
}
}
return false;
}
case HloOpcode::kCall: {
int64_t i = instruction->parameter_number();
if (parent->operand(i)->has_sharding()) {
return MaybeImproveInstructionSharding(
parent->operand(i)->sharding(), instruction,
may_combine_partial_sharding);
}
return false;
}
default:
return false;
}
}
case HloOpcode::kSort: {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
HloSortInstruction* sort = DynCast<HloSortInstruction>(instruction);
CHECK(sort);
const int64_t sort_dim = sort->sort_dimension();
if (!operand->sharding().IsTileMaximal() &&
operand->sharding().tile_assignment().dim(sort_dim) != 1) {
if (!hlo_sharding_util::IsSortOperandShardingMovable(operand, sort_dim))
return false;
}
if (instruction->shape().IsTuple()) {
return MaybeImproveInstructionSharding(
HloSharding::SingleTuple(instruction->shape(), operand->sharding()),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
} else {
return MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
}
case HloOpcode::kDynamicSlice: {
return InferDynamicUpdateSliceShardingFromOperand1(
instruction, may_combine_partial_sharding);
}
case HloOpcode::kDynamicUpdateSlice: {
bool changed = InferDynamicUpdateSliceShardingFromOperand1(
instruction, may_combine_partial_sharding);
changed |= InferDynamicUpdateSliceShardingFromOperand0(
instruction, may_combine_partial_sharding);
return changed;
}
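    // Gather: combine shardings inferred from the explicit batching dims, from
    // the index-passthrough dims of the start indices, and (under SPMD) from
    // detected parallel batch dims and the operand's passthrough dims.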
case HloOpcode::kGather: {
bool changed = false;
const GatherDimensionNumbers& dnums =
instruction->gather_dimension_numbers();
if (!dnums.operand_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.operand_batching_dims().begin(),
            dnums.operand_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.start_indices_batching_dims().begin(),
            dnums.start_indices_batching_dims().end());
        changed |= InferGatherParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
HloSharding new_sharding = hlo_sharding_util::
GatherOutputShardingFromIndexIndexPassthroughDimensions(
instruction->operand(1)->sharding(), instruction);
changed |= MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding);
}
if (is_spmd_) {
auto gather_parallel_dims =
hlo_sharding_util::GetGatherParallelBatchDims(*instruction,
call_graph);
if (gather_parallel_dims) {
changed |= InferGatherParallelShardingFromOperands(
instruction, *gather_parallel_dims, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(
instruction->operand(0))) {
absl::Span<const int64_t> operand_parallel_dims;
if (gather_parallel_dims) {
operand_parallel_dims = absl::MakeConstSpan(
gather_parallel_dims->operand_parallel_dims);
}
HloSharding filtered_operand_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
instruction->operand(0)->sharding(), operand_parallel_dims);
auto maybe_from_data = hlo_sharding_util::
GatherOutputShardingFromOperandOperandPassthroughDimensions(
filtered_operand_sharding, *instruction);
if (maybe_from_data) {
changed |= MaybeImproveInstructionSharding(
std::move(*maybe_from_data), instruction,
may_combine_partial_sharding);
}
}
}
return changed;
}
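    // Scatter: infer per-result sub-shardings from the explicit batching dims,
    // the scatter operands, detected parallel batch dims, and the updates; in
    // non-SPMD mode fall back to replicating each result.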
case HloOpcode::kScatter: {
auto& scatter = *Cast<HloScatterInstruction>(instruction);
bool changed = false;
const ScatterDimensionNumbers& dnums =
instruction->scatter_dimension_numbers();
if (!dnums.input_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.input_batching_dims().begin(),
            dnums.input_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.scatter_indices_batching_dims().begin(),
            dnums.scatter_indices_batching_dims().end());
        changed |= InferScatterParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
}
const int64_t operand_count = scatter.scatter_operand_count();
auto scatter_operands = scatter.scatter_operands();
auto scatter_indices = scatter.scatter_indices();
auto scatter_updates = scatter.scatter_updates();
if (is_spmd_) {
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {
changed |= MaybeImproveInstructionSubSharding(
scatter_operands[i]->sharding(), instruction, {i},
may_combine_partial_sharding);
}
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices) &&
absl::c_none_of(scatter_updates, [](const HloInstruction* update) {
return hlo_sharding_util::IsSpatiallyPartitioned(update);
})) {
return changed;
}
if (auto scatter_parallel_dims =
hlo_sharding_util::GetScatterParallelBatchDims(*instruction,
call_graph)) {
changed |= InferScatterParallelShardingFromOperands(
instruction, *scatter_parallel_dims,
may_combine_partial_sharding);
}
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
auto maybe_from_update =
hlo_sharding_util::ScatterOutputShardingFromUpdate(
scatter_updates[i]->sharding(), scatter);
if (maybe_from_update) {
changed |= MaybeImproveInstructionSubSharding(
std::move(*maybe_from_update), instruction, {i},
may_combine_partial_sharding);
}
}
}
} else {
for (int64_t i = 0; i != operand_count; ++i) {
changed |= MaybeImproveInstructionSubSharding(
HloSharding::Replicate(), instruction, {i},
may_combine_partial_sharding);
}
}
return changed;
}
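    // While: adopt the init operand's sharding, merged with any sharding the
    // while instruction already carries.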
case HloOpcode::kWhile: {
if (!instruction->operand(0)->has_sharding()) {
return false;
}
auto sharding = instruction->operand(0)->sharding();
if (instruction->has_sharding()) {
hlo_sharding_util::MergeSharding(instruction->sharding(), &sharding,
may_combine_partial_sharding);
}
return MaybeImproveInstructionSharding(std::move(sharding), instruction,
may_combine_partial_sharding);
}
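    // Custom-call: prefer a registered partitioner, then the sharding helper,
    // and otherwise fall back to a representative operand's sharding.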
case HloOpcode::kCustomCall: {
HloSharding inferred_operand_sharding = HloSharding::Replicate();
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target());
partitioner && partitioner->IsCustomCallShardable(instruction)) {
if (auto sharding =
partitioner->InferShardingFromOperands(instruction)) {
inferred_operand_sharding = *sharding;
} else {
return false;
}
} else if (sharding_helper_->IsCustomCallShardable(instruction)) {
if (auto sharding =
sharding_helper_->InferShardingFromOperands(instruction)) {
inferred_operand_sharding = *sharding;
} else {
return false;
}
} else {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
inferred_operand_sharding = operand->sharding();
}
return MaybeImproveInstructionSharding(
inferred_operand_sharding, instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
}
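    // Everything else: elementwise ops may merge (partial) shardings from all
    // operands; other ops propagate the sharding of a representative operand.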
default: {
if (instruction->IsElementwise() && may_combine_partial_sharding) {
bool changed = false;
for (auto operand : instruction->operands()) {
if (hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
if (instruction->opcode() == HloOpcode::kRng) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::ReplicateAllDataDims(
operand->sharding(), instruction->shape().rank()),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
changed |= MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
instruction->operands().size() == 1 &&
ComputeNonRootUsers(instruction) == 1);
}
}
return changed;
}
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
return MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
}
}
return false;
}
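// Tries to improve the sharding of `instruction` based on the shardings of its
// users; returns true if the instruction's sharding changed.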
bool ShardingPropagation::InferShardingFromUsers(
HloInstruction* instruction,
const ShardingPropagation::ComputationMap& computation_map,
int64_t aggressiveness, bool is_spmd,
const CustomCallShardingHelper* sharding_helper,
const CallGraph& call_graph) {
if (aggressiveness < 2 && instruction->opcode() == HloOpcode::kBroadcast) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) {
for (const HloInstruction* user : instruction->users()) {
if (!user->has_sharding() || user->IsCustomCall("SPMDFullToShardShape"))
continue;
if (instruction->shape().IsArray() && user->sharding().IsManual()) {
instruction->set_sharding(
HloSharding::Manual(user->sharding().metadata()));
return true;
} else {
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(
*instruction, *user, aggressiveness, is_spmd, call_graph,
sharding_helper);
if (user_sharding && user_sharding->IsManual()) {
instruction->set_sharding(std::move(*user_sharding));
return true;
}
}
}
}
if (!SupportSpatialPartitioning(
instruction, computation_map, is_spmd,
          /*allow_spmd_sharding_propagation_to_output=*/false,
allow_spmd_sharding_propagation_to_parameters_, sharding_helper)) {
return false;
}
bool improved_sharding = false;
const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
for (const HloInstruction* user : instruction->users()) {
if (user->opcode() == HloOpcode::kRngBitGenerator) {
instruction->set_sharding(HloSharding::Replicate());
return true;
}
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(*instruction, *user,
aggressiveness, is_spmd,
call_graph, sharding_helper);
if (user_sharding && instruction->opcode() == HloOpcode::kCustomCall) {
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target())) {
if (partitioner->IsCustomCallShardable(instruction)) {
user_sharding = partitioner->PropagateUserSharding(instruction, user,
*user_sharding);
}
} else if (sharding_helper->IsCustomCallShardable(instruction)) {
user_sharding = sharding_helper->PropagateUserSharding(
instruction, user, *user_sharding);
}
}
if (user_sharding) {
improved_sharding |= MaybeImproveInstructionSharding(
std::move(*user_sharding), instruction, may_combine_partial_sharding);
}
}
return improved_sharding;
}
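// If `instruction` sits on a computation boundary (a while/conditional/call/
// custom-call, or a parameter or root of a computation called by one),
// propagates its sharding to the related instructions, recursing on any whose
// sharding changed.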
void ShardingPropagation::MaybeComputationPropagation(
const ComputationMap& computation_map,
const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
HloInstruction* instruction,
absl::flat_hash_set<HloInstruction*>* changed) {
auto propagate_to_instruction = [&](HloInstruction* search_inst) {
auto related_instructions =
GetRelatedInstructions(search_inst, computation_map);
if (absl::c_count(related_instructions, instruction)) {
for (HloInstruction* inst : related_instructions) {
if ((!inst->has_sharding() ||
inst->sharding() != instruction->sharding()) &&
!provided_shardings.contains(inst)) {
VLOG(2) << "Add computation sharding: " << inst->name() << " "
<< instruction->sharding().ToString();
inst->copy_sharding(instruction);
changed->insert(inst);
MaybeComputationPropagation(computation_map, provided_shardings, inst,
changed);
}
}
}
};
if (instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kCustomCall ||
instruction->opcode() == HloOpcode::kCall) {
propagate_to_instruction(instruction);
}
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->parent()->root_instruction() == instruction) {
auto it = computation_map.find(instruction->parent());
if (it != computation_map.end()) {
propagate_to_instruction(it->second);
if (instruction->opcode() == HloOpcode::kParameter &&
(it->second->opcode() == HloOpcode::kConditional ||
it->second->opcode() == HloOpcode::kCall)) {
propagate_to_instruction(instruction);
}
}
}
}
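// Runs one aggressiveness level of propagation to a fix point: repeatedly
// applies shard-group propagation, a forward pass from operands, and a
// backward pass from users over every computation until no sharding changes.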
absl::StatusOr<bool> ShardingPropagation::RunToFixPoint(
int64_t aggressiveness, bool propagate_shard_group,
const ComputationMap& computation_map,
const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
const CallGraph& call_graph, HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>&
unspecified_dims,
absl::flat_hash_map<HloInstruction*, int64_t>&
instruction_to_shard_group_id,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
shard_group_id_to_shard_as_group,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
shard_group_id_to_shard_like_group,
int64_t& iterations) {
bool changed = false;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_shard_group;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_operands;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_users;
bool changed_last_iter = true;
const bool may_merge_partial = is_spmd_ && aggressiveness > 0;
while (changed_last_iter) {
changed_last_iter = false;
int64_t inferred_from_shard_group_counter = 0;
int64_t inferred_from_operand_counter = 0;
int64_t inferred_from_user_counter = 0;
int64_t instruction_counter = 0;
int64_t already_sharded_counter = 0;
for (const HloComputation* computation :
module->computations(execution_threads)) {
VLOG(2) << "Consider computation: " << computation->name();
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder();
instruction_counter += instructions.size();
already_sharded_counter += absl::c_count_if(
instructions,
[](const HloInstruction* inst) { return inst->has_sharding(); });
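      // When an instruction's sharding changes, invalidate the
      // "already inferred" caches of its neighbors so they get revisited.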
auto clear_cache = [&](HloInstruction* hlo,
HloInstruction* hlo_for_users = nullptr) {
for (auto operand : hlo->operands()) {
already_inferred_from_users.erase(operand);
}
if (hlo_for_users == nullptr) {
hlo_for_users = hlo;
}
for (auto user : hlo_for_users->users()) {
already_inferred_from_operands.erase(user);
for (auto c : user->called_computations()) {
for (auto parameter : c->parameter_instructions()) {
already_inferred_from_operands.erase(parameter);
}
}
}
if (instruction_to_shard_group_id.contains(hlo)) {
const int64_t shard_group_id = instruction_to_shard_group_id.at(hlo);
const absl::flat_hash_set<HloInstruction*>& shard_group =
shard_group_id_to_shard_as_group.contains(shard_group_id)
? shard_group_id_to_shard_as_group.at(shard_group_id)
: shard_group_id_to_shard_like_group.at(shard_group_id);
for (HloInstruction* member : shard_group) {
if (member != hlo) {
already_inferred_from_shard_group.erase(member);
}
}
}
};
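      // Pass 1: propagate shardings within shard groups.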
if (propagate_shard_group) {
for (HloInstruction* instruction : instructions) {
if (already_inferred_from_shard_group.contains(instruction)) {
continue;
}
if (!instruction_to_shard_group_id.contains(instruction)) {
continue;
}
const int64_t shard_group_id =
instruction_to_shard_group_id.at(instruction);
const absl::flat_hash_set<HloInstruction*>& shard_group =
shard_group_id_to_shard_as_group.contains(shard_group_id)
? shard_group_id_to_shard_as_group.at(shard_group_id)
: shard_group_id_to_shard_like_group.at(shard_group_id);
if (provided_shardings.contains(instruction)) {
if (!may_merge_partial) {
continue;
}
auto it = unspecified_dims.find(instruction);
if (it != unspecified_dims.end() &&
InferUnspecifiedDimsFromShardGroup(instruction, it->second,
shard_group)) {
++inferred_from_shard_group_counter;
VLOG(2) << "Refined partial sharding (shard group): "
<< instruction->ToString();
clear_cache(instruction);
already_inferred_from_shard_group.insert(instruction);
changed_last_iter = true;
}
continue;
}
already_inferred_from_shard_group.insert(instruction);
if (InferShardingFromShardGroup(instruction, aggressiveness,
shard_group)) {
++inferred_from_shard_group_counter;
changed = true;
VLOG(2) << "Add sharding (shard group): "
<< instruction->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings,
instruction, &changed_in_comp_prop);
clear_cache(instruction);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
}
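      // Pass 2: forward propagation, inferring shardings from operands in
      // post order.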
for (HloInstruction* instruction : instructions) {
if (already_inferred_from_operands.contains(instruction)) {
continue;
}
if (provided_shardings.contains(instruction)) {
if (!may_merge_partial) {
continue;
}
auto it = unspecified_dims.find(instruction);
HloInstruction* man_conversion_op_after;
if (it != unspecified_dims.end() &&
InferUnspecifiedDimsFromOperand(instruction, it->second,
&man_conversion_op_after)) {
++inferred_from_operand_counter;
VLOG(2) << "Refined partial sharding (forward-pass): "
<< instruction->ToString();
clear_cache(instruction, man_conversion_op_after);
already_inferred_from_operands.insert(instruction);
changed_last_iter = true;
}
continue;
}
already_inferred_from_operands.insert(instruction);
if (InferShardingFromOperands(instruction, computation_map,
aggressiveness, call_graph,
execution_threads)) {
++inferred_from_operand_counter;
changed = true;
VLOG(2) << "Add sharding (forward-pass): " << instruction->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings,
instruction, &changed_in_comp_prop);
clear_cache(instruction);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
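      // Pass 3: backward propagation, inferring shardings from users in
      // reverse post order.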
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
if ((*it)->IsCustomCall("SPMDFullToShardShape") ||
(*it)->IsCustomCall("SPMDShardToFullShape")) {
if (!already_inferred_from_users.contains(*it)) {
already_inferred_from_users.erase((*it)->operand(0));
}
}
if (already_inferred_from_users.contains(*it)) {
continue;
}
if (provided_shardings.contains(*it)) {
if (!may_merge_partial) {
continue;
}
auto uit = unspecified_dims.find(*it);
HloInstruction* man_conversion_op_after;
if (uit != unspecified_dims.end() &&
InferUnspecifiedDimsFromUsers(*it, uit->second, aggressiveness,
is_spmd_, &man_conversion_op_after,
call_graph)) {
++inferred_from_user_counter;
VLOG(2) << "Refined partial sharding (backward-pass): "
<< (*it)->ToString();
clear_cache(*it, man_conversion_op_after);
already_inferred_from_users.insert(*it);
if (man_conversion_op_after != nullptr) {
already_inferred_from_users.insert(man_conversion_op_after);
}
changed_last_iter = true;
}
continue;
}
already_inferred_from_users.insert(*it);
if (InferShardingFromUsers(*it, computation_map, aggressiveness,
is_spmd_, sharding_helper_.get(),
call_graph)) {
++inferred_from_user_counter;
changed = true;
VLOG(2) << "Add sharding (backward-pass): " << (*it)->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings, *it,
&changed_in_comp_prop);
clear_cache(*it);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
}
VLOG(1) << "Sharding propagation iteration " << iterations << ";"
<< "\n total instructions: " << instruction_counter
<< "\n instructions already sharded: " << already_sharded_counter
<< "\n shardings inferred from shard group: "
<< inferred_from_shard_group_counter
<< "\n shardings inferred from operands: "
<< inferred_from_operand_counter
<< "\n shardings inferred from users: "
<< inferred_from_user_counter
<< "\n aggressiveness: " << aggressiveness;
++iterations;
}
return changed;
}
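// Returns the instructions whose shardings are tied to `inst`: a while with
// its body root and body/condition parameters, a conditional with its branch
// roots, a call with its callee root, or a parameter with the matching caller
// operand.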
std::vector<HloInstruction*> ShardingPropagation::GetRelatedInstructions(
HloInstruction* inst, const ComputationMap& computation_map) {
if (inst->opcode() == HloOpcode::kWhile) {
return std::vector<HloInstruction*>{
inst, inst->while_body()->root_instruction(),
inst->while_body()->parameter_instruction(0),
inst->while_condition()->parameter_instruction(0)};
} else if (inst->opcode() == HloOpcode::kConditional) {
const auto& called_computations = inst->called_computations();
std::vector<HloInstruction*> comps;
comps.reserve(called_computations.size() + 1);
comps.push_back(inst);
for (HloComputation* c : called_computations) {
comps.push_back(c->root_instruction());
}
return comps;
} else if (inst->opcode() == HloOpcode::kCustomCall) {
if (sharding_helper_ && sharding_helper_->IsCustomCallShardable(inst)) {
return sharding_helper_->GetRelatedInstructions(inst);
} else {
return std::vector<HloInstruction*>{};
}
} else if (inst->opcode() == HloOpcode::kCall) {
HloComputation* callee = inst->called_computations().front();
return std::vector<HloInstruction*>{inst, callee->root_instruction()};
} else if (inst->opcode() == HloOpcode::kParameter) {
auto it = computation_map.find(inst->parent());
if (it != computation_map.end()) {
if (it->second->opcode() == HloOpcode::kConditional) {
HloInstruction* cond = it->second;
for (int64_t i = 1; i < cond->operand_count(); ++i) {
if (cond->called_computations()[i - 1] == inst->parent()) {
return std::vector<HloInstruction*>{inst, cond->mutable_operand(i)};
}
}
}
if (it->second->opcode() == HloOpcode::kCall) {
HloInstruction* call = it->second;
int64_t operand_index = inst->parameter_number();
CHECK_LT(operand_index, call->operand_count());
return std::vector<HloInstruction*>{
inst, call->mutable_operand(operand_index)};
}
}
return std::vector<HloInstruction*>{};
} else {
CHECK(false);
}
};
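// Entry point of the pass: seeds and normalizes shardings, propagates them
// through the module with increasing aggressiveness, aligns shard groups, and
// finally canonicalizes entry parameter and output shardings.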
absl::StatusOr<bool> ShardingPropagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
ABSL_CONST_INIT static absl::once_flag did_registration;
absl::call_once(did_registration, [] {
RegisterCustomCallPartitioner(
spmd::kShardBarrierFrom,
std::make_unique<spmd::ShardBarrierFromPartitioner>());
RegisterCustomCallPartitioner(
spmd::kShardBarrierTo,
std::make_unique<spmd::ShardBarrierToPartitioner>());
});
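  // In CSE-prevention mode, snapshot the original shardings so they can be
  // restored at the end; otherwise drop CSE-prevention shardings left over
  // from a previous run.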
std::optional<absl::flat_hash_map<const HloInstruction*, HloSharding>>
original_sharding;
bool any_changed = false;
if (cse_prevention_only_) {
original_sharding.emplace();
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->has_sharding()) {
original_sharding->emplace(instruction, instruction->sharding());
}
}
}
} else {
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->has_sharding() &&
IsCSEPreventionSharding(instruction->sharding())) {
instruction->clear_sharding();
any_changed = true;
}
}
}
}
any_changed |= propagate_metadata_
? AssignShardingMetadata(module, execution_threads)
: RemoveShardingMetadata(module, execution_threads);
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>
unspecified_dims;
std::vector<HloSharding> saved_root_shardings;
absl::flat_hash_map<int64_t, HloSharding> saved_parameter_shardings;
absl::flat_hash_map<HloInstruction*, int64_t> instruction_to_shard_group_id;
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
shard_group_id_to_shard_as_group;
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
shard_group_id_to_shard_like_group;
TF_ASSIGN_OR_RETURN(
bool changed,
ProcessShardingInstruction(
module, execution_threads, !cse_prevention_only_, &unspecified_dims,
allow_spmd_sharding_propagation_to_output_ ? &saved_root_shardings
: nullptr,
allow_spmd_sharding_propagation_to_parameters_
? &saved_parameter_shardings
: nullptr,
&instruction_to_shard_group_id, &shard_group_id_to_shard_as_group,
&shard_group_id_to_shard_like_group,
&allow_spmd_sharding_propagation_to_parameters_vector_));
any_changed |= changed;
for (const auto& [shard_group_id, shard_as_group] :
shard_group_id_to_shard_as_group) {
VLOG(5) << "Shard-As group " << shard_group_id << " contains:";
for (auto instruction : shard_as_group) {
VLOG(5) << " " << instruction->ToString();
}
}
for (const auto& [shard_group_id, shard_like_group] :
shard_group_id_to_shard_like_group) {
VLOG(5) << "Shard-Like group " << shard_group_id << " contains:";
for (auto instruction : shard_like_group) {
VLOG(5) << " " << instruction->ToString();
}
}
if (allow_spmd_sharding_propagation_to_output_) {
CHECK(!module->entry_computation()->root_instruction()->has_sharding() ||
allow_spmd_sharding_propagation_to_output_vector_.size() == 1 ||
module->entry_computation()
->root_instruction()
->sharding()
.tuple_elements()
.size() ==
allow_spmd_sharding_propagation_to_output_vector_.size())
<< "allow-spmd-sharding-propagation-to-output-vector's size can be "
"either 1 or the number of elements in the root tuple of entry "
"computation.";
}
if (allow_spmd_sharding_propagation_to_parameters_) {
auto is_same_sized_tuple = [](HloModule* module, int64_t size) {
if (module->entry_computation()->num_parameters() != 1) {
return false;
}
HloInstruction* param =
module->entry_computation()->parameter_instruction(0);
return param->shape().IsTuple() &&
size == param->shape().tuple_shapes_size();
};
auto size = allow_spmd_sharding_propagation_to_parameters_vector_.size();
CHECK(size == 1 || size == module->entry_computation()->num_parameters() ||
is_same_sized_tuple(module, size))
<< "allow-spmd-sharding-propagation-to-parameters-vector's size can be "
"either 1 or the number of parameters in the entry computation.";
}
ComputationMap computation_map;
absl::flat_hash_set<const HloInstruction*> provided_shardings;
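  // While bodies and conditions must agree on their device assignments before
  // propagation starts.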
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
TF_RETURN_IF_ERROR(
CheckAndUpdateDeviceAssignmentsInWhileBody(instruction));
}
}
}
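  // Map each called computation to its calling instruction, and copy any
  // existing sharding across the related instructions of a call site.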
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kCall) {
const HloInstruction* sharded_inst = nullptr;
auto related_instructions =
GetRelatedInstructions(instruction, computation_map);
for (auto inst : related_instructions) {
if (inst->has_sharding()) {
sharded_inst = inst;
break;
}
}
if (sharded_inst != nullptr) {
for (auto inst : related_instructions) {
inst->copy_sharding(sharded_inst);
}
}
if (instruction->opcode() == HloOpcode::kWhile) {
computation_map[instruction->while_body()] = instruction;
computation_map[instruction->while_condition()] = instruction;
} else {
for (HloComputation* c : instruction->called_computations()) {
computation_map[c] = instruction;
}
}
}
}
}
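  // Instructions that already carry a (non-unknown) user sharding are pinned:
  // propagation must not overwrite them.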
for (const HloComputation* computation :
module->computations(execution_threads)) {
for (const HloInstruction* inst : computation->instructions()) {
if (inst->has_sharding() &&
inst != module->entry_computation()->root_instruction() &&
inst->opcode() != HloOpcode::kParameter &&
!inst->sharding().IsUnknown()) {
provided_shardings.insert(inst);
}
}
}
HloInstruction* entry_root = module->entry_computation()->root_instruction();
if (!allow_spmd_sharding_propagation_to_output_ &&
(!entry_root->has_sharding() || !entry_root->sharding().IsUnknown())) {
if (entry_root->opcode() == HloOpcode::kWhile) {
HloInstruction* copy = module->entry_computation()->AddInstruction(
HloInstruction::CreateUnary(entry_root->shape(), HloOpcode::kCopy,
entry_root));
if (entry_root->has_sharding()) {
copy->set_sharding(entry_root->sharding());
}
module->entry_computation()->set_root_instruction(copy);
entry_root = copy;
any_changed = true;
}
provided_shardings.insert(entry_root);
}
if (!allow_spmd_sharding_propagation_to_parameters_) {
for (auto param : module->entry_computation()->parameter_instructions()) {
if (param->has_sharding() && !param->sharding().IsUnknown()) {
provided_shardings.insert(param);
}
}
}
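  // Downgrade explicit unknown shardings to replicated so that propagation
  // can refine them.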
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
HloInstruction* instruction = *it;
if (instruction->has_sharding() && instruction->sharding().IsUnknown()) {
instruction->set_sharding(
HloSharding::Replicate(instruction->sharding().metadata()));
}
}
}
int64_t iterations = 0;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
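  // Main loop: rerun propagation to a fix point at each aggressiveness level.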
for (int64_t aggressiveness = 0; aggressiveness < 4; ++aggressiveness) {
TF_ASSIGN_OR_RETURN(
bool changed,
        RunToFixPoint(aggressiveness, /*propagate_shard_group=*/true,
computation_map, provided_shardings, *call_graph, module,
execution_threads, unspecified_dims,
instruction_to_shard_group_id,
shard_group_id_to_shard_as_group,
shard_group_id_to_shard_like_group, iterations));
any_changed = any_changed || changed;
}
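  // Align every member of a shard-as group to a common sharding.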
for (const auto& [shard_as_group_id, shard_as_group] :
shard_group_id_to_shard_as_group) {
HloSharding default_sharding = HloSharding::Replicate();
std::vector<HloSharding> shardings;
for (HloInstruction* instruction : shard_as_group) {
if (instruction->has_sharding()) {
shardings.push_back(instruction->sharding());
if (!instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
default_sharding.IsReplicated()) {
default_sharding = instruction->sharding();
}
}
}
HloSharding common_sharding = shardings.empty()
? default_sharding
: hlo_sharding_util::FindCommonSharding(
shardings, default_sharding);
VLOG(2) << "Aligning shard group: " << shard_as_group_id
<< " to sharding:" << common_sharding.ToString();
for (HloInstruction* member : shard_as_group) {
if (member->IsCustomCall(spmd::kShardBarrierTo)) {
continue;
}
if (provided_shardings.contains(member)) {
auto it = unspecified_dims.find(member);
if (it != unspecified_dims.end()) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
common_sharding, it->second);
HloSharding sharding = member->sharding();
if (hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
member->set_sharding(sharding);
}
}
        continue;
      }
      member->set_sharding(common_sharding);
}
}
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
instruction_to_shard_group_id.contains(instruction) &&
shard_group_id_to_shard_as_group.contains(
instruction_to_shard_group_id.at(instruction))) {
HloSharding sharding = instruction->sharding();
hlo_sharding_util::MergeShardingIfCompatible(
instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
&sharding);
instruction->mutable_operand(0)->set_sharding(std::move(sharding));
}
}
}
{
TF_ASSIGN_OR_RETURN(
bool changed,
        RunToFixPoint(/*aggressiveness=*/3, /*propagate_shard_group=*/true,
computation_map, provided_shardings, *call_graph, module,
execution_threads, unspecified_dims,
instruction_to_shard_group_id,
shard_group_id_to_shard_as_group,
shard_group_id_to_shard_like_group, iterations));
any_changed = any_changed || changed;
}
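  // Merge shardings across the remaining shard-barrier-from ops, then strip
  // both kinds of shard-barrier custom-calls from the module.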
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
instruction_to_shard_group_id.contains(instruction) &&
shard_group_id_to_shard_as_group.contains(
instruction_to_shard_group_id.at(instruction))) {
HloSharding sharding = instruction->sharding();
hlo_sharding_util::MergeShardingIfCompatible(
instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
&sharding);
instruction->mutable_operand(0)->set_sharding(std::move(sharding));
}
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) ||
instruction->IsCustomCall(spmd::kShardBarrierTo)) {
TF_ASSIGN_OR_RETURN(std::ignore,
computation->ReplaceInstruction(
instruction, instruction->mutable_operand(0),
                                /*preserve_sharding=*/false,
                                /*relay_control_dependency=*/false,
                                /*remove_unused_operands=*/false));
}
}
}
if (cse_prevention_only_) {
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (!instruction->has_sharding()) {
continue;
}
if (IsCSEPreventionTarget(instruction) && instruction->has_sharding()) {
if (!(*original_sharding).contains(instruction)) {
instruction->set_sharding(
SetCSEPreventionSharding(instruction->sharding()));
}
continue;
}
auto it = (*original_sharding).find(instruction);
if (it != (*original_sharding).end()) {
instruction->set_sharding(it->second);
} else {
instruction->clear_sharding();
}
}
}
}
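  // Restore the saved root sub-shardings wherever propagation to the output
  // was disallowed.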
HloInstruction* root_instruction =
module->entry_computation()->root_instruction();
if (saved_root_shardings.size() ==
allow_spmd_sharding_propagation_to_output_vector_.size() &&
root_instruction->has_sharding()) {
HloSharding root_sharding = root_instruction->sharding();
for (int i = 0; i < saved_root_shardings.size(); ++i) {
if (!allow_spmd_sharding_propagation_to_output_vector_[i] &&
!saved_root_shardings[i].IsUnknown()) {
root_sharding.tuple_elements()[i] = saved_root_shardings[i];
}
}
root_instruction->set_sharding(std::move(root_sharding));
}
auto params = module->entry_computation()->parameter_instructions();
if (allow_spmd_sharding_propagation_to_parameters_) {
if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
params.size()) {
for (int64_t i = 0; i < params.size(); ++i) {
if (!allow_spmd_sharding_propagation_to_parameters_vector_[i]) {
if (saved_parameter_shardings.contains(i) &&
!saved_parameter_shardings.at(i).IsUnknown()) {
params[i]->set_sharding(saved_parameter_shardings.at(i));
} else {
params[i]->clear_sharding();
}
}
}
} else if (params.size() == 1 && saved_parameter_shardings.size() == 1 &&
params[0]->shape().IsTuple() &&
params[0]->shape().tuple_shapes_size() ==
allow_spmd_sharding_propagation_to_parameters_vector_
.size()) {
HloSharding param_sharding = params[0]->sharding();
for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
HloSharding saved_subsharding =
saved_parameter_shardings.at(0).GetSubSharding(params[0]->shape(),
{i});
if (!allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!saved_subsharding.IsUnknown()) {
param_sharding.tuple_elements()[i] = saved_subsharding;
}
}
params[0]->set_sharding(std::move(param_sharding));
}
}
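  // A propagated entry sharding is only kept if it partitions every dimension
  // evenly; otherwise it is replaced with a replicated sharding below.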
std::function<bool(const Shape&, const HloSharding&)> evenly_partitions =
[&evenly_partitions](const Shape& shape,
const HloSharding& sharding) -> bool {
if (!sharding.IsTiled()) {
return true;
}
if (sharding.IsTileMaximal()) {
return sharding.IsReplicated();
}
if (sharding.IsTuple()) {
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
if (!evenly_partitions(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}))) {
return false;
}
}
}
for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
return false;
}
}
return true;
};
if (allow_spmd_sharding_propagation_to_output_ &&
root_instruction->has_sharding()) {
if (root_instruction->shape().IsTuple() &&
allow_spmd_sharding_propagation_to_output_vector_.size() ==
root_instruction->shape().tuple_shapes_size()) {
HloSharding root_sharding = root_instruction->sharding();
for (int64_t i = 0; i < root_instruction->shape().tuple_shapes_size();
++i) {
if (allow_spmd_sharding_propagation_to_output_vector_[i] &&
!evenly_partitions(root_instruction->shape().tuple_shapes(i),
root_sharding.tuple_elements()[i])) {
root_sharding.tuple_elements()[i] = HloSharding::Replicate();
}
}
root_instruction->set_sharding(std::move(root_sharding));
} else if (!root_instruction->shape().IsTuple()) {
if (!evenly_partitions(root_instruction->shape(),
root_instruction->sharding())) {
root_instruction->set_sharding(HloSharding::Replicate());
}
}
}
if (allow_spmd_sharding_propagation_to_parameters_) {
if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
params.size()) {
for (int64_t i = 0; i < params.size(); ++i) {
if (params[i]->has_sharding() &&
allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!evenly_partitions(params[i]->shape(), params[i]->sharding())) {
params[i]->set_sharding(HloSharding::Replicate());
}
}
} else if (params.size() == 1 && params[0]->shape().IsTuple() &&
params[0]->has_sharding() &&
params[0]->shape().tuple_shapes_size() ==
allow_spmd_sharding_propagation_to_parameters_vector_
.size()) {
HloSharding param_sharding = params[0]->sharding();
for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
if (allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!evenly_partitions(
ShapeUtil::GetSubshapeOneIndex(params[0]->shape(), i),
params[0]->sharding().GetSubSharding(params[0]->shape(),
{i}))) {
param_sharding.tuple_elements()[i] = HloSharding::Replicate();
}
}
params[0]->set_sharding(std::move(param_sharding));
}
}
TF_RETURN_IF_ERROR(
hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
module, allow_spmd_sharding_propagation_to_output_,
allow_spmd_sharding_propagation_to_parameters_));
VLOG(1) << "Sharding propagation completed after " << iterations
<< " iterations";
return any_changed;
}
}  // namespace xla
#include "xla/service/sharding_propagation.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ShardingPropagationTest = HloTestBase;
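// Strips OpMetadata from instructions and metadata from shardings so a test
// can simulate a module without metadata.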
void ClearMetadata(HloModule* module) {
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->metadata().ByteSizeLong() != 0) {
instruction->set_metadata(OpMetadata());
}
if (!instruction->has_sharding()) {
continue;
}
instruction->set_sharding(instruction->sharding().WithoutMetadata());
}
}
}
struct MetadataTestParameter {
explicit MetadataTestParameter(bool propagate_metadata, bool clear_metadata)
: propagate_metadata(propagate_metadata),
clear_metadata(clear_metadata) {}
bool propagate_metadata = false;
bool clear_metadata = false;
};
struct MetadataTestParameterWithOutput {
explicit MetadataTestParameterWithOutput(bool propagate_metadata,
bool clear_metadata,
bool allow_root_sharding_propagation)
: propagate_metadata(propagate_metadata),
clear_metadata(clear_metadata),
allow_root_sharding_propagation(allow_root_sharding_propagation) {}
bool propagate_metadata = false;
bool clear_metadata = false;
bool allow_root_sharding_propagation = false;
};
class ParameterizedMetadataTest
: public HloTestBase,
public ::testing::WithParamInterface<MetadataTestParameter> {};
class ParameterizedMetadataTestWithOutput
: public HloTestBase,
public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {};
std::string OpMetadataListToString(absl::Span<const OpMetadata> metadata) {
std::vector<std::string> metadata_strings;
metadata_strings.reserve(metadata.size());
for (const OpMetadata& element : metadata) {
metadata_strings.push_back(
absl::StrCat("{", OpMetadataToString(element), "}"));
}
return absl::StrCat("{", absl::StrJoin(metadata_strings, ", "), "}");
}
class HloShardingMetadataMatcher
: public ::testing::MatcherInterface<const HloSharding&> {
public:
explicit HloShardingMetadataMatcher(absl::Span<const OpMetadata> metadata)
: metadata_(metadata.begin(), metadata.end()) {}
bool MatchAndExplain(
const HloSharding& sharding,
::testing::MatchResultListener* listener) const override {
if (sharding.metadata().size() != metadata_.size()) {
      *listener << sharding.ToString(/*include_metadata=*/true)
<< " has incorrect sharding metadata (expected: "
<< OpMetadataListToString(metadata_) << ")";
return false;
}
for (int i = 0, e = metadata_.size(); i < e; ++i) {
if (!protobuf_util::ProtobufEquals(sharding.metadata()[i],
metadata_[i])) {
        *listener << sharding.ToString(/*include_metadata=*/true)
<< " has incorrect sharding metadata (expected: "
<< OpMetadataListToString(metadata_) << ")";
return false;
}
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << OpMetadataListToString(metadata_);
}
private:
std::vector<OpMetadata> metadata_;
};
::testing::Matcher<const HloSharding&> ShardingMetadata(
absl::Span<const OpMetadata> metadata) {
return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata));
}
OpMetadata CreateMetadata(const std::string& op_name) {
OpMetadata metadata;
metadata.set_op_name(op_name);
return metadata;
}
INSTANTIATE_TEST_SUITE_P(
ShardingPropagation, ParameterizedMetadataTest,
    ::testing::Values(MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/true),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/true)),
[](const ::testing::TestParamInfo<MetadataTestParameter>& info) {
return absl::StrCat(info.param.propagate_metadata
? "MetadataPropagation"
: "NoMetadataPropagation",
"_",
info.param.clear_metadata ? "NoMetadataInModule"
: "MetadataInModule");
});
INSTANTIATE_TEST_SUITE_P(
ShardingPropagation, ParameterizedMetadataTestWithOutput,
    ::testing::Values(MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true)),
[](const ::testing::TestParamInfo<MetadataTestParameterWithOutput>& info) {
return absl::StrCat(
info.param.propagate_metadata ? "MetadataPropagation"
: "NoMetadataPropagation",
"_",
info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule",
"_",
info.param.allow_root_sharding_propagation ? "PropagateToRoot"
: "NoPropagateToRoot");
});
TEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3},
metadata={op_name="test"}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("test")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}},
metadata={op_name="test"}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
.Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("name")}));
}
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
.Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("name")}));
}
TEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
.Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048,2048]{2,1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
%broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2}
ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048,2048]{2,1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
%shard-barrier-from = f32[3,2048,2048]{2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%shard-barrier-from), dimensions={0,1,2}
ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[13]{0} parameter(0)
%broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[13]{0} parameter(0)
%param0_copy = f32[13]{0} copy(param0)
%shard-barrier-to = f32[13]{0} custom-call(%param0_copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%shard-barrier-to), dimensions={3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "param0_copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTest, Broadcast1DBackwardNoChange) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = s32[128]{0} parameter(0)
%constant0 = s32[] constant(0), sharding={replicated}
%broadcast = s32[128]{0} broadcast(%constant0), dimensions={}, sharding={replicated}
ROOT %compare = pred[128]{0} compare(s32[128]{0} %param0, s32[128]{0} %broadcast),
direction=NE, sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048]parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
ROOT %copy = f32[3,2048,3] copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastMerge) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048]parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
ROOT %copy = f32[3,2048,3] copy(%broadcast),
sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[24,8]{0,1} parameter(0)
%copy = f32[24,8]{0,1} copy(%param0)
ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3},
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[24,8]{0,1} parameter(0)
%copy = f32[24,8]{0,1} copy(%param0)
ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3},
sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}"));
}
}
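// Reducing along a tiled dimension cannot be represented without SPMD partial
// sharding, so the reduce result falls back to replicated.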
TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add
ROOT %copy = f32[5,7]{0,1} copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
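// Manual shardings on the operands of a variadic (tuple-shaped) reduce are
// forwarded to every tuple element of the result.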
TEST_F(ShardingPropagationTest, ManualTupleReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %reduce {
get-tuple-element.416 = f32[2,1,128]{2,1,0} parameter(0), sharding={manual}
get-tuple-element.417 = s32[2,1,128]{2,1,0} parameter(1), sharding={manual}
constant.3793 = f32[] constant(0)
constant.3795 = s32[] constant(0)
reduce.418 = (f32[2,1]{1,0}, s32[2,1]{1,0}) reduce(
get-tuple-element.416, get-tuple-element.417, constant.3793, constant.3795),
dimensions={2}, to_apply=minmax_func
ROOT %copy = (f32[2,1]{1,0}, s32[2,1]{1,0}) copy(%reduce.418)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce.418");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual}, {manual}}"));
}
TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add
ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
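// A ShardBarrierFrom custom call blocks forward propagation, so the reduce
// must remain unsharded.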
TEST_P(ParameterizedMetadataTest, ReduceForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%reduce = f32[7,11]{1,0} reduce(%shard-barrier-from, %init), dimensions={0,3}, to_apply=%add
ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReducePartiallyBackward) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0)
%input = f32[8,8] copy(%param0)
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%input, %init), dimensions={0}, to_apply=%add,
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
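// A ShardBarrierTo custom call blocks backward propagation, so the copy
// feeding the reduce must remain unsharded.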
TEST_P(ParameterizedMetadataTest, ReduceBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0)
%input = f32[8,8] copy(%param0)
%init = f32[] parameter(1)
%shard-barrier-to = f32[8,8] custom-call(%input), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%reduce = f32[8] reduce(%shard-barrier-to, %init), dimensions={0}, to_apply=%add,
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTestWithOutput,
ShardedOnNonReduceDimTupleReduceForwardAndBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0)
%param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%copy_param0 = f32[28,10] copy(%param0)
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
%reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func
%gte0 = f32[28] get-tuple-element(%reduce), index=0
%gte1 = s32[28] get-tuple-element(%reduce), index=1
%copy0 = f32[28] copy(%gte0)
%copy1 = s32[28] copy(%gte1)
ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(reduce, op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
ASSERT_NE(copy_param0, nullptr);
EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,1]0,1}"));
for (const HloSharding& sharding :
{copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
reduce->sharding().tuple_elements()[1]}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
}
}
TEST_P(ParameterizedMetadataTestWithOutput,
ShardedOnReduceDimTupleReduceForwardAndBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0)
%param1 = s32[28,10] parameter(1), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%copy_param0 = f32[28,10] copy(%param0)
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
%reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func
%gte0 = f32[28] get-tuple-element(%reduce), index=0
%gte1 = s32[28] get-tuple-element(%reduce), index=1
%copy0 = f32[28] copy(%gte0)
%copy1 = s32[28] copy(%gte1)
ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(reduce, op::Sharding("{{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate},{devices=[2,2]0,1,"
"2,3 last_tile_dim_replicate}}"));
auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
ASSERT_NE(copy_param0, nullptr);
EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,2]0,1,2,3}"));
for (const HloSharding& sharding :
{copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
reduce->sharding().tuple_elements()[1]}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate},{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
}
}
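// Tuple shardings are split per element as they flow forward through nested
// get-tuple-element instructions.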
TEST_P(ParameterizedMetadataTestWithOutput, GetTupleElementForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %gte {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param0)
%tuple.1 = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(
%param0, %tuple),
sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}},
{replicated metadata={op_name="b"}},
{devices=[1,2,2,1]0,1,2,3 metadata={op_name="c"}}}
%gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%tuple.1), index=0
%gte.1 = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) get-tuple-element(
%tuple.1), index=1
%gte.2 = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%gte.1), index=0
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* gte = FindInstruction(module.get(), "gte");
ASSERT_NE(gte, nullptr);
EXPECT_THAT(gte, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* gte1 = FindInstruction(module.get(), "gte.1");
ASSERT_NE(gte1, nullptr);
EXPECT_THAT(gte1, op::Sharding("{{replicated}, {devices=[1,2,2,1]0,1,2,3}}"));
auto* gte2 = FindInstruction(module.get(), "gte.2");
ASSERT_NE(gte2, nullptr);
EXPECT_THAT(gte2, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(gte->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(gte1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(gte1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(gte2->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sharding :
{gte->sharding(), gte1->sharding().tuple_elements()[0],
gte1->sharding().tuple_elements()[1], gte2->sharding()}) {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
}
TEST_P(ParameterizedMetadataTestWithOutput,
GetTupleElementForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %gte {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param0), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}},
{replicated metadata={op_name="b"}}}
%shard-barrier-from = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) custom-call(%tuple), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%shard-barrier-from), index=0
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* gte = FindInstruction(module.get(), "gte");
ASSERT_NE(gte, nullptr);
EXPECT_FALSE(gte->has_sharding());
}
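// A tuple collects its operands' shardings element-wise; operands without an
// annotation contribute a replicated element.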
TEST_P(ParameterizedMetadataTest, TupleForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
%param2 = f32[5,7,11,13]{3,2,1,0} parameter(2)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param1, %param2)
%tuple.1 = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(
%param0, %tuple)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) copy(
%tuple.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1]0,1,2,3},"
" {replicated}}"));
auto* tuple1 = FindInstruction(module.get(), "tuple.1");
ASSERT_NE(tuple1, nullptr);
EXPECT_THAT(tuple1, op::Sharding("{{replicated},"
" {devices=[1,2,2,1]0,1,2,3},"
" {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[2], ShardingMetadata({}));
} else {
for (const HloSharding& tuple_sharding :
{tuple->sharding(), tuple1->sharding()}) {
for (const HloSharding& sub_sharding : tuple_sharding.tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
}
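// Regression test: a subgroup-manual sharding on one tuple element must not
// be splatted onto the other, unannotated element.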
TEST_P(ParameterizedMetadataTest, TupleForwardPass_SplatBug) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dims={manual} metadata={op_name="b"}}
%param2 = f32[5,7,11,13]{3,2,1,0} parameter(2)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param1, %param2)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 "
"last_tile_dims={manual}}, {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, TupleForwardPassAndBackWardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[256,2]{1,0} parameter(0),
sharding={manual metadata={op_name="a"}}
%param1 = f32[256,2]{1,0} parameter(1),
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
%constant = s32[1,2]{1,0} constant({{0,1}})
  %gather = f32[1,32,2]{2,1,0} gather(%param0, %constant), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={32,2}
%tuple = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) tuple(
%gather, %param1)
ROOT %copy = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {devices=[1,2]0,1}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
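// The per-element tuple sharding on a while instruction propagates backward
// to the tuple that feeds it.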
TEST_P(ParameterizedMetadataTest, TupleShapedBackWardPass) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%zero = u32[] constant(0), sharding={replicated metadata={op_name="a"}}
%p0 = f32[] parameter(0), sharding={manual metadata={op_name="b"}}
%tuple = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%tuple), body=%body, condition=%cond,
sharding={{manual metadata={op_name="c"}},
{manual metadata={op_name="d"}}}
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("d")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
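// A manual sharding on one while-loop input spreads to the other tuple
// elements, including the repeated %zero operand.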
TEST_P(ParameterizedMetadataTest,
PartiallyManualTupleWithRepeatedOperandsBackWardPass) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (s32[], s32[], s32[]) parameter(0)
%count.cond = s32[] get-tuple-element(%vars.cond), index=0
%limit = s32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (s32[], s32[], s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%lhs = s32[] get-tuple-element(%param), index=1
%rhs = s32[] get-tuple-element(%param), index=2
%add = s32[] add(%lhs, %rhs)
ROOT %tuple = (s32[], s32[], s32[]) tuple(%count, %lhs, %add)
}
ENTRY %entry {
%zero = s32[] constant(0)
%p0 = s32[] parameter(0), sharding={manual metadata={op_name="a"}}
%tuple = (s32[], s32[], s32[]) tuple(%zero, %zero, %p0)
%while = (s32[], s32[], s32[]) while(%tuple), body=%body, condition=%cond
ROOT %copy = (s32[], s32[], s32[]) copy(%while)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = module->entry_computation()->root_instruction()->operand(0);
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}, {manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[2],
ShardingMetadata({CreateMetadata("a")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
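// Tiling on the convolution LHS batch and spatial dimensions is forwarded to
// the convolution output.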
TEST_P(ParameterizedMetadataTest, ForwardConvolutionForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%rhs = f32[3,3,13,17]{3,2,1,0} parameter(1)
%convolution = f32[5,7,11,17]{3,2,1,0} convolution(%lhs, %rhs),
window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
ROOT %copy = f32[5,7,11,17]{3,2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2,1]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ForwardConvolutionLargeDilationForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[8,64,2]{2,1,0} parameter(0),
sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[3,2,2]{2,1,0} parameter(1)
%convolution = f32[8,32,2]{2,1,0} convolution(%lhs, %rhs),
window={size=3 rhs_dilate=16}, dim_labels=b0f_0io->b0f
ROOT %copy = f32[8,32,2]{2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ForwardConvolution3DSmallKernel) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = bf16[32,32,8,7,128]{4,3,2,1,0} parameter(0),
sharding={devices=[1,4,1,1,1]0,1,2,3 metadata={op_name="a"}}
%rhs = bf16[3,3,3,128,256]{4,3,2,1,0} parameter(1)
%convolution = bf16[16,16,8,3,256]{4,3,2,1,0}
convolution(bf16[32,32,8,7,128]{4,3,2,1,0} %lhs,
bf16[3,3,3,128,256]{4,3,2,1,0} %rhs),
window={size=3x3x3 stride=2x2x2 pad=1_1x1_1x0_0},
dim_labels=01b2f_012io->01b2f
ROOT %copy = bf16[16,16,8,3,256]{4,3,2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1,1,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
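// A transpose permutes the dimensions of the operand's tile assignment.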
TEST_P(ParameterizedMetadataTest, TransposeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}}
%transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,2,1,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, TransposeForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}}
%shard-barrier-from = f32[7,11,13]{2,1,0} custom-call(%param), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-from), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, TransposeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,2,1,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, TransposeBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
%shard-barrier-to = f32[7,11,13]{2,1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-to), dimensions={1,2,0},
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
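// Reshape forwards the operand tiling when the sharded dimension maps cleanly
// onto an output dimension.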
TEST_P(ParameterizedMetadataTest, ReshapeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%reshape = f32[10,11,13]{2,1,0} reshape(%param0)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%shard-barrier-from = f32[1430,1]{1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%reshape = f32[10,11,13]{2,1,0} reshape(%shard-barrier-from)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
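// When only part of the tiling survives the reshape, the remainder is moved
// into a replicated last tile dimension.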
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[14,32] parameter(0),
sharding={devices=[4,4]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 metadata={op_name="a"}}
%reshape = f32[7,2,2,16] reshape(%param0)
ROOT %copy = f32[7,2,2,16] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,1,2,2,4]0,4,8,12,1,5,9,13,2,6,10,14,3,"
"7,11,15 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[12,8] parameter(0),
sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%reshape = f32[8,12] reshape(%param0)
ROOT %copy = f32[8,12] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassTranspose) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[6,4,5] parameter(0), sharding={devices=[6,2,1]<=[12] metadata={op_name="a"}}
%reshape.1 = f32[2,3,20] reshape(%param0)
%reshape.2 = f32[2,4,3,5] reshape(%param0)
%reshape.3 = f32[20,6] reshape(%param0)
%reshape.4 = f32[3,5,8] reshape(%param0)
%reshape.5 = f32[10,4,3] reshape(%param0)
%reshape.6 = f32[5,8,3] reshape(%param0)
ROOT %tuple = tuple(%reshape.1, %reshape.2, %reshape.3, %reshape.4, %reshape.5, %reshape.6)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
std::vector<std::pair<std::string, std::string>> instruction_and_sharding = {
{"reshape.1", "{devices=[2,3,2]<=[12]}"},
{"reshape.2", "{devices=[2,1,1,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.3", "{devices=[2,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.4", "{devices=[3,1,1,4]<=[12] last_tile_dim_replicate}"},
{"reshape.5", "{devices=[2,1,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.6", "{replicated}"}};
for (const auto& [name, sharding] : instruction_and_sharding) {
auto* instruction = FindInstruction(module.get(), name);
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding(sharding));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
%shard-barrier-to = f32[2002,1]{1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%shard-barrier-to),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
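// Pad keeps the operand's tiling on the padded result.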
TEST_P(ParameterizedMetadataTest, PadForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "pad");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PadBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0)
%copy = f32[11,17]{1,0} copy(%input)
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%copy, %pad_value), padding=2_4_1x1_1_2,
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
ROOT %result = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PartialReplicatedPadForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "pad");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
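// When an instruction sees both a replicated and a tiled operand sharding,
// the tiled sharding wins.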
TEST_P(ParameterizedMetadataTest, ShardedPreferredOverReplicated) {
const char* const hlo_string = R"(
HloModule module
ENTRY %replicated {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
%copy.1 = f32[5,7,11,13]{3,2,1,0} copy(%param1)
%add = f32[5,7,11,13]{3,2,1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(copy, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* copy1 = FindInstruction(module.get(), "copy.1");
ASSERT_NE(copy1, nullptr);
EXPECT_THAT(copy1, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
for (const HloSharding& sharding :
{copy->sharding(), copy1->sharding(), add->sharding()}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%reshape = f32[10,11,13]{2,1,0} reshape(%param0)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
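// Tuples whose operands all carry maximal (single-device) shardings are left
// unsharded.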
TEST_P(ParameterizedMetadataTest, DontShardTuplesIfAllInputIsMaximal) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={maximal device=0 metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={maximal device=1 metadata={op_name="b"}}
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param1)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "tuple");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::NoSharding());
}
TEST_P(ParameterizedMetadataTest, ValidConvolution) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[13,17,19]{2,1,0} parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[19,5,19]{2,1,0} parameter(1)
%conv = f32[13,13,19]{2,1,0} convolution(%lhs, %rhs),
window={size=5}, dim_labels=b0f_i0o->b0f
ROOT %tuple = (f32[13,13,19]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
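// A strided slice keeps the tiling of the dimensions it slices.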
TEST_P(ParameterizedMetadataTest, StridedSlice) {
const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
%param = f32[17,13]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "slice");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PartialReplicatedStridedSlice) {
const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
%param = f32[17,13]{1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "slice");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
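// A reduce-window's sharding propagates backward to its windowed operand.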
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
%param = f32[13,17]{1,0} parameter(0)
%param.copy = f32[13,17]{1,0} copy(%param)
%init = f32[] parameter(1)
ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%param.copy, %init),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* param_copy = FindInstruction(module.get(), "param.copy");
ASSERT_NE(param_copy, nullptr);
EXPECT_THAT(param_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* reduce_window = FindInstruction(module.get(), "reduce-window");
ASSERT_NE(reduce_window, nullptr);
EXPECT_THAT(reduce_window, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(param_copy->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(reduce_window->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(param_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
%param = f32[13,17]{1,0} parameter(0)
%param.copy = f32[13,17]{1,0} copy(%param)
%init = f32[] parameter(1)
%shard-barrier-to = f32[13,17]{1,0} custom-call(%param.copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%shard-barrier-to, %init),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* param_copy = FindInstruction(module.get(), "param.copy");
ASSERT_NE(param_copy, nullptr);
EXPECT_FALSE(param_copy->has_sharding());
}
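// Each tuple-element sharding of a variadic reduce-window flows back to the
// matching operand copy.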
TEST_P(ParameterizedMetadataTest, VariadicReduceWindowBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%add (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
%a = f32[] parameter(0)
%b = s32[] parameter(1)
%c = f32[] parameter(2)
%d = s32[] parameter(3)
%add.0 = f32[] add(%a, %c)
%add.1 = s32[] add(%b, %d)
ROOT %t = tuple(%add.0, %add.1)
}
ENTRY %reduce_window {
%param.0 = f32[13,17]{1,0} parameter(0)
%param.0.copy = f32[13,17]{1,0} copy(%param.0)
%param.1 = s32[13,17]{1,0} parameter(1)
%param.1.copy = s32[13,17]{1,0} copy(%param.1)
%init.0 = f32[] parameter(2)
%init.1 = s32[] parameter(3)
ROOT %reduce-window = (f32[7,17]{1,0}, s32[7,17]{1,0}) reduce-window(%param.0.copy, %param.1.copy, %init.0, %init.1),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={{devices=[2,1]0,1 metadata={op_name="a"}}, {devices=[2,1]0,1 metadata={op_name="b"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* param_0_copy = FindInstruction(module.get(), "param.0.copy");
ASSERT_NE(param_0_copy, nullptr);
EXPECT_THAT(param_0_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* param_1_copy = FindInstruction(module.get(), "param.1.copy");
ASSERT_NE(param_1_copy, nullptr);
EXPECT_THAT(param_1_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* reduce_window = FindInstruction(module.get(), "reduce-window");
ASSERT_NE(reduce_window, nullptr);
EXPECT_THAT(reduce_window,
op::Sharding("{{devices=[2,1]0,1}, {devices=[2,1]0,1}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(param_0_copy->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(param_1_copy->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(reduce_window->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(reduce_window->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
}
}
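// A replicated LHS makes the convolution output replicated as well.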
TEST_P(ParameterizedMetadataTest, ReplicatedConvolutionLhs) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[3,2,3]{2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%rhs = f32[2,2,1]{2,1,0} parameter(1)
%conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
window={size=1}, dim_labels=bf0_oi0->bf0
ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{replicated}"));
auto* conv = FindInstruction(module.get(), "conv");
ASSERT_NE(conv, nullptr);
EXPECT_THAT(conv, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(conv->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(lhs->sharding(), ShardingMetadata({}));
EXPECT_THAT(conv->sharding(), ShardingMetadata({}));
}
}
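// Sharding the contracted feature dimension of the LHS leaves the convolution
// output replicated.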
TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[3,2,3]{2,1,0} parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[2,2,1]{2,1,0} parameter(1)
%conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
window={size=1}, dim_labels=bf0_oi0->bf0
ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
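// The sharded LHS dimension maps onto the output through non-default
// dim_labels.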
TEST_P(ParameterizedMetadataTest, ConvolutionDifferentDimensionNumbers) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[8,16,512] parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[8,2,512] parameter(1)
%conv = f32[3,512,512] convolution(%lhs, %rhs),
window={size=2 stride=5},
dim_labels=f0b_i0o->0bf
ROOT %tuple = (f32[3,512,512]) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
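// Forward propagation through concatenate on a non-concat dimension keeps the
// operand sharding (and the first operand's metadata).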
TEST_P(ParameterizedMetadataTest, Concatenate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%param.1 = f32[5,9] parameter(1),
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
%concat = f32[5,16] concatenate(%param.0, %param.1),
dimensions={1}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "concat");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
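// ShardBarrierFrom custom calls on both operands block forward propagation
// into the concatenate.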
TEST_P(ParameterizedMetadataTest, ConcatenateForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%param.1 = f32[5,9] parameter(1),
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
%shard-barrier-from.0 = f32[5,7] custom-call(%param.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = f32[5,9] custom-call(%param.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%concat = f32[5,16] concatenate(%shard-barrier-from.0, %shard-barrier-from.1),
dimensions={1}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "concat");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
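// A ShardBarrierTo custom call blocks backward propagation from the
// concatenate to %copy.1.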
TEST_P(ParameterizedMetadataTest, ConcatenateBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0)
%copy.0 = f32[5,7] copy(%param.0)
%param.1 = f32[5,9] parameter(1)
%copy.1 = f32[5,9] copy(%param.1)
%shard-barrier-to = f32[5,9] custom-call(%copy.1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%concat = f32[5,16] concatenate(%copy.0, %shard-barrier-to),
dimensions={1}, sharding={devices=[2,1]0,1 metadata={op_name="a"}}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy.1");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
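// A tuple sharding propagates backward element-wise onto the tuple operands.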
TEST_P(ParameterizedMetadataTest, TupleBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param.0 = f32[1] parameter(0)
%param.1 = f32[3] parameter(1)
%copy.0 = f32[1] copy(%param.0)
%copy.1 = f32[3] copy(%param.1)
ROOT %tuple = (f32[1], f32[3]) tuple(%copy.0, %copy.1),
sharding={{replicated metadata={op_name="a"}},
{devices=[2]0,1 metadata={op_name="b"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy0 = FindInstruction(module.get(), "copy.0");
ASSERT_NE(copy0, nullptr);
EXPECT_THAT(copy0, op::Sharding("{replicated}"));
auto* copy1 = FindInstruction(module.get(), "copy.1");
ASSERT_NE(copy1, nullptr);
EXPECT_THAT(copy1, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(copy0->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(copy1->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(copy0->sharding(), ShardingMetadata({}));
EXPECT_THAT(copy1->sharding(), ShardingMetadata({}));
}
}
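// All-reduce is sharding-transparent: operand shardings flow forward and user
// shardings flow backward.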
TEST_P(ParameterizedMetadataTest, AllReduce) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%add_lhs = f32[] parameter(0)
%add_rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %add_lhs, f32[] %add_rhs)
}
ENTRY %entry {
%param.0 = f32[3] parameter(0)
%param.1 = f32[3] parameter(1)
%copy_f_t = f32[3] copy(%param.1),
sharding={devices=[2]0,1 metadata={op_name="a"}}
%crs_f.tiled = f32[3] all-reduce(%copy_f_t), to_apply=%add
%crs_f.none = f32[3] all-reduce(%copy_f_t), to_apply=%add,
channel_id=1
%crs_b.replicated = f32[3] all-reduce(%param.0), to_apply=%add
%copy_b_r = f32[3] copy(%crs_b.replicated),
sharding={replicated metadata={op_name="b"}}
ROOT %tuple = (f32[3], f32[3], f32[3]) tuple(
    %crs_f.tiled, %crs_f.none, %copy_b_r)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* crs_f_tiled = FindInstruction(module.get(), "crs_f.tiled");
ASSERT_NE(crs_f_tiled, nullptr);
EXPECT_THAT(crs_f_tiled, op::Sharding("{devices=[2]0,1}"));
auto* crs_f_none = FindInstruction(module.get(), "crs_f.none");
ASSERT_NE(crs_f_none, nullptr);
EXPECT_THAT(crs_f_none, op::Sharding("{devices=[2]0,1}"));
auto* crs_b_replicated = FindInstruction(module.get(), "crs_b.replicated");
ASSERT_NE(crs_b_replicated, nullptr);
EXPECT_THAT(crs_b_replicated, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(crs_f_tiled->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(crs_b_replicated->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(crs_f_tiled->sharding(), ShardingMetadata({}));
EXPECT_THAT(crs_b_replicated->sharding(), ShardingMetadata({}));
}
}
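// Shardings set inside the while body must propagate consistently to the
// while op, its body, and its condition.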
TEST_P(ParameterizedMetadataTest, While) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element((u32[], f32[10,10]) %vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[10,10] get-tuple-element((u32[], f32[10,10]) %vars), index=1
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one), sharding={replicated}
%acc.1 = f32[10,10] add(f32[10,10] %acc, f32[10,10] %acc)
ROOT %tuple = (u32[], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0)
%p1 = f32[10,10] parameter(1)
%zero = u32[] constant(0)
%init = (u32[], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy)
%while = (u32[], f32[10,10]) while((u32[], f32[10,10]) %init),
body=%body, condition=%cond
%res = f32[10,10] get-tuple-element((u32[], f32[10,10]) %while), index=1
%prev = f32[10,10] get-tuple-element((u32[], f32[10,10]) %init), index=1
%res.1 = f32[10,10] multiply(f32[10,10] %res, %prev)
ROOT %res_tuple = (f32[10,10]) tuple(f32[10,10] %res.1)
})";
auto while_is_sharded =
[this](HloModule* module, const HloSharding& sharding,
absl::Span<const absl::Span<const OpMetadata>> sharding_metadata) {
if (GetParam().clear_metadata) {
ClearMetadata(module);
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module));
EXPECT_TRUE(changed);
auto while_instr = FindInstruction(module, "while");
        ASSERT_NE(nullptr, while_instr);
std::vector<const HloInstruction*> instructions{
while_instr, while_instr->while_body()->root_instruction(),
while_instr->while_body()->parameter_instruction(0),
while_instr->while_condition()->parameter_instruction(0)};
for (auto instr : instructions) {
ASSERT_TRUE(instr->has_sharding());
EXPECT_EQ(sharding, instr->sharding());
ASSERT_EQ(instr->sharding().tuple_elements().size(),
sharding_metadata.size());
for (int i = 0, e = sharding_metadata.size(); i < e; ++i) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instr->sharding().tuple_elements()[i],
ShardingMetadata(sharding_metadata[i]));
} else {
EXPECT_THAT(instr->sharding().tuple_elements()[i],
ShardingMetadata({}));
}
}
}
};
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
    ASSERT_NE(nullptr, body_root);
auto sharding = ParseSharding(
"{{replicated metadata={op_name=\"b\"}}, "
"{devices=[2,1]0,1 metadata={op_name=\"c\"}}}")
.value();
body_root->set_sharding(sharding);
while_is_sharded(module.get(), sharding.WithoutMetadata(),
{{CreateMetadata("b")}, {CreateMetadata("c")}});
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto acc_1 = FindInstruction(module.get(), "acc.1");
    ASSERT_NE(nullptr, acc_1);
acc_1->set_sharding(
ParseSharding("{devices=[2,1]0,1 metadata={op_name=\"b\"}}").value());
while_is_sharded(
module.get(),
ParseSharding("{{replicated}, {devices=[2,1]0,1}}").value(),
{{}, {CreateMetadata("b")}});
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto acc_1 = FindInstruction(module.get(), "acc.1");
    ASSERT_NE(nullptr, acc_1);
acc_1->set_sharding(
ParseSharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate "
"metadata={op_name=\"b\"}}")
.value());
    auto p0 = FindInstruction(module.get(), "p0");
    ASSERT_NE(nullptr, p0);
    p0->set_sharding(
ParseSharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate "
"metadata={op_name=\"c\"}}")
.value());
while_is_sharded(module.get(),
ParseSharding("{{replicated}, "
"{devices=[2,2]0,1,2,3}}")
.value(),
{{}, {CreateMetadata("c"), CreateMetadata("b")}});
}
}
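// A manual sharding on the loop inputs must reach every instruction,
// including those in the while condition.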
TEST_F(ShardingPropagationTest, PropagateShardingInWhileCondition) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%vars = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[] get-tuple-element(%vars), index=1
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.1 = f32[] add(f32[] %acc, f32[] %acc)
ROOT %tuple = (u32[], f32[]) tuple(%count.1, %acc.1)
}
ENTRY %entry {
%p0 = f32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%zero = u32[] constant(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%init = (u32[], f32[]) tuple(%zero, %p0)
ROOT %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, /*propagate_metadata=*/false,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
HloSharding single_sharding =
ParseSharding("{devices=[2,2]<=[4] last_tile_dims={manual, replicated}}")
.value();
HloSharding tuple_sharding = HloSharding::SingleTuple(
module->entry_computation()->root_instruction()->shape(),
single_sharding);
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_TRUE(instruction->has_sharding());
EXPECT_EQ(instruction->sharding(), instruction->shape().IsTuple()
? tuple_sharding
: single_sharding);
}
}
}
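// The maximal-device sharding of a recv inside the body determines the
// sharding of the whole while.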
TEST_P(ParameterizedMetadataTest, WhileGetShardingFromRecvInBody) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto sharding =
ParseSharding("{{maximal device=1}, {maximal device=1}}").value();
auto while_instr = FindInstruction(module.get(), "while");
ASSERT_NE(nullptr, while_instr);
std::vector<const HloInstruction*> instructions{
while_instr, while_instr->while_body()->root_instruction(),
while_instr->while_body()->parameter_instruction(0),
while_instr->while_condition()->parameter_instruction(0)};
for (auto instr : instructions) {
ASSERT_TRUE(instr->has_sharding());
EXPECT_EQ(sharding, instr->sharding());
for (const HloSharding& sub_sharding : instr->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
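// A device assignment that conflicts with the recv channel device must make
// propagation fail.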
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyBeforeRecv) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0,
sharding={maximal device=0 metadata={op_name="a"}}
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="b"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: count is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
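// Same conflict as above, but on an instruction that consumes the recv
// result.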
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyAfterRecv) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0,
sharding={maximal device=0 metadata={op_name="b"}}
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: data is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
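// A conflicting device assignment on the while instruction itself is also an
// error.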
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingOnWhileInstruction) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond,
sharding={{maximal device=0 metadata={op_name="b"}},{maximal device=0}}
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: while is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
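// The kernel reshaped inside the loop inherits %p0's sharding through the
// loop tuple and the dynamic-slice.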
TEST_P(ParameterizedMetadataTest, WhileConv) {
const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%i0 = s32[] constant(0)
%count = u32[] get-tuple-element(%param), index=0
%gte0 = bf16[2,2048,768]{2,1,0}
get-tuple-element(%param), index=1
%index = s32[] get-tuple-element(%param), index=4
%dys = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0),
dynamic_slice_sizes={1,2048,768}
%kernel = bf16[2048, 768]{1,0}
reshape(%dys)
%lhs = bf16[128,512,2048]{2,1,0}
get-tuple-element(%param), index=2,
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
%reshape = bf16[2048,768,1]{2,1,0} reshape(bf16[2048,768]{1,0} %kernel)
%convolution = bf16[128,512,768]{2,1,0}
convolution(bf16[128,512,2048]{2,1,0} %lhs,
bf16[2048,768,1]{2,1,0} %reshape), window={size=1},
dim_labels=0bf_io0->0bf, sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %convolution, %index)
}
ENTRY %entry {
%p0 = bf16[2048,768] parameter(0),
sharding={devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%p1 = bf16[128,512,2048] parameter(1),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
%p2 = bf16[128,512,768] parameter(2)
%reshape0 = bf16[1,2048,768] reshape(%p0)
%concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
%zero = u32[] constant(0)
%p3 = s32[] parameter(3)
  %init = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
  %while = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "kernel");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,"
"7,9,11,13,15 last_tile_dim_replicate}"));
}
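// The concatenate takes its user's sharding instead of having %p0's sharding
// passed through at the first iteration.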
TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[16,2048,768] parameter(0),
sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
%add = bf16[32,2048,768] add(%concat, %concat),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "concat");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
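// Variant of the previous test with %p0 sharded on a different dimension.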
TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[16,2048,768] parameter(0),
sharding={devices=[1,2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
%add = bf16[32,2048,768] add(%concat, %concat),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "concat");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
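// Same behavior for dynamic-slice: the user's sharding wins over the
// operand's.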
TEST_P(ParameterizedMetadataTest,
DoNotPassThroughDynamicSliceAtFirstIteration) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[64,2048,768] parameter(0),
sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%dys = bf16[32,2048,768] dynamic-slice(%p0, s32[] %p1, s32[] %i0, s32[] %i0),
dynamic_slice_sizes={32,2048,768}
%add = bf16[32,2048,768] add(%dys, %dys),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "dys");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
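// Exercises forward and backward propagation through several dot variants
// (LHS, RHS, matrix-vector, and both operands from the result).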
TEST_P(ParameterizedMetadataTest, Dot) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%param.0 = f32[8,256,128] parameter(0)
%param.1 = f32[8,128,512] parameter(1)
%param.2 = f32[8,128] parameter(2)
%p0_copy_0 = f32[8,256,128] copy(%param.0),
sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
%p1_copy_0 = f32[8,128,512] copy(%param.1),
sharding={devices=[1,1,4]0,1,2,3 metadata={op_name="b"}}
%p2_copy = f32[8,128] copy(%param.2)
%dot_prop_rhs = f32[8,256,512] dot(%p0_copy_0, %p1_copy_0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot_prop_lhs = f32[8,512,256] dot(%p1_copy_0, %p0_copy_0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={2}
%dot_mat_vec = f32[8,256] dot(%p0_copy_0, %p2_copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%p0_copy_1 = f32[8,256,128] copy(%param.0)
%p1_copy_1 = f32[8,128,512] copy(%param.1)
%dot_back_prop_rhs = f32[8,256,512] dot(%p0_copy_1, %p1_copy_1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%copy_back_prop_rhs = f32[8,256,512] copy(%dot_back_prop_rhs),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="c"}}
ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
tuple(%dot_prop_lhs, %dot_prop_rhs, %dot_mat_vec, %copy_back_prop_rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dot_prop_rhs = FindInstruction(module.get(), "dot_prop_rhs");
ASSERT_NE(dot_prop_rhs, nullptr);
EXPECT_THAT(dot_prop_rhs, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
auto* dot_prop_lhs = FindInstruction(module.get(), "dot_prop_lhs");
ASSERT_NE(dot_prop_lhs, nullptr);
EXPECT_THAT(dot_prop_lhs, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
auto* dot_mat_vec = FindInstruction(module.get(), "dot_mat_vec");
ASSERT_NE(dot_mat_vec, nullptr);
EXPECT_THAT(dot_mat_vec, op::Sharding("{devices=[1,4]0,1,2,3}"));
auto* p0_copy_1 = FindInstruction(module.get(), "p0_copy_1");
ASSERT_NE(p0_copy_1, nullptr);
EXPECT_THAT(
p0_copy_1,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
auto* p1_copy_1 = FindInstruction(module.get(), "p1_copy_1");
ASSERT_NE(p1_copy_1, nullptr);
EXPECT_THAT(
p1_copy_1,
op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
auto* dot_back_prop_rhs = FindInstruction(module.get(), "dot_back_prop_rhs");
ASSERT_NE(dot_back_prop_rhs, nullptr);
EXPECT_THAT(dot_back_prop_rhs, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(dot_prop_rhs->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(dot_prop_lhs->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(dot_mat_vec->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(p0_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(p1_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(dot_back_prop_rhs->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
for (HloInstruction* instruction :
{dot_prop_rhs, dot_prop_lhs, dot_mat_vec, p0_copy_1, p1_copy_1,
dot_back_prop_rhs}) {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
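// The reshape's sharding propagates back through the dot onto the add
// operand's batch and non-contracting dimensions.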
TEST_P(ParameterizedMetadataTest, DotTiledBatchDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0)
%p1 = f32[8,512,128] parameter(1)
%add = f32[8,256,512] add(%p0, %p0)
%dot = f32[8,256,128] dot(%add, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%res = f32[8,32768] reshape(%dot),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
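// Orthogonal operand shardings merge into a combined sharding on the dot.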
TEST_P(ParameterizedMetadataTest, DotMergeOperands) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,1,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate metadata={op_name="b"}}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
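// When operand shardings overlap on the contracting dimension, the merged dot
// sharding is partially replicated.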
TEST_P(ParameterizedMetadataTest, DotMergeOperands2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="b"}}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
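// Differently permuted operand shardings merge into a single permuted
// sharding on the dot.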
TEST_P(ParameterizedMetadataTest, DotMergeOperands3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[256,512] parameter(0),
sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[128,512] parameter(1),
sharding={devices=[4,2]0,4,2,6,3,7,1,5 metadata={op_name="b"}}
%dot = f32[256,128] dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %copy = f32[256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,2,3,1,4,6,7,5}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
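// ShardBarrierFrom on the LHS blocks forward propagation, so the dot stays
// unsharded.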
TEST_P(ParameterizedMetadataTest, ForwardDotWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%shard-barrier-from = f32[8,256,512] custom-call(%p0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%dot = f32[8,256,128] dot(%shard-barrier-from, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
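// With a ShardBarrierTo in front of %copy1, the dot's tiled sharding cannot
// reach it; it only becomes replicated.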
TEST_P(ParameterizedMetadataTest, BackwardDotWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%shard-barrier-to = f32[8,128,512] custom-call(%copy1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%dot = f32[8,256,128] dot(%p0, %shard-barrier-to),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
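// Without a barrier, the LHS contracting-dim sharding propagates backward
// onto the RHS copy.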
TEST_P(ParameterizedMetadataTest, BackwardDotFromContracting) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%dot = f32[8,256,128] dot(%p0, %copy1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
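// Contracting-dim backward propagation also works when the shardings carry
// manual subgroups.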
TEST_P(ParameterizedMetadataTest, BackwardDotFromContractingWithManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %dot {
%p0 = f32[8,512] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%p1 = f32[512,128] parameter(1)
%copy1 = f32[512,128] copy(%p1)
%dot = f32[8,128] dot(%p0, %copy1),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={0},
sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="b"}}
ROOT %copy = f32[8,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
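// A convolution with trivial spatial dimensions is treated like a dot for
// forward propagation.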
TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[128,1,1,1001] parameter(0),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[1,1,1024,1001] parameter(1),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
%convolution = f32[128,1,1,1024] convolution(%lhs, %rhs),
window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
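// ShardBarrierFrom on the RHS blocks the dot-like forward propagation; the
// output is merely replicated.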
TEST_P(ParameterizedMetadataTest, ConvAsDotForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[128,1,1,1001] parameter(0),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[1,1,1024,1001] parameter(1),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
%shard-barrier-from = f32[1,1,1024,1001] custom-call(%rhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%convolution = f32[128,1,1,1024] convolution(%lhs, %shard-barrier-from),
window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
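// Backward propagation through a dot-like convolution replicates both operand
// copies.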
TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsBackward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[128,5,5,128] parameter(0)
%lhs = f32[128,5,5,128] copy(%p0)
%p1 = f32[5,5,128,768] parameter(1)
%rhs = f32[5,5,128,768] copy(%p1)
%convolution = f32[128,1,1,768] convolution(%lhs, %rhs), window={size=5x5},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
for (HloInstruction* instruction : {lhs, rhs}) {
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
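// With a ShardBarrierFrom on the LHS, the backward pass still replicates it.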
TEST_P(ParameterizedMetadataTest, ConvAsDotBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[128,5,5,128] parameter(0)
%lhs = f32[128,5,5,128] copy(%p0)
%p1 = f32[5,5,128,768] parameter(1)
%rhs = f32[5,5,128,768] copy(%p1)
%shard-barrier-from = f32[128,5,5,128] custom-call(%lhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%convolution = f32[128,1,1,768] convolution(%shard-barrier-from, %rhs), window={size=5x5},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{replicated}"));
}
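// Partial replication on the input and an IFOF-partitioned filter combine
// into a partially replicated convolution output.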
TEST_P(ParameterizedMetadataTest,
ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,112,112,12] parameter(0)
%lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs),
sharding={devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%rhs = f32[7,7,12,64] parameter(1)
%rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs),
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="b"}}
%conv = f32[128,56,56,64] convolution(
f32[128,112,112,12] %lhs.copy,
f32[7,7,12,64] %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=b01f_01io->b01f
  ROOT %copy = f32[128,56,56,64] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
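// For this grouped convolution, only the batch component of the LHS sharding
// survives; the rest becomes replication.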
TEST_P(ParameterizedMetadataTest, ConvolutionDataParallelism) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
p0 = f32[256,512,16,32] parameter(0), sharding={devices=[2,2,2,2]<=[16] metadata={op_name="lhs_sharding"}}
p1 = f32[512,1,12,28] parameter(1), sharding={replicated metadata={op_name="rhs_sharding"}}
conv = f32[256,512,5,5] convolution(p0, p1), window={size=12x28}, dim_labels=bf01_oi01->bf01, feature_group_count=512
ROOT copy = f32[256,512,5,5] copy(conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,1,1,8]<=[16] last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("lhs_sharding")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
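// A user sharding on a non-concat dimension propagates backward to both
// operands unchanged.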
TEST_P(ParameterizedMetadataTest, ConcatFromUserUnshardedDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[8,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[8,128] copy(%p1)
%concat = f32[16,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
for (HloInstruction* instruction : {c0, c1}) {
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
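// When the concat dimension itself is sharded, each operand gets the
// sub-sharding covering its slice of devices.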
TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[8,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[8,128] copy(%p1)
%concat = f32[16,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[3,1]0,1,2 metadata={op_name="a"}}
ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::Sharding("{devices=[2,1]0,1}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(c1, op::Sharding("{devices=[2,1]1,2}"));
for (HloInstruction* instruction : {c0, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
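// An operand that fits entirely within one shard stays unsharded; the other
// receives the remaining devices.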
TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDimMaximalOperand) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[24,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[24,128] copy(%p1)
%concat = f32[32,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[4,1]0,1,2,3 metadata={op_name="a"}}
ROOT %tuple = (f32[32,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(c0, op::NoSharding());
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(c1, op::Sharding("{devices=[3,1]1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(c1->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(c1->sharding(), ShardingMetadata({}));
}
}
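// Replicated operands must not force a sharding onto a side-effecting rng
// instruction.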
TEST_P(ParameterizedMetadataTest, ReplicatedToSideEffecting) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
%const.0 = s32[] constant(0),
sharding={replicated metadata={op_name="a"}}
%const.1 = s32[] constant(2147483647),
sharding={replicated metadata={op_name="b"}}
%rng = s32[4]{0} rng(%const.0, %const.1),
distribution=rng_uniform
ROOT %root = (s32[4]{0}) tuple(%rng)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "rng");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::NoSharding());
}
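// The nested tuple receives the matching element shardings of its user's
// tuple sharding.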
TEST_P(ParameterizedMetadataTest, PartReplicatedTupleUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
%param.0 = f32[5] parameter(0)
%param.1 = f32[7] parameter(1)
%param.2 = f32[9] parameter(2)
%tuple.0 = (f32[5], f32[7]) tuple(%param.0, %param.1)
ROOT %tuple.1 = ((f32[5], f32[7]), f32[9]) tuple(%tuple.0, %param.2),
sharding={{maximal device=0 metadata={op_name="a"}},
{replicated metadata={op_name="b"}},
{maximal device=1 metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "tuple.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{maximal device=0}, {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sub_sharding :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
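// Exercises propagation through a conditional: shardings flow between the
// branch parameters, the computations they call, and the conditional result.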
TEST_P(ParameterizedMetadataTest, Conditional) {
const char* const hlo_string = R"(
HloModule module
%add-call {
%x = f32[4,4] parameter(0)
ROOT %add = f32[4,4] add(%x, %x)
}
%true_comp {
%tp = (f32[3,5], f32[4,4]) parameter(0)
%tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
%ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
%tgte.1 = f32[4,4] get-tuple-element(%tp), index=1
%tadd = f32[4,4] call(%tgte.1), to_apply=%add-call
ROOT %tr = (f32[5,3], f32[4,4]) tuple(%ttr, %tadd)
}
%mul-call {
%y = f32[4,4] parameter(0)
ROOT %mul = f32[4,4] multiply(%y, %y)
}
%false_comp {
%fp = (f32[5,3], f32[4,4]) parameter(0)
%fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
%fgte.1 = f32[4,4] get-tuple-element(%fp), index=1
%fmul = f32[4,4] call(%fgte.1), to_apply=%mul-call
ROOT %fr = (f32[5,3], f32[4,4]) tuple(%fgte.0, %fmul)
}
ENTRY entry {
%cond = pred[] parameter(0)
%tp.0 = f32[3,5] parameter(1), sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%fp.0 = f32[5,3] parameter(2), sharding={devices=[1,3]0,1,2 metadata={op_name="b"}}
%constant = f32[4] constant({1,2,3,4}), sharding={devices=[4]0,1,2,3 metadata={op_name="c"}}
%broadcast = f32[4,4] broadcast(%constant), dimensions={1}
%add = f32[4,4] add(%broadcast, %broadcast)
%true_param = (f32[3,5], f32[4,4]) tuple(%tp.0, %add)
%false_param = (f32[5,3], f32[4,4]) tuple(%fp.0, %add)
%conditional = (f32[5,3], f32[4,4]) conditional(
%cond, %true_param, %false_param),
true_computation=%true_comp,
false_computation=%false_comp
ROOT %root = f32[5,3] get-tuple-element(%conditional), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tp = FindInstruction(module.get(), "tp");
auto* tgte_0 = FindInstruction(module.get(), "tgte.0");
auto* ttr = FindInstruction(module.get(), "ttr");
auto* tgte_1 = FindInstruction(module.get(), "tgte.1");
auto* tadd = FindInstruction(module.get(), "tadd");
auto* tr = FindInstruction(module.get(), "tr");
auto* fp = FindInstruction(module.get(), "fp");
auto* fgte_0 = FindInstruction(module.get(), "fgte.0");
auto* fgte_1 = FindInstruction(module.get(), "fgte.1");
auto* fmul = FindInstruction(module.get(), "fmul");
auto* fr = FindInstruction(module.get(), "fr");
auto* x = FindInstruction(module.get(), "x");
auto* add = FindInstruction(module.get(), "add");
auto* y = FindInstruction(module.get(), "y");
auto* mul = FindInstruction(module.get(), "mul");
auto* conditional = FindInstruction(module.get(), "conditional");
const std::vector<HloInstruction*> instructions(
{tp, tgte_0, ttr, tgte_1, tadd, tr, fp, fgte_0, fgte_1, fmul, fr, x, add,
y, mul, conditional});
for (HloInstruction* instruction : instructions) {
EXPECT_NE(instruction, nullptr);
EXPECT_TRUE(instruction->has_sharding());
}
for (HloInstruction* instruction :
{tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
}
for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,3]0,1,2}, {devices=[1,4]0,1,2,3}}"));
}
EXPECT_THAT(tp, op::Sharding("{{devices=[1,2]0,1}, {devices=[1,4]0,1,2,3}}"));
EXPECT_THAT(tgte_0, op::Sharding("{devices=[1,2]0,1}"));
EXPECT_THAT(ttr, op::Sharding("{devices=[2,1]0,1}"));
EXPECT_THAT(fgte_0, op::Sharding("{devices=[1,3]0,1,2}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
for (HloInstruction* instruction :
{tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
}
for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
const std::vector<HloSharding>& shardings =
instruction->sharding().tuple_elements();
EXPECT_THAT(shardings[0], ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(shardings[1], ShardingMetadata({CreateMetadata("c")}));
}
for (HloInstruction* instruction : {tgte_0, ttr}) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
}
EXPECT_THAT(fgte_0->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
for (HloInstruction* instruction : instructions) {
if (instruction->sharding().IsTuple()) {
for (const HloSharding& tuple_element :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(tuple_element, ShardingMetadata({}));
}
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
}
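// Shardings on the root tuple propagate backward through get-tuple-element
// chains to the nested tuples that produced the elements.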
TEST_P(ParameterizedMetadataTest, TupleFromUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[13] parameter(0)
%p1 = f32[15] parameter(1)
%p2 = f32[17] parameter(2)
%t0 = (f32[13], f32[15]) tuple(%p0, %p1)
%t1 = ((f32[13], f32[15]), f32[17]) tuple(%t0, %p2)
%gte.0 = (f32[13], f32[15]) get-tuple-element(%t1), index=0
%gte.1 = f32[13] get-tuple-element(%gte.0), index=0
%gte.2 = f32[15] get-tuple-element(%gte.0), index=1
%gte.3 = f32[17] get-tuple-element(%t1), index=1
ROOT %t2 = (f32[13], f32[15], f32[17]) tuple(%gte.1, %gte.2, %gte.3),
sharding={{replicated metadata={op_name="a"}},
{devices=[2]0,1 metadata={op_name="b"}},
{devices=[3]1,2,3 metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* t0 = FindInstruction(module.get(), "t0");
ASSERT_NE(t0, nullptr);
EXPECT_THAT(t0, op::Sharding("{{replicated}, {devices=[2]0,1}}"));
auto* t1 = FindInstruction(module.get(), "t1");
ASSERT_NE(t1, nullptr);
EXPECT_THAT(
t1, op::Sharding("{{replicated}, {devices=[2]0,1}, {devices=[3]1,2,3}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(t0->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(t0->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(t1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(t1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(t1->sharding().tuple_elements()[2],
ShardingMetadata({CreateMetadata("c")}));
} else {
for (HloInstruction* instruction : {t0, t1}) {
for (const HloSharding& sub_sharding :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
}
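// A ShardBarrierFrom custom call blocks forward propagation into the
// dynamic-slice.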
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%ds = f32[11,1,15] dynamic-slice(%shard-barrier-from, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "ds");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
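// Forward propagation through dynamic-slice: the sliced dimension loses its
// tiling, which is folded into last_tile_dim_replicate.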
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "ds");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
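// Backward propagation from a sharded dynamic-slice to its data operand,
// again replicating the mesh axis of the sliced dimension.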
TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15},
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "c0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%ds = f32[11,1,15] dynamic-slice(%shard-barrier-to, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15},
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "c0");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
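// dynamic-update-slice keeps the operand sharding on its result; the update
// operand is sharded like the operand with the updated dimension replicated.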
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassBase) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_THAT(dus, op::Sharding("{devices=[2,2,2]<=[8]}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(
c1, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {dus, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-from, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_FALSE(dus->has_sharding());
}
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassUpdate) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_THAT(
dus, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(
c0, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {dus, c0}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(c0, op::Sharding("{devices=[2,2,2]<=[8]}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(
c1, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {c0, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-to, %c1, %i0, %p2, %i0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_FALSE(c0->has_sharding());
}
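// The Einsum* tests cover dot-like convolutions: batch and non-contracting
// tilings propagate between the operands and the output in both directions.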
TEST_P(ParameterizedMetadataTestWithOutput, EinsumLHSBatchPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs)
%conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}
ROOT %copy = f32[32,24,39296] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(rhs_copy, nullptr);
EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
auto* conv = FindInstruction(module.get(), "conv");
ASSERT_NE(conv, nullptr);
EXPECT_THAT(conv, op::Sharding("{devices=[2,1,1]0,1}"));
for (HloInstruction* instruction : {rhs_copy, conv}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,1]0,1}"));
}
}
TEST_P(ParameterizedMetadataTest, EinsumOutputBatchPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs)
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs)
%conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32},
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs_copy = FindInstruction(module.get(), "lhs.copy");
ASSERT_NE(lhs_copy, nullptr);
EXPECT_THAT(lhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(rhs_copy, nullptr);
EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
for (HloInstruction* instruction : {lhs_copy, rhs_copy}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, EinsumLHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[32,39296,64,1] parameter(1)
%rhs.copy = f32[32,39296,64,1] copy(%rhs)
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumOutputLHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs)
%rhs = f32[32,39296,64,1] parameter(1)
%rhs.copy = f32[32,39296,64,1] copy(%rhs)
ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1},
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "lhs.copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumRHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs)
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumOutputRHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs)
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs)
ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1},
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
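// When both operands carry incompatible shardings, the output adopts the
// sharding derived from the larger operand.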
TEST_P(ParameterizedMetadataTest, EinsumChooseLargerOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs),
sharding={devices=[1,4,1,1]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="b"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
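// Between competing operand shardings, the tiling on the shared batch
// dimension wins.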
TEST_P(ParameterizedMetadataTest, EinsumChooseBatchFirst) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[2,1,1,1]0,1 metadata={op_name="b"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
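// Gather tests: shardings propagate between a gather and its indices or data
// operand, in both forward and backward directions.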
TEST_P(ParameterizedMetadataTest, GatherFromIndex) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[2,3,4] parameter(1),
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
%gather = f32[3,4,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,9}
ROOT %copy = f32[3,4,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherFromIndex_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherFromDataOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1)
%indices = s32[3] copy(%p1)
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1)
%indices = s32[3] copy(%p1)
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex2) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,1000,2] parameter(1)
%indices = s32[2,1000,2] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex2_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,1000,2] parameter(1)
%indices = s32[2,1000,2] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
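// With index_vector_dim=1, the output batch dimension maps to indices
// dimension 2, so the backward-propagated tiling lands there.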
TEST_P(ParameterizedMetadataTest, GatherToIndex3) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,2,1000] parameter(1)
%indices = s32[2,2,1000] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,2,1000] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4},
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToDataOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToDataOperand_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
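// Scatter tests: operand and update shardings (per result for variadic
// scatters) propagate forward into the scatter.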
TEST_P(ParameterizedMetadataTest, DataOperandToScatter) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DataOperandToScatter_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DataOperandToScatter_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={devices=[1,4]0,1,2,3 metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates.0 = f32[3,9] parameter(3),
sharding={replicated metadata={op_name="d"}}
%updates.1 = f32[3,9] parameter(4),
sharding={replicated metadata={op_name="e"}}
%scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2]0,1 metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates.0 = f32[3,9] parameter(3),
sharding={devices=[1,4]0,1,2,3 metadata={op_name="d"}}
%updates.1 = f32[3,9] parameter(4),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}
%scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,4] 0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("d")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
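// Backward direction: the scatter result sharding propagates to the data
// operand(s).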
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="b"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="b"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2]0,1 metadata={op_name="c"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input.0 = f32[2,9] copy(%p0)
%p1 = f32[2,9] parameter(1)
%input.1 = f32[2,9] copy(%p1)
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="a"}}
%updates.0 = f32[3,9] parameter(3),
sharding={replicated metadata={op_name="b"}}
%updates.1 = f32[3,9] parameter(4),
sharding={replicated metadata={op_name="c"}}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={{devices=[1,4]0,1,2,3 metadata={op_name="d"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("d")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "input.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
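// Backward direction: the scatter result sharding propagates to the update
// operand(s).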
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%p2 = f32[3,9] parameter(2)
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
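// A tiled scatter output sharding should propagate backward to the updates
// operand.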
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%p2 = f32[3,9] parameter(2)
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
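// Each output sharding of a variadic scatter should propagate backward to
// the matching updates operand.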
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0)
%input.1 = f32[2,9] parameter(1)
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="a"}}
%p3 = f32[3,9] parameter(3)
%updates.0 = f32[3,9] copy(%p3)
%p4 = f32[3,9] parameter(4)
%updates.1 = f32[3,9] copy(%p4)
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={{devices=[1,4]0,1,2,3 metadata={op_name="b"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "updates.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
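// The sharding of the updates operand should propagate to the scatter
// indices.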
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1]0,1 metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
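// Updates-to-indices propagation with 2-D indices and index_vector_dim=0.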
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[1,3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[1,3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1]0,1 metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=0,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
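// Updates-to-indices propagation when the updates are partially replicated.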
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
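// Updates-to-indices propagation when the updates rank differs from the
// operand rank.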
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_RankMismatch) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[1,24,24,24,3,3] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[1,24,24,24,5] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[1,24,24,24,5] copy(%p1)
%updates = f32[1,24,24,24,3] parameter(2),
sharding={devices=[1,2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="c"}}
%scatter = f32[1,24,24,24,3,3] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={4},
inserted_window_dims={0,1,2,3,4},
scatter_dims_to_operand_dims={0,1,2,3,4},
index_vector_dim=4,
sharding={replicated metadata={op_name="d"}}
ROOT %copy = f32[1,24,24,24,3,3] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2,2,1]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
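// The shardings of both variadic updates should be merged when propagating
// to the shared indices.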
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%p2 = s32[3,3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%indices = s32[3,3] copy(%p2)
%updates.0 = f32[3,3,9] parameter(3),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="d"}}
%updates.1 = f32[3,3,9] parameter(4),
sharding={devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="e"}}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={{replicated metadata={op_name="d"}}, {replicated metadata={op_name="e"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("d"), CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
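// The indices sharding should propagate to the updates operand.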
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2]0,1 metadata={op_name="b"}}
%p2 = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
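// Indices-to-updates propagation with a partially replicated indices
// sharding.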
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%p2 = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
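// Indices-to-updates propagation for multi-dimensional indices in SPMD mode.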
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate2_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = bf16[15,8] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[8,1,1] parameter(1),
sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7
last_tile_dim_replicate metadata={op_name="b"}}
%p2 = bf16[8,1,8] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = bf16[8,1,8] copy(%p2)
ROOT %scatter = bf16[15,8]{1,0} scatter(bf16[15,8] %input,
s32[8,1,1] %indices, bf16[8,1,8] %updates),
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2, to_apply=%add,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
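// The indices sharding should propagate to every updates operand of a
// variadic scatter.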
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3,3] parameter(2),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="c"}}
%p3 = f32[3,3,9] parameter(3),
sharding={replicated metadata={op_name="d"}}
%updates.0 = f32[3,3,9] copy(%p3)
%p4 = f32[3,3,9] parameter(4),
sharding={replicated metadata={op_name="e"}}
%updates.1 = f32[3,3,9] copy(%p4)
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "updates.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
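// Partially replicated operand shardings should merge into a fully tiled
// sharding on an elementwise op.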
TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[2,9] parameter(1),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
%lhs = f32[2,9] copy(%p0)
%rhs = f32[2,9] copy(%p1)
%add = f32[2,9] add(%lhs, %rhs)
ROOT %copy = f32[2,9] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
EXPECT_THAT(rhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[2,2]0,2,1,3}"));
for (HloInstruction* instruction : {lhs, rhs, add}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
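// The same merge behavior on an 8-device mesh, where the merged sharding
// keeps a replicated last tile dimension.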
TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[2,9] parameter(1),
sharding={devices=[2,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%lhs = f32[2,9] copy(%p0)
%rhs = f32[2,9] copy(%p1)
%add = f32[2,9] add(%lhs, %rhs)
ROOT %copy = f32[2,9] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(
lhs,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
EXPECT_THAT(
rhs,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(
add,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(lhs->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
EXPECT_THAT(rhs->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
EXPECT_THAT(add->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
for (HloInstruction* instruction : {lhs, rhs, add}) {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
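// Forward propagation of a partially replicated sharding through a
// transpose permutes the tile dimensions accordingly.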
TEST_P(ParameterizedMetadataTest, PartialShardingTransposeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[1,2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
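// Backward propagation of a partially replicated sharding through a
// transpose.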
TEST_P(ParameterizedMetadataTest, PartialShardingTransposeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},
sharding={devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
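// ShardBarrierFrom custom calls should block forward propagation into the
// gather.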
TEST_P(ParameterizedMetadataTest, GatherForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate.19), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
s32[2,8,4]{2,1,0} %shard-barrier-from.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
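// A ShardBarrierTo custom call should block backward propagation to the
// gather operand; the indices still receive a sharding.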
TEST_P(ParameterizedMetadataTest, GatherBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%shard-barrier-to = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-to,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{replicated}"));
}
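// Forward propagation through explicit gather batching dimensions, from the
// operand to the result.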
TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromOperandToResult) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
%indices = s32[14,10,6,2] parameter(1)
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,"
"3,1) last_tile_dim_replicate}"));
}
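// Forward propagation through explicit gather batching dimensions, from the
// indices to the result.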
TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromIndicesToResult) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}
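// Backward propagation from the gather result to both parameters via
// explicit batching dimensions.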
TEST_F(ShardingPropagationTest, GatherBackwardWithExplicitBatchDims) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4},
sharding={devices=[2,2,2,2]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true},
/*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
"last_tile_dim_replicate}"));
EXPECT_THAT(
module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}
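// Forward propagation through explicit scatter batching dimensions, from
// the operand to the result.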
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromOperandToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,2] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2,2]<=[16]}"));
}
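// Forward propagation through explicit scatter batching dimensions, from
// the indices to the result.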
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromIndicesToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
%updates = f32[14,10,6,2] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding(
"{devices=[2,1,2,1,4]<=[2,2,4]T(1,0,2) last_tile_dim_replicate}"));
}
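// Forward propagation through explicit scatter batching dimensions, from
// the updates to the result.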
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromUpdatesToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,2,2,2]<=[16]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
"last_tile_dim_replicate}"));
}
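// Backward propagation from the scatter result to all three parameters via
// explicit batching dimensions.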
TEST_F(ShardingPropagationTest, ScatterBackwardWithExplicitBatchDims) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,4] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[2,2,2,2]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true},
/*allow_spmd_sharding_propagation_to_parameters=*/{true, true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,2,2]<=[16]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,1,1,4]<=[2,2,2,2]T(2,0,1,3) "
"last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(2),
op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,3,1) "
"last_tile_dim_replicate}"));
}
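// Forward propagation from the operand of a gather whose iota-derived
// indices form parallel dimensions.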
TEST_P(ParameterizedMetadataTest, ParallelGatherFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
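// Forward propagation from the indices of a gather with parallel
// dimensions.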
TEST_P(ParameterizedMetadataTest, ParallelGatherFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
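// Backward propagation from a parallel gather to its operand and indices.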
TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
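// As above, but with a transposed start_index_map so the tiled dimension
// maps to a different operand dimension.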
TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[4,8,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,4,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
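// Partially replicated variant of ParallelGatherFromOperandForwardPass.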
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelGatherFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
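// Partially replicated variant of ParallelGatherFromIndexForwardPass.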
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelGatherFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
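// Partially replicated variant of ParallelGatherBackwardPass.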
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding(
"{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(
copy_p,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
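// Partially replicated variant of ParallelGatherBackwardPass2.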
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[4,8,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(
copy_p,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
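// ShardBarrierFrom custom calls should block forward propagation into the
// scatter.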
TEST_P(ParameterizedMetadataTest, ScatterForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.2 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
s32[2,8,4]{2,1,0} %shard-barrier-from.1,
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
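// A ShardBarrierTo custom call on the data operand should block backward
// propagation to it; the indices and updates still receive shardings.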
TEST_P(ParameterizedMetadataTest, ScatterBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%shard-barrier-to.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-to.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{replicated}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
}
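// Forward propagation from the data operand of a scatter with parallel
// dimensions.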
TEST_P(ParameterizedMetadataTest, ParallelScatterFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
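// Forward propagation from the indices of a parallel scatter.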
TEST_P(ParameterizedMetadataTest, ParallelScatterFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
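// Forward propagation from the updates of a parallel scatter.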
TEST_P(ParameterizedMetadataTest, ParallelScatterFromUpdateForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
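// Verifies that sharding on the scatter propagates backward to its operand,
// updates, and indices.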
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
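// Same as ParallelScatterBackwardPass, but with
// scatter_dims_to_operand_dims={1,0}, so the backward-propagated operand and
// update shardings land on swapped dimensions.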
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[4,8,2,2]{3,2,1,0} scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[1,4,1,1]0,1,4,5}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
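// Verifies that a partially replicated sharding on the scatter operand
// propagates forward to the scatter.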
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
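// Verifies that a partially replicated sharding on the parallel scatter
// indices propagates forward to the scatter.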
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
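// Verifies that a partially replicated sharding on the scatter updates
// propagates forward to the scatter.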
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromUpdateForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
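// Verifies that a partially replicated sharding on the scatter propagates
// backward to its operand, updates, and indices.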
TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding(
"{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(
copy_p0,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
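// Same as PartialShardingParallelScatterBackwardPass, but with
// scatter_dims_to_operand_dims={1,0}, exercising the dimension remapping
// during backward propagation.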
TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[4,8,2,2]{3,2,1,0} scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(
copy_p0,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
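// Verifies that distinct shardings on the two operands of a variadic scatter
// propagate forward to the corresponding tuple elements of the result.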
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromOperandForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[8,1,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
"1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
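// Verifies that sharding on the parallel indices of a variadic scatter
// propagates forward to both tuple elements of the result.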
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromIndexForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 "
"last_tile_dim_replicate},{devices=[4,1,1,1,2]0,1,4,"
"5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
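// Verifies that distinct shardings on the two updates of a variadic scatter
// propagate forward to the corresponding tuple elements of the result.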
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromUpdateForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={devices=[1,8,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,8,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
"1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
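// Verifies that a tuple sharding on a variadic scatter propagates backward to
// its operands, updates, and indices.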
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[8,4,2,2]{3,2,1,0} %copy.p1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p2,
s32[8,4,2,2]{3,2,1,0} %copy.p3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={{devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}},
{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}}
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
ASSERT_NE(copy_p2, nullptr);
EXPECT_THAT(copy_p2, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
ASSERT_NE(copy_p3, nullptr);
EXPECT_THAT(
copy_p3,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
for (HloInstruction* instruction : {copy_p1, copy_p3}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
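// Same as ParallelScatterBackwardPass_Variadic, but with
// scatter_dims_to_operand_dims={1,0}, exercising the dimension remapping
// during backward propagation.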
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%parameter.1 = s32[4,8,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
%scatter = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[4,8,2,2]{3,2,1,0} %copy.p1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p2,
s32[8,4,2,2]{3,2,1,0} %copy.p3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={{devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}},
{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="b"}}}
ROOT %copy = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
ASSERT_NE(copy_p2, nullptr);
EXPECT_THAT(copy_p2, op::Sharding("{devices=[1,4,1,1]0,1,4,5}"));
auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
ASSERT_NE(copy_p3, nullptr);
EXPECT_THAT(
copy_p3,
op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
for (HloInstruction* instruction : {copy_p1, copy_p3}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
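// Verifies that an operand sharding combining an index-parallel dimension and
// an operand-passthrough dimension propagates forward through the gather.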
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
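// Verifies that a gather sharding combining an index-parallel dimension and
// an operand-passthrough dimension propagates backward to the operand and
// indices.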
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndOperandPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
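// Verifies that an indices sharding combining an index-parallel dimension and
// an index-passthrough dimension propagates forward through the gather.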
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndIndexPassthroughFromIndicesForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
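// Verifies that a gather sharding combining an index-parallel dimension and
// an index-passthrough dimension propagates backward to the operand and
// indices.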
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
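// Verifies that an operand sharding combining an index-parallel dimension and
// a trivially sliced dimension propagates forward through the gather.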
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
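// Verifies that shardings on the gather and on its operand, combining an
// index-parallel dimension and a trivially sliced dimension, are merged
// during backward propagation.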
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
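// Verifies that an operand sharding combining an operand-passthrough
// dimension and a trivially sliced dimension propagates forward through the
// gather.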
TEST_P(
ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,2,1]0,4,1,5 metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
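// Verifies that the gather's operand-passthrough sharding merges with the
// operand's trivially sliced sharding during backward propagation.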
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
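// Verifies that operand-passthrough and index-passthrough shardings from the
// operand and indices are merged on the gather during forward propagation.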
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
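// Verifies that a gather sharding combining operand-passthrough and
// index-passthrough dimensions propagates backward to the operand and
// indices.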
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
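// Verifies that a trivially sliced operand sharding and an index-passthrough
// indices sharding are merged on the gather during forward propagation.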
TEST_P(
ParameterizedMetadataTest,
GatherMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
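// Verifies the backward-propagation counterpart of the test above.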
TEST_P(ParameterizedMetadataTest,
GatherMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
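// Verifies that an operand sharding combining an index-parallel dimension and
// an operand-passthrough dimension propagates forward through the scatter.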
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
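// Verifies that an update sharding combining an index-parallel dimension and
// an operand-passthrough dimension propagates forward through the scatter.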
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
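
// Backward pass: the same merged index-parallel + operand-passthrough sharding
// annotated on the scatter propagates back to the operand, indices, and
// update.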
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
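
// Forward pass: a merged index-parallel + trivially-sliced-operand sharding on
// the operand propagates to the indices, update, and scatter result.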
TEST_P(
ParameterizedMetadataTest,
ScatterMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
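
// Backward pass: a merged index-parallel + trivially-sliced-operand sharding
// on the scatter propagates back to its operand, indices, and update.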
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
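
// Forward pass: a merged index-parallel + index-passthrough sharding on the
// indices propagates to the operand, update, and scatter result.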
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughFromIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
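
// Forward pass: a merged index-parallel + index-passthrough sharding on the
// update propagates to the operand, indices, and scatter result.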
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
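
// Backward pass: index-parallel + index-passthrough shardings annotated on the
// indices and the scatter propagate back to the operand and update.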
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,1,2,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
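
// Forward pass: a merged operand-passthrough + trivially-sliced-operand
// sharding on the operand propagates to the update and scatter result; the
// indices stay replicated.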
TEST_P(
ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
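
// Backward pass: a merged operand-passthrough + trivially-sliced-operand
// sharding on the scatter propagates back to the operand and update; the
// indices stay replicated.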
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
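
// Forward pass: operand-passthrough and index-passthrough shardings on the
// operand and indices are merged onto the update and scatter result.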
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
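
// Forward pass: a merged operand-passthrough + index-passthrough sharding on
// the update propagates to the operand, indices, and scatter result.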
TEST_P(
ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndIndexPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
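
// Backward pass: operand-passthrough + index-passthrough shardings on the
// indices and the scatter propagate back to the operand and update.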
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
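
// Forward pass: trivially-sliced-operand and index-passthrough shardings on
// the operand and indices are merged onto the update and scatter result.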
TEST_P(
ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
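
// Forward pass: trivially-sliced-operand and index-passthrough shardings on
// the operand and update are merged onto the indices and scatter result.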
TEST_P(
ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
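
// Backward pass: trivially-sliced-operand + index-passthrough shardings on the
// indices and the scatter propagate back to the operand and update.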
TEST_P(ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
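
// A gather sharding that only tiles offset dimensions forces the index operand
// to a replicated sharding.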
TEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0)
%parameter.1 = s32[1,2,2]{2,1,0} parameter(1)
%index = s32[1,2,2]{2,1,0} copy(%parameter.1)
%gather = bf16[1,2,2,2,8]{4,3,2,1,0} gather(
bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index),
offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1},
index_vector_dim=2, slice_sizes={1,1,2,2,8},
sharding={devices=[1,1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = bf16[1,2,2,2,8]{4,3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* index = FindInstruction(module.get(), "index");
ASSERT_NE(index, nullptr);
EXPECT_THAT(index, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(index->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(index->sharding(), ShardingMetadata({}));
}
}
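
// When the parallel dimension of the gather is not partitioned, nothing useful
// can be inferred for the operand, which becomes replicated.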
TEST_P(ParameterizedMetadataTest, GatherToOperand_ParallelDimIsNotPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[2,1000,1]{2,1,0} parameter(0)
%parameter.1 = bf16[2,4819,4]{2,1,0} parameter(1)
%iota = s32[2,1000,1]{1,0,2} iota(), iota_dimension=0
%operand = bf16[2,4819,4]{2,1,0} copy(%parameter.1)
%index = s32[2,1000,2]{2,1,0} concatenate(s32[2,1000,1]{1,0,2} %iota,
s32[2,1000,1]{2,1,0} %parameter.0), dimensions={2},
sharding={devices=[1,4,1]0,1,2,3}
ROOT %gather = bf16[2,1000,4]{2,1,0} gather(bf16[2,4819,4]{2,1,0} %operand,
s32[2,1000,2]{2,1,0} %index), offset_dims={2},
collapsed_slice_dims={0,1}, start_index_map={0,1},
index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,4,1]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{replicated}"));
}
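
// Manual subgroup shardings on both parameters propagate forward through the
// elementwise add.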
TEST_P(ParameterizedMetadataTest, ManualSubgroupForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
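
// A manual subgroup sharding on one parameter propagates through its copy and
// onto the add even though the other operand is unannotated.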
TEST_P(ParameterizedMetadataTest, ManualSubgroup_SingleOperandHasSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
auto* operand = FindInstruction(module.get(), "copy");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
}
}
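
// Merging a manual subgroup sharding with a replicated+manual subgroup
// sharding resolves to the tiled manual subgroup sharding on the add.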
TEST_P(ParameterizedMetadataTest, ManualSubgroup_OneOperandReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1),
sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="a"}}
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
auto* operand = FindInstruction(module.get(), "copy");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
}
}
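
// Backward pass: a manual subgroup sharding on the add propagates back to the
// copies of its operands.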
TEST_P(ParameterizedMetadataTest, ManualSubgroupBackward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0)
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
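
// Instructions between SPMDFullToShardShape and SPMDShardToFullShape, such as
// the reduce here, become fully manual.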
TEST_F(ShardingPropagationTest, SimpleManual) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
sharding={devices=[2,1]0,1}
%to_manual = f32[3,3] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape", sharding={manual}
%zero = f32[] constant(0)
%reduce = f32[3] reduce(%to_manual, %zero), dimensions={1}, to_apply=%add
%annotate2 = f32[3] custom-call(%reduce), custom_call_target="Sharding",
sharding={manual}
%to_auto = f32[6] custom-call(%annotate2),
custom_call_target="SPMDShardToFullShape", sharding={devices=[2]0,1}
ROOT %copy.2 = f32[6] copy(%to_auto)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
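
// Manual sharding propagates through tuple and get-tuple-element inside a
// manual region.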
TEST_F(ShardingPropagationTest, SimpleManualTuple) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
sharding={devices=[2,1]0,1}
%to_manual = f32[3,3] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape", sharding={manual}
%t = (f32[3,3]) tuple(%to_manual)
%gte = f32[3,3] get-tuple-element(%t), index=0
%to_auto = f32[3,3] custom-call(%gte),
custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1}
ROOT %copy.2 = f32[3,3] copy(%to_auto)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "t");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual}}"));
instruction = FindInstruction(module.get(), "gte");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
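
// A manual operand sharding propagates forward onto every tuple element of an
// opaque custom call.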
TEST_F(ShardingPropagationTest, DefaultManualCustomCallForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={manual metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%param2 = f32[6,3]{1,0} parameter(2)
%copy.2 = f32[6,3]{1,0} copy(%param2)
%custom-call = (f32[], f32[6,3]{1,0}) custom-call(%copy, %copy.1, %copy.2), custom_call_target="some_custom_call"
ROOT %copy.3 = (f32[], f32[6,3]{1,0}) copy(%custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "custom-call");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual},{manual}}"));
}
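
// A Sharding custom call with unspecified_dims keeps its specified dims and
// refines the unspecified ones from its operand's sharding.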
TEST_F(ShardingPropagationTest, RefineUnspecifiedDims) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1]",
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
%copy.2 = f32[6,3] copy(%annotate)
ROOT %copy.3 = f32[6,3] copy(%copy.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy.2");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
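
// unspecified_dims refinement also works across
// SPMDFullToShardShape/SPMDShardToFullShape conversions with manual subgroups.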
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3,8] parameter(0)
%copy = f32[6,3,8] copy(%param0),
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
%annotate = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%to_manual = f32[3,3,8] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%to_auto = f32[6,3,8] custom-call(%annotate2),
custom_call_target="SPMDShardToFullShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%copy.2 = f32[6,3,8] copy(%to_auto)
ROOT %copy.3 = f32[6,3,8] copy(%copy.2),
sharding={devices=[1,1,2,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy2 = FindInstruction(module.get(), "copy.2");
ASSERT_NE(copy2, nullptr);
EXPECT_THAT(copy2, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
auto* to_manual = FindInstruction(module.get(), "to_manual");
ASSERT_NE(to_manual, nullptr);
EXPECT_THAT(
to_manual,
op::Sharding(
"{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual}}"));
auto* to_auto = FindInstruction(module.get(), "to_auto");
ASSERT_NE(to_auto, nullptr);
EXPECT_THAT(to_auto, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
}
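
// As above, but the refinement additionally flows backward from the user of
// SPMDShardToFullShape to the initial copy.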
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3,8] parameter(0)
%copy = f32[6,3,8] copy(%param0)
%annotate1 = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%to_manual = f32[3,3,8] custom-call(%annotate1),
custom_call_target="SPMDFullToShardShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate3 = f32[3,3,8] custom-call(%annotate2), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%to_auto = f32[6,3,8] custom-call(%annotate3),
custom_call_target="SPMDShardToFullShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%copy.2 = f32[6,3,8] copy(%to_auto),
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
ROOT %copy.3 = f32[6,3,8] copy(%copy.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(
copy, op::Sharding(
"{devices=[2,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
}
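
// Fully manual shardings are left untouched even when a Sharding custom call
// declares unspecified_dims.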
TEST_F(ShardingPropagationTest, DoNotRefineUnspecifiedDimsOnManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3] parameter(0), sharding={manual}
%annotate = f32[6,3] custom-call(%param0), custom_call_target="Sharding",
backend_config="unspecified_dims=[1]", sharding={manual}
ROOT %copy.2 = f32[6,3] copy(%annotate), sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
EXPECT_TRUE(changed);
for (auto* hlo : module->entry_computation()->instructions()) {
EXPECT_TRUE(hlo->sharding().IsManual());
}
}
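
// A manual sharding must not be propagated onto an SPMDShardToFullShape custom
// call, which marks the boundary back to the full shape.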
TEST_F(ShardingPropagationTest, DoNotPassManualShardingToSPMDShardToFullShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
custom-call.2 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
custom-call.3 = f32[2,3]{1,0} custom-call(custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}
custom-call.4 = f32[2,3]{1,0} custom-call(custom-call.3), custom_call_target="Sharding", sharding={manual}
ROOT custom-call.5 = f32[16,3]{1,0} custom-call(custom-call.4), custom_call_target="SPMDShardToFullShape", sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
          .Run(module.get()));
EXPECT_TRUE(changed);
  auto spmd_shard_to_full = module->entry_computation()->root_instruction();
  ASSERT_TRUE(spmd_shard_to_full->IsCustomCall("SPMDShardToFullShape"));
  EXPECT_FALSE(spmd_shard_to_full->sharding().IsManual());
}
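
// After constant splitting, the clone of the constant used inside the manual
// region becomes manual while the one used outside stays replicated.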
TEST_F(ShardingPropagationTest, ManualShardingPassThroughSplitConstant) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
p.1 = f32[2,3]{1,0} parameter(1), sharding={replicated}
constant = f32[2,3]{1,0} constant({{0,1,2},{3,4,5}})
custom-call.0 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
custom-call.1 = f32[2,3]{1,0} custom-call(custom-call.0), custom_call_target="SPMDFullToShardShape", sharding={manual}
add.0 = f32[2,3]{1,0} add(constant, custom-call.1)
custom-call.2 = f32[2,3]{1,0} custom-call(add.0), custom_call_target="SPMDShardToFullShape", sharding={replicated}
add.1 = f32[2,3]{1,0} add(constant, p.1)
ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(custom-call.2, add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool is_split,
      HloConstantSplitter(/*split_expressions=*/true).Run(module.get()));
EXPECT_TRUE(is_split);
TF_ASSERT_OK_AND_ASSIGN(auto _, HloDCE().Run(module.get()));
(void)_;
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
  const HloInstruction* add0 = FindInstruction(module.get(), "add.0");
  ASSERT_NE(add0, nullptr);
  const HloInstruction* manual_constant = add0->operand(0);
  EXPECT_TRUE(manual_constant->IsConstant() &&
              manual_constant->sharding().IsManual());
  const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
  ASSERT_NE(add1, nullptr);
  const HloInstruction* replicate_constant = add1->operand(0);
  EXPECT_TRUE(replicate_constant->IsConstant() &&
              replicate_constant->sharding().IsReplicated());
}
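
// When a reshape cannot preserve the tiled dimensions of a manual subgroup
// sharding, the tiling is replicated but the manual subgroup is kept.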
TEST_F(ShardingPropagationTest, ReshapeNoMatchSubgroupManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1,3,3] parameter(0),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dims={manual}}
%reshape = f32[3,1,3,1] reshape(%param0)
ROOT %copy = f32[3,1,3,1] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[1,1,1,1,2,2]0,2,1,3 last_tile_dims={manual,replicated}}"));
}
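
// An X64Combine custom call takes its operands' sharding in preference to the
// partially replicated sharding of its user.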
TEST_F(ShardingPropagationTest, X64Combine) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[102,192,192] parameter(0),
sharding={devices=[1,2,2]0,1,2,3}
%param1 = f32[102,192,192] parameter(1),
sharding={devices=[1,2,2]0,1,2,3}
%custom-call = f64[102,192,192] custom-call(f32[102,192,192] %param0, f32[102,192,192] %param1), custom_call_target="X64Combine"
ROOT %copy = f64[102,192,192] copy(%custom-call),
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "custom-call");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
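
// A LayoutConstraint custom call keeps its explicitly requested layout while
// inheriting its operand's sharding.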
TEST_F(ShardingPropagationTest, LayoutConstraint) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[102,192,192] parameter(0),
sharding={devices=[1,2,2]0,1,2,3}
%custom-call = f32[102,192,192]{0,1,2} custom-call(f32[102,192,192] %param0), custom_call_target="LayoutConstraint"
ROOT %copy = f32[102,192,192] copy(%custom-call),
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "custom-call");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction->shape(), ShapeUtil::MakeShapeWithDenseLayout(
                                        F32, {102, 192, 192}, {0, 1, 2}));
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
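// Verifies that MoveToHost/MoveToDevice offloading custom calls are
// transparent to propagation, so the sharding survives the host round trip.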
TEST_F(ShardingPropagationTest, OffloadingPropagation) {
const char* const hlo_string = R"(
HloModule module
ENTRY %offloading {
%param0 = f32[1,256,128] parameter(0), sharding={devices=[1,1,4]0,1,2,3}
%zero = f32[] constant(0.0)
%broadcast = f32[256,256,128] broadcast(%zero), dimensions={}
%izero = s32[] constant(0)
%custom-call.0 = f32[1,256,128] custom-call(f32[1,256,128] %param0), custom_call_target="MoveToHost"
%dynamic-update-slice = f32[256,256,128] dynamic-update-slice(%broadcast, %custom-call.0, %izero, %izero, %izero)
%dynamic-slice = f32[1,256,128] dynamic-slice(%dynamic-update-slice, %izero, %izero, %izero), dynamic_slice_sizes={1,256,128}
%custom-call.1 = f32[1,256,128] custom-call(f32[1,256,128] %dynamic-slice), custom_call_target="MoveToDevice"
ROOT %copy = f32[1,256,128] copy(%custom-call.1), sharding={devices=[1,4,1]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* to_host = FindInstruction(module.get(), "custom-call.0");
EXPECT_THAT(to_host, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
auto* from_host_input =
FindInstruction(module.get(), "custom-call.1")->operand(0);
EXPECT_THAT(from_host_input, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
}
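// Verifies that shardings flow through chains of single-user instructions:
// the convert in the while body picks up the [4,1] tiling arriving through
// the while init instead of its user's partially replicated sharding.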
TEST_P(ParameterizedMetadataTest, PropagateThroughSingleUsers) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[10,10], f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], f32[10,10], f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[10,10] get-tuple-element((u32[], f32[10,10],f32[10,10]) %vars), index=1
%cvt = s32[10,10] convert(acc)
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt), sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
%acc.1 = f32[10,10] convert(acc.i)
ROOT %tuple = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc, f32[10,10] %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0), sharding={devices=[4,1]0,1,2,3}
%p1 = f32[10,10] parameter(1)
%p2 = f32[10,10] parameter(2)
%p2.copy = f32[10,10] copy(f32[10,10] %p2)
%zero = u32[] constant(0)
%init = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy, f32[10,10] %p2.copy)
%while = (u32[], f32[10,10], f32[10,10]) while((u32[], f32[10,10], f32[10,10]) %init),
body=%body, condition=%cond
%g1 = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=0
%g2 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=1
%g3 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=2
ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2, %g3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
EXPECT_NE(nullptr, body_root);
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
VLOG(1) << "Mod:";
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* convert_instr = FindInstruction(module.get(), "cvt");
EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}"));
}
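// Verifies that a user sharding on a leaf buried in nested while tuples
// propagates back to the matching operand copy in the entry computation.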
TEST_P(ParameterizedMetadataTest, NestedTupleFromUserSharding) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%fwd = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%vars), index=1
%acc = f32[10,10] get-tuple-element(%vars), index=2
%cvt = s32[10,10] convert(acc)
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt)
%acc.1 = f32[10,10] convert(acc.i)
ROOT %tuple = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%count.1, %fwd, %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0)
%p1 = f32[10,10] parameter(1)
%p1.copy = f32[10,10] copy(f32[10,10] %p1)
%p2 = f32[10,10] parameter(2)
%p2.copy = f32[10,10] copy(f32[10,10] %p2)
%zero = u32[] constant(0)
%zerof = f32[] constant(0)
%init0 = (f32[10,10], f32[10,10]) tuple(%p0.copy, %p1.copy)
%init1 = ((f32[10,10], f32[10,10]), f32[]) tuple(%init0, %zerof)
%init = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%zero, %init1, %p2.copy)
%while = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) while(%init),
body=%body, condition=%cond
%g1 = u32[] get-tuple-element(%while), index=0
%g2 = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%while), index=1
%g2.0 = (f32[10,10], f32[10,10]) get-tuple-element(%g2), index=0
%g2.0.0 = f32[10,10] get-tuple-element(%g2.0), index=0
%g3 = f32[10,10] get-tuple-element(%while), index=2
%copy.g3 = f32[10,10] copy(%g3), sharding={devices=[4,1]0,1,2,3}
ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2.0.0, %g3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
EXPECT_NE(nullptr, body_root);
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
  const HloInstruction* copy_instr = FindInstruction(module.get(), "p2.copy");
  EXPECT_THAT(copy_instr, op::Sharding("{devices=[4]0,1,2,3}"));
}
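// Verifies that with cse_prevention_only=true the pass only plants shardings
// tagged with _sharding_propagation_cse_prevention metadata and leaves user
// annotations and unsharded instructions untouched.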
TEST_F(ShardingPropagationTest, CSEPreventionOnly) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%add = f32[4] add(%br, %br)
%annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={replicated}
ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false},
          /*cse_prevention_only=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* br = FindInstruction(module.get(), "br");
EXPECT_THAT(br, op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(br->sharding(), ShardingMetadata({CreateMetadata(
"_sharding_propagation_cse_prevention")}));
EXPECT_THAT(FindInstruction(module.get(), "annotate"),
AllOf(op::Sharding("{replicated}"), op::CustomCall()));
EXPECT_FALSE(FindInstruction(module.get(), "add")->has_sharding());
}
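// Verifies that a regular propagation run treats CSE-prevention shardings as
// soft and overrides them with genuinely propagated shardings.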
TEST_F(ShardingPropagationTest, RemoveCSEPrevention) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={},
sharding={devices=[4]0,1,2,3 metadata={op_name="_sharding_propagation_cse_prevention"}}
%add = f32[4] add(%br, %br)
%annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={replicated}
ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]3,2,1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "br"),
op::Sharding("{devices=[4]3,2,1,0}"));
EXPECT_THAT(FindInstruction(module.get(), "add"),
op::Sharding("{devices=[4]3,2,1,0}"));
}
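// Verifies that propagating backward through a reshape that inserts a
// trivial dimension converts the tiling on that dimension into partial
// replication on the operand.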
TEST_F(ShardingPropagationTest, ReshapeTrivialDimPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[8,128] parameter(0), sharding={replicated}
%c = f32[8,128] copy(%param0)
%rsp = f32[8,1,128] reshape(%c),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
ROOT %copy = f32[8,1,128] copy(%rsp),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
FindInstruction(module.get(), "c"),
op::Sharding("{devices=[1,4,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
}
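// Regression test: empty tuples nested inside a tuple must not trip up
// propagation.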
TEST_F(ShardingPropagationTest, EmptyTupleWithinTuple) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[2] parameter(0), sharding={replicated}
%et = () tuple()
%tuple = (f32[2], (), (), f32[2]) tuple(%param0, %et, %et, %param0)
ROOT %copy = (f32[2], (), (), f32[2]) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
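// Regression test for a crash when a convolution's contracting dimension is
// sharded as if it were non-contracting.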
TEST_F(ShardingPropagationTest, ContractingAsNonContractingCrash) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[20,64,56,56]{3,2,1,0} parameter(0), sharding={replicated}
%p1 = f32[1,1,256,64]{2,3,1,0} parameter(1), sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7}
%convolution.4512 = f32[20,256,56,56]{3,2,1,0} convolution(%p0, %p1), window={size=1x1}, dim_labels=bf01_01oi->bf01
ROOT %copy = f32[20,256,56,56]{3,2,1,0} copy(%convolution.4512)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
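// Verifies that manual sharding on the operands of a variadic reduce
// propagates to every element of the reduce's result tuple.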
TEST_F(ShardingPropagationTest, PropagateReduceManualTuple) {
const char* const hlo_string = R"(
HloModule pjit
orclone {
lhs.1 = u32[] parameter(0)
rhs.1 = u32[] parameter(2)
or.2 = u32[] or(lhs.1, rhs.1)
lhs.0 = u32[] parameter(1)
rhs.0 = u32[] parameter(3)
or.3 = u32[] or(lhs.0, rhs.0)
ROOT tuple.4 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %main.21 {
select.104 = u32[2,2]{1,0} parameter(0), sharding={manual}
shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual}
constant.4183 = u32[] constant(0), sharding={manual}
reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, to_apply=orclone
ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
EXPECT_THAT(FindInstruction(module.get(), "reduce.1"),
op::Sharding("{{manual}, {manual}}"));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
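// Verifies that compatible tilings merge: the partially replicated 4-way
// tiling on c1 is upgraded to the finer 8-way tiling of the other operand.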
TEST_F(ShardingPropagationTest, MergeCompatibleTiles) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
p = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(0), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
p2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
c0 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p)
c1 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p2)
a = bf16[8,4,256,1024,12288]{4,3,2,1,0} add(c0, c1)
ROOT c2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(a), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "c1"),
op::Sharding("{devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}"));
}
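// Verifies that the tuple sharding attached to an outfeed propagates
// backward to the operand being outfed.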
TEST_F(ShardingPropagationTest, OutfeedUser) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
p = f32[10,128]{1,0} parameter(0)
c = f32[10,128]{1,0} copy(p)
t = (f32[10,128]{1,0}) tuple(c)
a = token[] after-all()
ROOT of = token[] outfeed((f32[10,128]{1,0}) %t, token[] %a), outfeed_shape=(f32[10,128]{1,0}), sharding={{devices=[2,1]0,1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "c"),
op::Sharding("{devices=[2,1]0,1}"));
}
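// Verifies that a ShardBarrierFrom custom call blocks forward propagation,
// leaving the sort unsharded.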
TEST_F(ShardingPropagationTest, SortForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
%shard-barrier-from = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-from), dimensions={1}, is_stable=true, to_apply=compare
ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_FALSE(FindInstruction(module.get(), "sort.0")->has_sharding());
}
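// Verifies that a ShardBarrierTo custom call blocks backward propagation
// from the sort, so the negate ends up replicated instead of tiled.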
TEST_F(ShardingPropagationTest, SortBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0)
%shard-barrier-to = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-to), dimensions={1}, is_stable=true, to_apply=compare,
sharding={devices=[1,8]0,1,2,3,4,5,6,7}
ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(FindInstruction(module.get(), "negate.0"),
op::Sharding("{replicated}"));
}
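// Verifies sort behavior when the operand is sharded along the sort
// dimension: for rank 1 the sharding has nowhere to go, while the rank-2
// variant below still propagates it to the other operand and the result.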
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankOne) {
const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024]{0} parameter(0)
negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]0,1,2,3,4,5,6,7}
iota.0 = s32[1024]{0} iota(), iota_dimension=0
sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare
ROOT copy.0 = (f32[1024]{0}, s32[1024]{0}) copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankTwo) {
const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0}, s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1
sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare
ROOT copy.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "iota.0"),
op::Sharding("{devices=[1,8]0,1,2,3,4,5,6,7}"));
EXPECT_THAT(
FindInstruction(module.get(), "sort.0"),
op::Sharding(
"{{devices=[1,8]0,1,2,3,4,5,6,7}, {devices=[1,8]0,1,2,3,4,5,6,7}}"));
}
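// Verifies that manual shardings on conditional inputs propagate into the
// corresponding branch parameters.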
TEST_F(ShardingPropagationTest, ConditionalManual) {
const char* const hlo_string = R"(
HloModule module
%true_comp {
%tp = (f32[3,5], f32[]) parameter(0)
%tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
%tgte.1 = f32[] get-tuple-element(%tp), index=1
%ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
%broadcast.1 = f32[5,3] broadcast(%tgte.1), dimensions={}
%add.1 = f32[5,3] add(%broadcast.1, %ttr)
ROOT %tr = (f32[5,3], f32[]) tuple(%add.1, %tgte.1)
}
%false_comp {
%fp = (f32[5,3], f32[5,3], f32[]) parameter(0)
%fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
%fgte.1 = f32[] get-tuple-element(%fp), index=2
ROOT %fr = (f32[5,3], f32[]) tuple(%fgte.0, %fgte.1)
}
ENTRY entry {
%cond = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%tp.0 = f32[3,5] parameter(1), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
%fp.0 = f32[5,3] parameter(2), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
%const0 = f32[] constant(0)
%const1 = f32[] constant(1)
%true_param = (f32[3,5], f32[]) tuple(%tp.0, %const0)
%false_param = (f32[5,3], f32[5,3], f32[]) tuple(%fp.0, fp.0, %const1)
ROOT %conditional = (f32[5,3], f32[]) conditional(
%cond, %true_param, %false_param),
true_computation=%true_comp,
false_computation=%false_comp
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tp = FindInstruction(module.get(), "tp");
auto* true_param = FindInstruction(module.get(), "true_param");
EXPECT_EQ(tp->sharding(), true_param->sharding());
auto* fp = FindInstruction(module.get(), "fp");
auto* false_param = FindInstruction(module.get(), "false_param");
EXPECT_EQ(fp->sharding(), false_param->sharding());
}
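// Verifies that subgroup-manual sharding on a dynamic-slice inside the while
// body propagates out to the while init tuple.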
TEST_F(ShardingPropagationTest, WhileDSManual) {
const char* const hlo_string = R"(
HloModule module
while.condition {
arg_tuple = (s32[], pred[2,8,4]) parameter(0)
tripcount = s32[] get-tuple-element(arg_tuple), index=0
triplimit = s32[] constant(2)
ROOT compare.0 = pred[] compare(tripcount, triplimit), direction=LT
}
while.body {
arg_tuple = (s32[], pred[2,8,4]) parameter(0)
tripcount = s32[] get-tuple-element(arg_tuple), index=0
one = s32[] constant(0)
tripcount_next = s32[] add(tripcount, one)
preds.1 = pred[2,8,4] get-tuple-element(arg_tuple), index=1
zero.1 = s32[] constant(0)
dynamic-slice.1 = pred[1,8,4] dynamic-slice(preds.1, tripcount, zero.1, zero.1), dynamic_slice_sizes={1,8,4}, sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
ROOT result = (s32[], pred[2,8,4]) tuple(tripcount_next, preds.1)
}
ENTRY entry {
preds = pred[2,8,4] parameter(0), sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
zero = s32[] constant(0)
tuple.13 = (s32[], pred[2,8,4]) tuple(zero, preds)
ROOT result = while(tuple.13), condition=while.condition, body=while.body
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple.13");
EXPECT_THAT(tuple, op::Sharding("{{replicated}, {devices=[1,1,1,2,4]<=[8] "
"last_tile_dims={manual, replicated}}}"));
}
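// The PropagateToOutput* tests exercise
// allow_spmd_sharding_propagation_to_output: per-element flags on the root
// decide which outputs may have their sharding overridden, and a single flag
// applies to all tuple elements.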
TEST_F(ShardingPropagationTest, PropagateToOutput) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
ROOT %add = f32[4] add(%annotate, %annotate), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTuplePartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add = f32[4] add(%annotate, %annotate)
%param1 = f32[] parameter(1), sharding={replicated}
%br1 = f32[4] broadcast(%param1), dimensions={}
%annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add1 = f32[4] add(%annotate1, %annotate1)
ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3},{replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTupleFull) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add = f32[4] add(%annotate, %annotate)
%param1 = f32[] parameter(1), sharding={replicated}
%br1 = f32[4] broadcast(%param1), dimensions={}
%annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add1 = f32[4] add(%annotate1, %annotate1)
ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}"));
}
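// The PropagateToParameters* tests exercise
// allow_spmd_sharding_propagation_to_parameters: per-parameter flags decide
// whether an entry parameter's sharding may be added or replaced, and a
// single flag applies to all parameters.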
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(/*is_spmd=*/true).Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled4) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial4) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0)
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{replicated}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
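// Verifies that manually sharded outfeeds propagate to the root tuple: the
// outfeed token stays {manual} while the data element becomes {replicated}.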
TEST_F(ShardingPropagationTest, PropagateManualOutfeed) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p0 = f32[8]{0} parameter(0)
p1 = f32[1]{0} parameter(1)
tuple.1 = (f32[8]{0}) tuple(p0)
constant.8 = u32[2]{0} constant({3, 12})
tuple.10 = (u32[2]{0}) tuple(constant.8)
aa.1 = token[] after-all()
outfeed.1 = token[] outfeed(tuple.10, aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}}
outfeed.2 = token[] outfeed(tuple.1, outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}}
ROOT tuple.15 = (f32[1]{0}, token[]) tuple(p1, outfeed.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{replicated}, {manual}}"));
}
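// Verifies that a dangling Sharding custom call (one with no users) still
// seeds propagation, and the propagated sharding survives DCE removing it.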
TEST_F(ShardingPropagationTest, PropagateFromDanglingShardingCustomCall) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[40000]{0} parameter(0)
add = s32[40000]{0} add(p.0, p.0)
cc = s32[40000]{0} custom-call(add), custom_call_target="Sharding", sharding={devices=[4]0,1,2,3}
ROOT mul = s32[40000]{0} multiply(add, add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool dce_ed, RunHloPass(&dce, module.get()));
EXPECT_TRUE(dce_ed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "param0");
EXPECT_EQ(instruction, nullptr);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4]0,1,2,3}"));
}
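// The DoNotPropagate*IfNotDivisible tests verify that a tiled sharding is
// not propagated onto a shape it does not evenly divide (f32[3] against a
// 4-way tiling stays replicated).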
TEST_F(ShardingPropagationTest,
DoNotPropagateToParameterIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[3] parameter(1), sharding={replicated}
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%param1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToParameterIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[3] parameter(1)
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%param1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, DoNotPropagateToTupleParameterIfNotDivisible) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = (f32[4], f32[3]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param0), index=0
%gte1 = f32[3] get-tuple-element(%param0), index=1
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%gte1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%gte0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputTupleIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
%slice = f32[3] slice(%add), slice={[0:3:1]}
ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice), sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputTupleIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
%slice = f32[3] slice(%add), slice={[0:3:1]}
ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
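// The following tests cover shard groups: shard_like ties instructions into
// a group without forcing identical shardings, while shard_as makes the
// whole group adopt the same sharding.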
TEST_F(ShardingPropagationTest, PropagateShardLikeDifferentSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
  auto* add_1 = FindInstruction(module.get(), "add.1");
  ASSERT_NE(add_1, nullptr);
  auto* add_2 = FindInstruction(module.get(), "add.2");
  ASSERT_NE(add_2, nullptr);
  EXPECT_NE(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardLikeSameSharding) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1)
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAs) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsToParameters) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={unknown shard_as 0}
p.1 = s32[16,16] parameter(1), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* p_0 = FindInstruction(module.get(), "p.0");
ASSERT_NE(p_0, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_THAT(add_2, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
EXPECT_EQ(p_0->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsToOutputs) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
broadcast.1 = s32[16,16] broadcast(reduce.1), dimensions={}
ROOT mul = s32[16,16] multiply(broadcast.1, broadcast.1), sharding={unknown shard_as 0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* output = FindInstruction(module.get(), "mul");
ASSERT_NE(output, nullptr);
EXPECT_THAT(add_1, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
EXPECT_EQ(add_1->sharding(), output->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput) {
const char* const hlo_string = R"(
HloModule jit_zeros_like
ENTRY main.6 {
Arg_0.1 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
custom-call.4 = s64[8,2]{1,0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
constant.2 = s64[] constant(0)
broadcast.3 = s64[8,2]{1,0} broadcast(constant.2), dimensions={}
ROOT custom-call.5 = s64[8,2]{1,0} custom-call(broadcast.3), custom_call_target="Sharding", sharding={unknown shard_as 0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
}
TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput2) {
const char* const hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[8]{0:T(256)})->(f32[8]{0:T(256)}, f32[8]{0:T(256)})}, allow_spmd_sharding_propagation_to_output={true,true}, num_partitions=4
ENTRY main.9 {
Arg_0.1 = f32[8]{0} parameter(0)
custom-call.6 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206}
custom-call.4 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={devices=[4]<=[4]}, metadata={op_name="jit(f)/jit(main)/sharding_constraint[sharding=GSPMDSharding({devices=[4]<=[4]}) resource_env=ResourceEnv(mesh=Mesh(), ()) unconstrained_dims=set()]" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=204}
constant.0 = f32[] constant(2)
broadcast.0 = f32[8]{0} broadcast(constant.0), dimensions={}
multiply.5 = f32[8]{0} multiply(custom-call.4, broadcast.0), metadata={op_name="jit(f)/jit(main)/mul" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=205}
custom-call.7 = f32[8]{0} custom-call(multiply.5), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206}
ROOT tuple.8 = (f32[8]{0}, f32[8]{0}) tuple(custom-call.6, custom-call.7)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]<=[4]}, {devices=[4]<=[4]}}"));
}
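// Verifies that when picking a sharding for a dot, propagation looks ahead
// through the reshape to the dynamic-update-slice user instead of stopping
// at the immediate user.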
TEST_F(ShardingPropagationTest, LookaheadUsersOfDot) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p0 = bf16[512,512,1024]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]}
p1 = bf16[512,512,16,128]{3,2,1,0} parameter(1), sharding={devices=[16,1,4,1]<=[64]}
p2 = bf16[16,1024,16,128]{3,2,1,0} parameter(2), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}
p3 = s32[] parameter(3)
dot.1 = bf16[1024,16,128]{2,1,0} dot(p0, p1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
reshape.1 = bf16[1,1024,16,128]{3,2,1,0} reshape(dot.1)
constant.1 = s32[] constant(0)
ROOT dynamic-update-slice.113 = bf16[16,1024,16,128]{3,2,1,0} dynamic-update-slice(p2, reshape.1, p3, constant.1, constant.1, constant.1), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "dot.1");
EXPECT_THAT(instruction,
op::Sharding(
"{devices=[4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}"));
}
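// Verifies that manual shardings propagate through async call-start/-done
// pairs when the callee's execution thread is excluded from the run;
// including that thread (or running over all threads) leaves the module
// unchanged.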
TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingArray) {
const char* const hlo_string = R"(
HloModule module
called_computation {
p0 = s32[8] parameter(0)
p1 = s32[8] parameter(1)
ROOT add = s32[8] add(p0, p1)
}, execution_thread="thread_1"
ENTRY entry_computation {
p0 = s32[8] parameter(0), sharding={manual}
p1 = s32[8] parameter(1), sharding={manual}
async-start = ((s32[8], s32[8]), s32[8], u32[]) call-start(p0, p1), async_execution_thread="thread_1", to_apply=called_computation
ROOT async-done = s32[8] call-done(async-start)
}, execution_thread="thread_0"
)";
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get(), {"thread_0"}));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "async-start");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{manual}, {manual}, {manual}, {manual}}"));
auto* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
EXPECT_THAT(async_done, op::Sharding("{manual}"));
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get(), {"thread_0", "thread_1"}));
EXPECT_FALSE(changed);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_FALSE(changed);
}
}
TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingTuple) {
const char* const hlo_string = R"(
HloModule module
called_computation {
p0 = s32[8] parameter(0)
p1 = s32[8] parameter(1)
add = s32[8] add(p0, p1)
mul = s32[8] multiply(p0, p1)
ROOT result = (s32[8], s32[8]) tuple(add, mul)
}, execution_thread="thread_1"
ENTRY entry_computation {
p0 = s32[8] parameter(0), sharding={manual}
p1 = s32[8] parameter(1), sharding={manual}
async-start = ((s32[8], s32[8]), (s32[8], s32[8]), u32[]) call-start(p0, p1), async_execution_thread="thread_1", to_apply=called_computation
ROOT async-done = (s32[8], s32[8]) call-done(async-start)
}, execution_thread="thread_0"
)";
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get(), {"thread_0"}));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
EXPECT_THAT(
async_start,
op::Sharding("{{manual}, {manual}, {manual}, {manual}, {manual}}"));
auto* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
EXPECT_THAT(async_done, op::Sharding("{{manual}, {manual}}"));
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get(), {"thread_0", "thread_1"}));
EXPECT_FALSE(changed);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
            /*is_spmd=*/true, /*propagate_metadata=*/true,
            /*allow_spmd_sharding_propagation_to_output=*/{true},
            /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_FALSE(changed);
}
}
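// Verifies shard_as combined with shard barriers: the barrier stops
// propagation across itself, but the shard_as group still converges and a
// copy is inserted to resolve it.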
TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier) {
const char* const hlo_string = R"(
HloModule pjit_f
ENTRY main.11 {
Arg_0.1 = bf16[384,1408]{1,0} parameter(0), sharding={devices=[1,16,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}
broadcast.4 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}
custom-call.5 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.4), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
broadcast.2 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}
custom-call.3 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.2), custom_call_target="Sharding", sharding={devices=[8,1,1,1024]<=[8192] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2]"
custom-call.6 = bf16[8,384,1408]{2,1,0} custom-call(custom-call.3), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
%shard-barrier-to = bf16[8,384,1408]{2,1,0} custom-call(%custom-call.6), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
slice.7 = bf16[1,384,1408]{2,1,0} slice(shard-barrier-to), slice={[1:2], [0:384], [0:1408]}
reshape.8 = bf16[384,1408]{1,0} reshape(slice.7)
tuple.9 = (bf16[384,1408]{1,0}) tuple(reshape.8)
get-tuple-element.10 = bf16[384,1408]{1,0} get-tuple-element(tuple.9), index=0, sharding={devices=[16,1,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}
ROOT tuple.13 = (bf16[384,1408]{1,0}, bf16[8,384,1408]{2,1,0}) tuple(get-tuple-element.10, custom-call.5)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* broadcast_4 = FindInstruction(module.get(), "broadcast.4");
ASSERT_NE(broadcast_4, nullptr);
EXPECT_THAT(
broadcast_4,
op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}"));
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(
copy,
op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%custom-call.0 = f32[5,7,11,13]{3,2,1,0} custom-call(param0), custom_call_target="Sharding", sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2,3]"
%shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%custom-call.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%custom-call.2 = f32[5,7,11,13]{3,2,1,0} custom-call(shard-barrier-from), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%custom-call.1 = f32[5,7,11,13]{3,2,1,0} custom-call(param1), custom_call_target="Sharding", sharding={devices=[1,2,2,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}, backend_config="unspecified_dims=[0]"
%custom-call.3 = f32[5,7,11,13]{3,2,1,0} custom-call(custom-call.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
ROOT %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(%custom-call.0, %custom-call.3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding(
"{{devices=[2,2,2,1]<=[8]}, {devices=[1,2,2,1,2]<=[2,4]T(1,0) "
"last_tile_dim_replicate}}"));
}
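// The two call-propagation tests below check that a sharding annotated on a
// kCall result flows backwards onto the call's operand chain in the caller,
// and that SPMD shard<->full shape custom calls along that chain keep their
// shapes and manual regions intact.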
TEST_F(ShardingPropagationTest, CallPropagation) {
const absl::string_view hlo_string = R"(
HloModule module
called_computation {
p0 = bf16[20,2,68096,8512] parameter(0)
%add_called_comp = bf16[20,2,68096,8512] add(p0, p0)
ROOT tuple = (bf16[20,2,68096,8512]) tuple(add_called_comp)
}
ENTRY main {
%param0 = bf16[20,2,68096,8512] parameter(0)
%add = bf16[20,2,68096,8512] add(param0, param0)
ROOT %call = (bf16[20,2,68096,8512]) call(add), to_apply=%called_computation, sharding={{devices=[1,1,16,64]<=[64,16]T(1,0)}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[1,1,16,64]<=[64,16]T(1,0)}"));
}
TEST_F(ShardingPropagationTest, CallPropagationWithSPMDShardToFullShape) {
const absl::string_view hlo_string = R"(
HloModule module
called_computation {
p0 = bf16[4096,4096] parameter(0)
%add_called_comp = bf16[4096,4096] add(p0, p0)
ROOT tuple = (bf16[4096,4096]) tuple(add_called_comp)
}
ENTRY main {
%param0 = bf16[4096,4096] parameter(0)
%add = bf16[4096,4096] add(param0, param0)
%custom-call.1 = bf16[4096,4096]{1,0} custom-call(add), custom_call_target="Sharding", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%custom-call.2 = bf16[2048,4096]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
%custom-call.3 = bf16[2048,4096]{1,0} custom-call(custom-call.2), custom_call_target="Sharding", sharding={manual}
%custom-call.4 = bf16[4096,4096]{1,0} custom-call(bf16[2048,4096]{1,0} %custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %call = (bf16[4096,4096]) call(custom-call.4), to_apply=%called_computation, sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* custom_call_4 = FindInstruction(module.get(), "custom-call.4");
ASSERT_NE(custom_call_4, nullptr);
auto* operand = custom_call_4->operand(0);
EXPECT_THAT(operand, op::Shape("bf16[2048,4096]"));
EXPECT_THAT(custom_call_4, op::Shape("bf16[4096,4096]"));
EXPECT_THAT(custom_call_4,
op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
}
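// rng-bit-generator needs the same seed on every participating device to
// produce consistent random bits, so the expectation below is that
// propagation forces a replicated sharding onto `seed` even though the
// surrounding ops are tiled across devices.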
TEST_F(ShardingPropagationTest, ReplicateRngBitGeneratorSeed) {
const char* const hlo_string = R"(
HloModule module
apply_or {
x = u64[] parameter(0)
y = u64[] parameter(1)
ROOT x_or_y = or(x, y)
}
ENTRY main {
p = s32[2,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
up = u64[2,2] convert(p)
i = u64[] constant(0)
seed = u64[2] reduce(up, i), dimensions={1}, to_apply=apply_or
rbg = u32[2048,4096] rng-bit-generator(seed), algorithm=rng_default
ROOT s = u32[2048,4096]{1,0} custom-call(rbg), custom_call_target="Sharding", sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "seed");
EXPECT_TRUE(instruction->sharding().IsReplicated());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f625f0ba-808b-401d-8be6-8a292e612afc | cpp | tensorflow/tensorflow | algebraic_simplifier | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier.cc | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier_test.cc | #include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
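// Decides whether a dot should be lowered as broadcast/multiply/reduce
// instead of being kept as a dot. The policy implemented below: vector*vector
// dots are always strength-reduced; dots too small to be worth a GEMM rewrite
// are strength-reduced; everything else stays a dot only if legacy Triton can
// emit a GEMM for it on the given compute capability.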
bool GpuAlgebraicSimplifierVisitor::ShouldStrengthReduceDotToReduce(
const HloInstruction* hlo) {
if (!options_.enable_dot_strength_reduction()) {
return false;
}
const HloDotInstruction* dot = DynCast<HloDotInstruction>(hlo);
if (dot == nullptr) {
return false;
}
const HloInstruction* lhs = dot->operand(0);
const HloInstruction* rhs = dot->operand(1);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
bool lhs_is_vector = (dnums.lhs_batch_dimensions_size() +
dnums.lhs_contracting_dimensions_size() ==
lhs->shape().rank());
bool rhs_is_vector = (dnums.rhs_batch_dimensions_size() +
dnums.rhs_contracting_dimensions_size() ==
rhs->shape().rank());
if (lhs_is_vector && rhs_is_vector) {
return true;
}
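  // The constant below is the threshold handed to
  // IsMatrixMultiplicationTooSmallForRewriting; its unit is defined by that
  // helper in matmul_utils, so read it as a tuning knob rather than an exact
  // byte or FLOP count.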
absl::StatusOr<bool> is_too_small =
IsMatrixMultiplicationTooSmallForRewriting(*hlo, 10000000);
CHECK_OK(is_too_small.status());
if (is_too_small.value()) {
return true;
}
return !legacy_triton::CanTritonHandleGEMM(*dot, compute_capability_);
}
} | #include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
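// Each test below hand-builds the simplifier options and an Ampere (8.0)
// compute capability, constructs a GpuAlgebraicSimplifierVisitor, and probes
// ShouldStrengthReduceDotToReduce directly on the parsed root dot rather than
// running the full pass.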
class GpuAlgebraicSimplifierTest : public HloTestBase {};
TEST_F(GpuAlgebraicSimplifierTest, VectorVectorDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 500] parameter(0)
p1 = f32[32, 500] parameter(1)
ROOT dot = f32[32] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, MatrixVectorDotShouldNotBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 5000, 7000] parameter(0)
p1 = f32[32, 5000] parameter(1)
ROOT dot = f32[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_FALSE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest,
DotWithTypeUnsupportedByGemmFusionShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = c64[32, 5000, 7000] parameter(0)
p1 = c64[32, 5000] parameter(1)
ROOT dot = c64[32,7000] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[32, 50, 70] parameter(0)
p1 = f32[32, 50] parameter(1)
ROOT dot = f32[32,70] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1},
algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
TEST_F(GpuAlgebraicSimplifierTest, SmallDotShouldBeStrengthReduced2) {
const std::string& hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[2000, 3000] parameter(0)
p1 = f32[2000] parameter(1)
ROOT dot = f32[3000] dot(p0, p1), lhs_contracting_dims={0},
rhs_contracting_dims={0}, algorithm=dot_bf16_bf16_f32_x6
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
AlgebraicSimplifierOptions options;
options.set_enable_dot_strength_reduction(true);
se::CudaComputeCapability ampere(8, 0);
GpuAlgebraicSimplifier simplifier(options, ampere);
GpuAlgebraicSimplifierVisitor visitor(options, ampere, &simplifier);
EXPECT_TRUE(visitor.ShouldStrengthReduceDotToReduce(dot));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/algebraic_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/algebraic_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ca2d24a-803e-4bbb-8f49-b99dbb2bd95d | cpp | tensorflow/tensorflow | compilation_environments | third_party/xla/xla/service/compilation_environments.cc | third_party/xla/xla/service/compilation_environments_test.cc | #include "xla/service/compilation_environments.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
ABSL_CONST_INIT absl::Mutex process_new_env_fns_mu(absl::kConstInit);
absl::flat_hash_map<const tsl::protobuf::Descriptor*,
CompilationEnvironments::ProcessNewEnvFn>*
process_new_env_fns ABSL_GUARDED_BY(process_new_env_fns_mu) = nullptr;
class GlobalCompEnvStats {
public:
static GlobalCompEnvStats& GetSingleton() {
static GlobalCompEnvStats* singleton = new GlobalCompEnvStats();
return *singleton;
}
void DefaultEnvCreatedByCompilationEnvironments(std::string_view env_type)
ABSL_LOCKS_EXCLUDED(mu_) {
{
absl::MutexLock l(&mu_);
++stats_[std::string(env_type)]
.default_env_created_by_compilation_environments;
}
VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
}
void EnvAdded(std::string_view env_type) ABSL_LOCKS_EXCLUDED(mu_) {
{
absl::MutexLock l(&mu_);
++stats_[std::string(env_type)].env_added;
}
VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
}
std::string ToString() const ABSL_LOCKS_EXCLUDED(mu_) {
absl::ReaderMutexLock l(&mu_);
return absl::StrJoin(
stats_, "; ",
[](std::string* out, const StatMap::value_type& env_stats_pair) {
absl::StrAppend(out, env_stats_pair.first, ": { ",
env_stats_pair.second.ToString(), " }");
});
}
private:
struct PerEnvStats {
std::string ToString() const {
return absl::StrCat(
"# default envs created by CompilationEnvironments: ",
default_env_created_by_compilation_environments, " ",
"# envs added to CompilationEnvironments: ", env_added);
}
unsigned default_env_created_by_compilation_environments = 0;
unsigned env_added = 0;
};
using StatMap = absl::flat_hash_map<std::string, PerEnvStats>;
GlobalCompEnvStats() = default;
GlobalCompEnvStats(const GlobalCompEnvStats&) = delete;
GlobalCompEnvStats& operator=(const GlobalCompEnvStats&) = delete;
GlobalCompEnvStats(GlobalCompEnvStats&&) = delete;
GlobalCompEnvStats& operator=(GlobalCompEnvStats&&) = delete;
mutable absl::Mutex mu_;
StatMap stats_ ABSL_GUARDED_BY(mu_);
};
}
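// Copy assignment deep-copies every environment proto: Clear() drops the
// current contents, then each message held by `rhs` is cloned via
// New()/CopyFrom so the two CompilationEnvironments never share mutable
// state.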
CompilationEnvironments& CompilationEnvironments::operator=(
const CompilationEnvironments& rhs) {
Clear();
for (const auto& descriptor_message_pair : rhs.environments_) {
auto env = absl::WrapUnique(descriptor_message_pair.second->New());
env->CopyFrom(*descriptor_message_pair.second);
environments_.insert({descriptor_message_pair.first, std::move(env)});
}
return *this;
}
absl::StatusOr<std::unique_ptr<CompilationEnvironments>>
CompilationEnvironments::CreateFromProto(
const CompilationEnvironmentsProto& proto) {
auto envs = std::make_unique<CompilationEnvironments>();
const tsl::protobuf::DescriptorPool* const pool =
tsl::protobuf::DescriptorPool::generated_pool();
for (const auto& env_proto : proto.environments()) {
std::string fullname;
if (!google::protobuf::Any::ParseAnyTypeUrl(env_proto.type_url(),
&fullname)) {
      return tsl::errors::DataLoss(
          "Invalid CompilationEnvironment message type url: ",
          env_proto.type_url());
}
const tsl::protobuf::Descriptor* const descriptor =
pool->FindMessageTypeByName(fullname);
if (descriptor == nullptr) {
      return tsl::errors::DataLoss(
          "Unknown CompilationEnvironment message type: ", fullname);
}
const tsl::protobuf::Message* const prototype =
tsl::protobuf::MessageFactory::generated_factory()->GetPrototype(
descriptor);
if (prototype == nullptr) {
      return tsl::errors::Internal(
          "Unsupported CompilationEnvironment message type: ", fullname);
}
std::unique_ptr<tsl::protobuf::Message> env(prototype->New());
if (!env_proto.UnpackTo(env.get())) {
      return tsl::errors::DataLoss(
          "Unable to unpack CompilationEnvironment message of type '",
          fullname, "'");
}
TF_RETURN_IF_ERROR(envs->AddEnv(std::move(env)));
}
return envs;
}
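// Registers the process-wide hook used to canonicalize newly added
// environments of the given descriptor type. Registration is one-shot: a
// second registration for the same descriptor CHECK-fails.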
void CompilationEnvironments::RegisterProcessNewEnvFn(
const tsl::protobuf::Descriptor* descriptor,
ProcessNewEnvFn process_new_env) {
absl::MutexLock l(&process_new_env_fns_mu);
if (process_new_env_fns == nullptr) {
process_new_env_fns =
new absl::flat_hash_map<const tsl::protobuf::Descriptor*,
CompilationEnvironments::ProcessNewEnvFn>();
}
const bool inserted =
process_new_env_fns->insert({descriptor, std::move(process_new_env)})
.second;
CHECK(inserted) << "ProcessNewEnvFn for XLA compilation environment '"
<< descriptor->full_name() << "' has already been registered";
}
absl::Status CompilationEnvironments::AddEnv(
std::unique_ptr<tsl::protobuf::Message> env) {
if (!env) {
    return tsl::errors::InvalidArgument(
        "Cannot add a null compilation environment.");
}
const tsl::protobuf::Descriptor& descriptor = *env->GetDescriptor();
return AddEnvImpl(descriptor, std::move(env));
}
CompilationEnvironmentsProto CompilationEnvironments::ToProto() const {
std::vector<const tsl::protobuf::Descriptor*> descriptors;
descriptors.reserve(environments_.size());
for (const auto& [descriptor, message] : environments_) {
descriptors.push_back(descriptor);
}
absl::c_sort(descriptors, [](const tsl::protobuf::Descriptor* lhs,
const tsl::protobuf::Descriptor* rhs) {
return lhs->full_name() < rhs->full_name();
});
CompilationEnvironmentsProto proto;
for (const auto* const descriptor : descriptors) {
proto.add_environments()->PackFrom(*environments_.at(descriptor));
}
return proto;
}
CompilationEnvironments::ProcessNewEnvFn
CompilationEnvironments::GetProcessNewEnvFn(
const tsl::protobuf::Descriptor& descriptor) {
absl::MutexLock l(&process_new_env_fns_mu);
if (process_new_env_fns == nullptr) {
return nullptr;
}
const auto it = process_new_env_fns->find(&descriptor);
if (it == process_new_env_fns->end()) {
return nullptr;
}
return it->second;
}
void CompilationEnvironments::DefaultEnvCreatedByCompilationEnvironments(
std::string_view env_type) {
GlobalCompEnvStats::GetSingleton().DefaultEnvCreatedByCompilationEnvironments(
env_type);
}
void CompilationEnvironments::EnvAdded(std::string_view env_type) {
GlobalCompEnvStats::GetSingleton().EnvAdded(env_type);
}
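// Stores `env` under its descriptor, first running the registered
// ProcessNewEnvFn so defaults get normalized, and logging a warning if the
// processed message still carries unknown fields (tag numbers the compiled-in
// proto cannot interpret).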
absl::Status CompilationEnvironments::AddEnvImpl(
const tsl::protobuf::Descriptor& descriptor,
std::unique_ptr<tsl::protobuf::Message> env) {
if (environments_.contains(&descriptor)) {
    return tsl::errors::InvalidArgument(
        "Replacing CompilationEnvironment of type ", descriptor.full_name(),
        ".");
}
ProcessNewEnvFn process_new_env = GetProcessNewEnvFn(descriptor);
if (!process_new_env) {
    return tsl::errors::InvalidArgument(
        "Unknown compilation environment type: ", descriptor.full_name());
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<tsl::protobuf::Message> processed_env,
process_new_env(std::move(env)));
const tsl::protobuf::UnknownFieldSet& unknown_fields =
processed_env->GetReflection()->GetUnknownFields(*processed_env);
std::vector<int> unknown_tags;
unknown_tags.reserve(unknown_fields.field_count());
for (int i = 0; i < unknown_fields.field_count(); ++i) {
const tsl::protobuf::UnknownField& field = unknown_fields.field(i);
unknown_tags.push_back(field.number());
}
if (!unknown_tags.empty()) {
LOG(WARNING) << "CompilationEnvironment " << descriptor.full_name()
<< " contains unknown fields with tag numbers: "
<< absl::StrJoin(unknown_tags, ", ");
}
environments_.insert({&descriptor, std::move(processed_env)});
EnvAdded(descriptor.full_name());
return absl::OkStatus();
}
} | #include "xla/service/compilation_environments.h"
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "xla/service/test_compilation_environment.pb.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/protobuf.h"
namespace xla {
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv1(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment1> env(
tensorflow::down_cast<test::TestCompilationEnvironment1*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment1>();
}
if (env->some_flag() == 0 || env->some_flag() == 1) {
env->set_some_flag(100);
}
return env;
}
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv2(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment2> env(
tensorflow::down_cast<test::TestCompilationEnvironment2*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment2>();
}
if (env->some_other_flag() == 0) {
env->set_some_other_flag(200);
}
return env;
}
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv3(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment3> env(
tensorflow::down_cast<test::TestCompilationEnvironment3*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment3>();
}
if (env->a_third_flag() == 0) {
env->set_a_third_flag(300);
}
return env;
}
namespace test {
namespace {
class CompilationEnvironmentsTest : public ::testing::Test {
protected:
static void SetUpTestSuite() {
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment1::descriptor(), ProcessNewEnv1);
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment2::descriptor(), ProcessNewEnv2);
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment3::descriptor(), ProcessNewEnv3);
}
};
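// The tests below lean on the ProcessNewEnv hooks registered above: a flag
// left at its sentinel value (0, or 0/1 for environment 1) is rewritten to a
// processed default (100/200/300), while any other value passes through
// unchanged.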
TEST_F(CompilationEnvironmentsTest, GetDefaultEnv) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, GetDefaultMutableEnv) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, GetAddedEnvNotModifiedByProcessNewEnv) {
CompilationEnvironments envs;
auto env = std::make_unique<TestCompilationEnvironment1>();
env->set_some_flag(5);
TF_ASSERT_OK(envs.AddEnv(std::move(env)));
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 5);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 5);
}
TEST_F(CompilationEnvironmentsTest, GetAddedEnvModifiedByProcessNewEnv) {
CompilationEnvironments envs;
auto env = std::make_unique<TestCompilationEnvironment1>();
env->set_some_flag(1);
TF_ASSERT_OK(envs.AddEnv(std::move(env)));
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, MultipleEnvs) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment2>().some_other_flag(), 200);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, MultipleMutableEnvs) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
200);
envs.GetMutableEnv<TestCompilationEnvironment1>().set_some_flag(101);
envs.GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(201);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 101);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
201);
}
TEST_F(CompilationEnvironmentsTest, CopyConstructor) {
auto envs = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto envs_copy = std::make_unique<CompilationEnvironments>(*envs);
envs.reset();
EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment2>().some_other_flag(),
20);
}
TEST_F(CompilationEnvironmentsTest, CopyAssignment) {
auto envs1 = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs1->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs1->AddEnv(std::move(env2)));
envs1->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto envs2 = std::make_unique<CompilationEnvironments>();
auto env3 = std::make_unique<TestCompilationEnvironment1>();
env3->set_some_flag(30);
TF_ASSERT_OK(envs2->AddEnv(std::move(env3)));
auto env4 = std::make_unique<TestCompilationEnvironment3>();
env4->set_a_third_flag(40);
TF_ASSERT_OK(envs2->AddEnv(std::move(env4)));
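  // Copy assignment clears envs2 first, so env4's explicit a_third_flag value
  // (40) is dropped; the final GetEnv call recreates the environment and
  // ProcessNewEnv3 fills in the processed default of 300.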
*envs2 = *envs1;
envs1.reset();
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment2>().some_other_flag(), 20);
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment3>().a_third_flag(), 300);
}
TEST_F(CompilationEnvironmentsTest, ProtoRoundTrip) {
auto envs = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto proto = envs->ToProto();
TF_ASSERT_OK_AND_ASSIGN(auto envs_deserialized,
CompilationEnvironments::CreateFromProto(proto));
EXPECT_EQ(
envs_deserialized->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs_deserialized->GetEnv<TestCompilationEnvironment2>()
.some_other_flag(),
20);
}
TEST_F(CompilationEnvironmentsTest, EnvTypePresenceCheck) {
CompilationEnvironments envs;
EXPECT_FALSE(envs.HasEnv<TestCompilationEnvironment1>());
envs.GetEnv<TestCompilationEnvironment1>();
EXPECT_TRUE(envs.HasEnv<TestCompilationEnvironment1>());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compilation_environments.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compilation_environments_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ab54487-87a5-4c06-94ee-219aa694736a | cpp | tensorflow/tensorflow | while_loop_expensive_invariant_code_motion | third_party/xla/xla/service/while_loop_expensive_invariant_code_motion.cc | third_party/xla/xla/service/while_loop_expensive_invariant_code_motion_test.cc | #include "xla/service/while_loop_expensive_invariant_code_motion.h"
#include <iterator>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::InlinedVector;
struct InvariantInfo {
explicit InvariantInfo(int64_t user_count)
: remaining_user_count(user_count) {}
int64_t transitive_input_size = 0;
int64_t remaining_user_count;
HloInstruction* hoisted_copy = nullptr;
InlinedVector<HloInstruction*, 2> blocked_users;
};
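// Clones the invariant instruction graph rooted at `to_hoist` into the
// computation containing `while_instr`. An explicit DFS stack is used so deep
// operand chains cannot overflow the call stack; hoisted copies are memoized
// in InvariantInfo::hoisted_copy, and uses of the loop-body parameter are
// rewired to the while's init operand.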
static void CreateLoopInvariantCopy(
flat_hash_map<HloInstruction*, InvariantInfo>* invariant_instructions,
HloInstruction* while_instr, HloInstruction* to_hoist) {
HloComputation* parent_of_while = while_instr->parent();
HloComputation* while_body = while_instr->while_body();
struct DFSFrame {
HloInstruction* instruction;
int64_t operand_index;
};
InlinedVector<DFSFrame, 8> dfs_stack;
dfs_stack.push_back({to_hoist, 0});
HloInstruction* while_body_param = while_body->parameter_instruction(0);
HloInstruction* while_operand = while_instr->mutable_operand(0);
do {
DFSFrame* frame = &dfs_stack.back();
if (frame->operand_index == frame->instruction->operand_count()) {
HloInstruction* old_instruction = frame->instruction;
InvariantInfo& info = FindOrDie(*invariant_instructions, old_instruction);
if (info.hoisted_copy == nullptr) {
auto get_new_operand = [&](HloInstruction* old_operand) {
return old_operand == while_body_param
? while_operand
: FindOrDie(*invariant_instructions, old_operand)
.hoisted_copy;
};
InlinedVector<HloInstruction*, 4> new_operands;
absl::c_transform(old_instruction->operands(),
std::back_inserter(new_operands), get_new_operand);
HloInstruction* new_instruction = parent_of_while->AddInstruction(
old_instruction->CloneWithNewOperands(old_instruction->shape(),
new_operands));
info.hoisted_copy = new_instruction;
}
dfs_stack.pop_back();
continue;
}
HloInstruction* next_operand =
frame->instruction->mutable_operand(frame->operand_index++);
if (next_operand == while_body_param ||
FindOrDie(*invariant_instructions, next_operand).hoisted_copy !=
nullptr) {
continue;
}
dfs_stack.push_back({next_operand, 0});
} while (!dfs_stack.empty());
}
}
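// Tries to hoist invariant, individually expensive instructions out of
// `while_instr`'s body. An instruction is only moved when its output is no
// larger than the transitive size of the loop-invariant inputs it consumes,
// so hoisting never increases the amount of data kept live across the loop
// boundary.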
absl::StatusOr<bool> WhileLoopExpensiveInvariantCodeMotion::
TryHoistingInvariantInstructionsFromWhileBody(HloInstruction* while_instr) {
auto print_no_metadata = HloPrintOptions{}.set_print_metadata(false);
if (!while_instr->shape().IsTuple()) {
return false;
}
std::string while_instr_name = while_instr->ToString(print_no_metadata);
VLOG(2) << "Trying to hoist from " << while_instr_name;
auto maybe_upper_bound = ComputeWhileLoopTripCountUpperBound(while_instr);
if (maybe_upper_bound && *maybe_upper_bound <= 1) {
VLOG(2) << "Loop has a trip count of at most 1, skipping.";
return false;
}
HloComputation* while_body = while_instr->while_body();
flat_hash_map<HloInstruction*, InvariantInfo> invariant_instructions;
flat_hash_map<HloInstruction*, int64_t> to_hoist_when_ready;
for (auto* instr : WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
if (instr->shape().IsArray()) {
auto emplace_result = invariant_instructions.emplace(
instr, InvariantInfo(instr->user_count() - 1));
CHECK(emplace_result.second);
InvariantInfo& info = emplace_result.first->second;
info.transitive_input_size = shape_size_function_(instr->shape());
}
}
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kDomain ||
instruction->IsCustomCall("SPMDFullToShardShape") ||
        instruction->IsCustomCall("SPMDShardToFullShape")) {
return false;
}
}
std::vector<HloInstruction*> instructions_to_replace;
std::vector<HloInstruction*> replacement_instructions;
auto hoist = [&](HloInstruction* instruction, const InvariantInfo& info) {
if (info.hoisted_copy) {
return;
}
VLOG(2) << "Hoisting " << instruction->ToString(print_no_metadata);
CreateLoopInvariantCopy(&invariant_instructions, while_instr, instruction);
instructions_to_replace.push_back(instruction);
replacement_instructions.push_back(info.hoisted_copy);
};
flat_hash_set<HloInstruction*> checked_operands;
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
if (instruction->HasSideEffect() ||
instruction->opcode() == HloOpcode::kParameter ||
!instruction->control_predecessors().empty() ||
!instruction->control_successors().empty() ||
instruction == while_body->root_instruction()) {
continue;
}
auto is_invariant = [&](HloInstruction* op) {
return invariant_instructions.find(op) != invariant_instructions.end();
};
if (!absl::c_all_of(instruction->operands(), is_invariant)) {
continue;
}
auto emplace_result = invariant_instructions.emplace(
instruction, InvariantInfo(instruction->user_count()));
CHECK(emplace_result.second);
InvariantInfo& instr_info = emplace_result.first->second;
for (auto* user : instruction->users()) {
if (user == while_body->root_instruction()) {
--instr_info.remaining_user_count;
break;
}
}
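    // Track how many in-loop users each operand still has. An operand with
    // users remaining "blocks" this instruction: hoisting now would keep the
    // operand live both inside and outside the loop. Blocked instructions are
    // parked in to_hoist_when_ready and hoisted once their last blocking
    // operand runs out of in-loop users.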
int64_t num_blocking_operands = 0;
int64_t output_size = 0;
for (auto* operand : instruction->operands()) {
auto& operand_info = invariant_instructions.at(operand);
if (!checked_operands.contains(operand)) {
instr_info.transitive_input_size += operand_info.transitive_input_size;
--operand_info.remaining_user_count;
checked_operands.insert(operand);
}
if (operand_info.remaining_user_count == 0) {
for (auto* user : operand_info.blocked_users) {
auto it = to_hoist_when_ready.find(user);
if (it != to_hoist_when_ready.end()) {
auto& num_blocking = it->second;
CHECK_GT(num_blocking, 0);
--num_blocking;
if (num_blocking == 0) {
hoist(user, invariant_instructions.at(user));
to_hoist_when_ready.erase(it);
}
}
}
operand_info.blocked_users.clear();
} else if (operand_info.remaining_user_count > 0) {
++num_blocking_operands;
if (operand_info.blocked_users.empty() ||
operand_info.blocked_users.back() != instruction) {
operand_info.blocked_users.push_back(instruction);
}
} else {
      LOG(FATAL)
          << "An instruction should not have a negative number of users.";
}
}
    checked_operands.clear();
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&output_size, this](const Shape& subshape,
const ShapeIndex& ) {
if (subshape.IsArray()) {
output_size += shape_size_function_(subshape);
}
});
if (output_size > instr_info.transitive_input_size) {
continue;
}
if (!worth_hoisting_individually_(instruction)) {
continue;
}
if (num_blocking_operands > 0) {
to_hoist_when_ready.emplace(instruction, num_blocking_operands);
continue;
}
hoist(instruction, instr_info);
}
if (instructions_to_replace.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(
WhileUtil::MakeInstructionsLiveInResult live_in_instructions_result,
WhileUtil::MakeInstructionsLiveIn(while_instr, replacement_instructions));
HloComputation* new_while_body =
live_in_instructions_result.new_while_instr->while_body();
for (int i = 0; i < instructions_to_replace.size(); i++) {
HloInstruction* instruction_to_replace_in_new_while =
FindOrDie(live_in_instructions_result.while_body_instruction_map,
instructions_to_replace[i]);
TF_RETURN_IF_ERROR(new_while_body->ReplaceInstruction(
instruction_to_replace_in_new_while,
live_in_instructions_result.while_body_live_in_values[i]));
}
VLOG(1) << "Hoisted " << instructions_to_replace.size()
<< " instructions from " << while_instr_name;
return true;
}
absl::StatusOr<bool> WhileLoopExpensiveInvariantCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before WhileLoopExpensiveInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->computations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(
bool result,
TryHoistingInvariantInstructionsFromWhileBody(while_instr));
changed |= result;
}
if (changed) {
VLOG(2) << "HLO module after WhileLoopExpensiveInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2)
<< "HLO module unchanged after WhileLoopExpensiveInvariantCodeMotion";
}
return changed;
}
} | #include "xla/service/while_loop_expensive_invariant_code_motion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using WhileLoopExpensiveInvariantCodeMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
using ::testing::Contains;
using ::testing::Not;
constexpr char kModuleWithNonInflatingInvariantDot[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[], f32[16, 8]) parameter(0)
b = get-tuple-element(p_body), index=1
const = f32[] constant(1.0)
lhs = f32[8, 16] broadcast(const), dimensions={}
dot = dot(lhs, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reduced = reduce(dot, const), dimensions={0, 1}, to_apply=mul
a = get-tuple-element(p_body), index=0
add = add(reduced, a)
ROOT root = tuple(add, b)
}
condition {
p_cond = (f32[], f32[16, 8]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[] parameter(0)
param1 = f32[16, 8] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
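// In the module above, the dot's f32[8,8] output (256 bytes) is smaller than
// the f32[16,8] loop-invariant input it consumes (512 bytes), and the reduce
// shrinks it further to a scalar, so the size heuristic treats the group as
// non-inflating.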
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfAllowedNonInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Contains(op::Reduce()));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfAllNonInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot,
HloOpcode::kReduce>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
DoesNotHoistsUnallowedInstructions) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateFalse)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
constexpr char kModuleWithInflatingInvariantDot[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[], f32[16, 4]) parameter(0)
b = get-tuple-element(p_body), index=1
const = f32[] constant(1.0)
lhs = f32[4, 16] broadcast(const), dimensions={}
dot = dot(lhs, b), lhs_contracting_dims={0}, rhs_contracting_dims={1}
reduced = reduce(dot, const), dimensions={0, 1}, to_apply=mul
a = get-tuple-element(p_body), index=0
add = add(reduced, a)
ROOT root = tuple(add, b)
}
condition {
p_cond = (f32[], f32[16, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[] parameter(0)
param1 = f32[16, 4] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
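// Contracting the size-4 dimensions makes the dot's f32[16,16] output larger
// than its f32[16,4] invariant input, so the dot is rejected when considered
// on its own. It still leaves the loop in the "inflating intermediate" test
// because the scalar-producing reduce passes the size check and the dot is
// cloned out as part of the reduce's operand chain.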
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest, DoesNotHoistsInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfNonInflatingWithInflatingIntermediate) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot,
HloOpcode::kReduce>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsOpWithDuplicateOperands) {
constexpr char kModuleWithDuplicateOperands[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[4, 4], f32[4, 4]) parameter(0)
a = get-tuple-element(p_body), index=0
dot = dot(a, a), lhs_contracting_dims={0}, rhs_contracting_dims={1}
b = get-tuple-element(p_body), index=1
add = add(b, dot)
ROOT root = tuple(a, add)
}
condition {
p_cond = (f32[4, 4], f32[4, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[4, 4] parameter(0)
param1 = f32[4, 4] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(kModuleWithDuplicateOperands).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
DoesNotHoistShardingCustomCalls) {
constexpr char kModuleWithShardingCustomCalls[] = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[4, 4], f32[4, 4]) parameter(0)
a = f32[4, 4] get-tuple-element(p_body), index=0
custom-call.1 = f32[4, 4] custom-call(a), custom_call_target="Sharding", sharding={devices=[4,1]0,1,2,3}
custom-call.2 = f32[4, 4] custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
dot = f32[4, 4] dot(a, a), lhs_contracting_dims={0}, rhs_contracting_dims={1}
b = f32[4, 4] get-tuple-element(p_body), index=1
add = f32[4, 4] add(b, dot)
custom-call.3 = f32[4, 4] custom-call(add), custom_call_target="Sharding", sharding={manual}
custom-call.4 = f32[4, 4] custom-call(custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1]0,1,2,3}
ROOT root = (f32[4, 4], f32[4, 4]) tuple(a, custom-call.4)
}
condition {
p_cond = (f32[4, 4], f32[4, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[4, 4] parameter(0)
param1 = f32[4, 4] parameter(1)
while_init = (f32[4, 4], f32[4, 4]) tuple(param0, param1)
ROOT while = (f32[4, 4], f32[4, 4]) while(while_init), condition=condition, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(kModuleWithShardingCustomCalls).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_expensive_invariant_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_expensive_invariant_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19a6f11d-9f4f-4aef-8c9a-58497e83fc3c | cpp | tensorflow/tensorflow | constant_value | third_party/xla/xla/service/constant_value.cc | third_party/xla/xla/service/constant_value_test.cc | #include "xla/service/constant_value.h"
#include <string>
namespace xla {
absl::StatusOr<ConstantValue> ConstantValue::FromLiteral(
const Literal& literal) {
CHECK_EQ(literal.shape().dimensions_size(), 0) << "Expected scalar literal";
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<ConstantValue>>(
[&](auto primitive_type_constant) -> absl::StatusOr<ConstantValue> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return ConstantValue(
static_cast<uint64_t>(
literal.GetFirstElement<
primitive_util::NativeTypeOf<primitive_type_constant>>()),
primitive_util::BitWidth(primitive_type_constant),
primitive_util::IsSignedIntegralType(primitive_type_constant));
}
return InvalidArgument("Unsupported type");
},
literal.shape().element_type());
}
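// The arithmetic helpers below operate on the raw uint64_t payload; for
// signed values they bit_cast through int64_t so that division, modulus and
// comparison follow two's-complement semantics at the stored bitwidth.
// Example: with bitwidth 23, GetSigned(-10).div(GetSigned(4)) truncates
// toward zero and yields -2.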
ConstantValue ConstantValue::div(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ / other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) /
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
ConstantValue ConstantValue::mod(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ % other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) %
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
ConstantValue ConstantValue::mul(const ConstantValue& other) const {
if (!is_signed_) {
return ConstantValue(value_ * other.value_, bitwidth_, is_signed_);
}
return ConstantValue(
absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) *
absl::bit_cast<int64_t>(other.value_)),
bitwidth_, is_signed_);
}
bool ConstantValue::lt(const ConstantValue& other) const {
if (!is_signed_) {
return value_ < other.value_;
}
return absl::bit_cast<int64_t>(value_) <
absl::bit_cast<int64_t>(other.value_);
}
bool ConstantValue::gt(const ConstantValue& other) const {
if (!is_signed_) {
return value_ > other.value_;
}
return absl::bit_cast<int64_t>(value_) >
absl::bit_cast<int64_t>(other.value_);
}
std::string ConstantValue::ToString() const {
return is_signed_ ? absl::StrCat(GetSignedValue())
: absl::StrCat(GetUnsignedValue());
}
} | #include "xla/service/constant_value.h"
#include <gtest/gtest.h>
#include "xla/literal_util.h"
namespace xla {
namespace {
class ConstantValueTest : public ::testing::Test {};
TEST_F(ConstantValueTest, ZeroTest32) {
ConstantValue zero = ConstantValue::GetZero(32, false);
EXPECT_EQ(zero.GetSignedValue(), 0);
EXPECT_EQ(zero.GetUnsignedValue(), 0);
EXPECT_EQ(zero.GetBitwidth(), 32);
EXPECT_FALSE(zero.IsSigned());
ConstantValue zero_s = ConstantValue::GetZero(32, true);
EXPECT_EQ(zero_s.GetSignedValue(), 0);
EXPECT_EQ(zero_s.GetUnsignedValue(), 0);
EXPECT_EQ(zero_s.GetBitwidth(), 32);
EXPECT_TRUE(zero_s.IsSigned());
}
TEST_F(ConstantValueTest, OneTest32) {
ConstantValue one = ConstantValue::GetOne(32, false);
EXPECT_EQ(one.GetSignedValue(), 1);
EXPECT_EQ(one.GetUnsignedValue(), 1);
EXPECT_EQ(one.GetBitwidth(), 32);
EXPECT_FALSE(one.IsSigned());
ConstantValue one_s = ConstantValue::GetOne(32, true);
EXPECT_EQ(one_s.GetSignedValue(), 1);
EXPECT_EQ(one_s.GetUnsignedValue(), 1);
EXPECT_EQ(one_s.GetBitwidth(), 32);
EXPECT_TRUE(one_s.IsSigned());
}
TEST_F(ConstantValueTest, Signed23) {
ConstantValue signed_number = ConstantValue::GetSigned(4194303, 23);
EXPECT_EQ(signed_number.GetSignedValue(), 4194303);
EXPECT_EQ(signed_number.GetBitwidth(), 23);
EXPECT_TRUE(signed_number.IsSigned());
ConstantValue signed_number_of = ConstantValue::GetSigned(4194304, 23);
EXPECT_EQ(signed_number_of.GetSignedValue(), -4194304);
EXPECT_EQ(signed_number_of.GetBitwidth(), 23);
EXPECT_TRUE(signed_number_of.IsSigned());
}
TEST_F(ConstantValueTest, Unsigned23) {
ConstantValue unsigned_number = ConstantValue::GetUnsigned(8388607, 23);
EXPECT_EQ(unsigned_number.GetUnsignedValue(), 8388607);
EXPECT_EQ(unsigned_number.GetBitwidth(), 23);
EXPECT_FALSE(unsigned_number.IsSigned());
ConstantValue unsigned_number_of = ConstantValue::GetUnsigned(8388608, 23);
EXPECT_EQ(unsigned_number_of.GetUnsignedValue(), 0);
EXPECT_EQ(unsigned_number_of.GetBitwidth(), 23);
EXPECT_FALSE(unsigned_number_of.IsSigned());
}
TEST_F(ConstantValueTest, FromLiteral) {
auto cv_8 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int8_t>(-32)));
EXPECT_TRUE(cv_8.ok());
EXPECT_TRUE(cv_8->IsSigned());
EXPECT_EQ(cv_8->GetBitwidth(), 8);
EXPECT_EQ(cv_8->GetSignedValue(), -32);
auto cv_u8 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int8_t>(32)));
EXPECT_TRUE(cv_u8.ok());
EXPECT_TRUE(cv_u8->IsSigned());
EXPECT_EQ(cv_u8->GetBitwidth(), 8);
EXPECT_EQ(cv_u8->GetUnsignedValue(), 32);
auto cv_16 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int16_t>(32000)));
EXPECT_TRUE(cv_16.ok());
EXPECT_TRUE(cv_16->IsSigned());
EXPECT_EQ(cv_16->GetBitwidth(), 16);
EXPECT_EQ(cv_16->GetSignedValue(), 32000);
auto cv_u16 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint16_t>(33000)));
EXPECT_TRUE(cv_u16.ok());
EXPECT_FALSE(cv_u16->IsSigned());
EXPECT_EQ(cv_u16->GetBitwidth(), 16);
EXPECT_EQ(cv_u16->GetUnsignedValue(), 33000);
auto cv_32 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int32_t>(-2000000000)));
EXPECT_TRUE(cv_32.ok());
EXPECT_TRUE(cv_32->IsSigned());
EXPECT_EQ(cv_32->GetBitwidth(), 32);
EXPECT_EQ(cv_32->GetSignedValue(), -2000000000);
auto cv_u32 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint32_t>(3000000000)));
EXPECT_TRUE(cv_u32.ok());
EXPECT_FALSE(cv_u32->IsSigned());
EXPECT_EQ(cv_u32->GetBitwidth(), 32);
EXPECT_EQ(cv_u32->GetUnsignedValue(), 3000000000);
auto cv_64 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<int64_t>(3000000000)));
EXPECT_TRUE(cv_64.ok());
EXPECT_TRUE(cv_64->IsSigned());
EXPECT_EQ(cv_64->GetBitwidth(), 64);
EXPECT_EQ(cv_64->GetSignedValue(), 3000000000);
auto cv_u64 = ConstantValue::FromLiteral(
LiteralUtil::CreateR0(static_cast<uint64_t>(6000000000)));
EXPECT_TRUE(cv_u64.ok());
EXPECT_FALSE(cv_u64->IsSigned());
EXPECT_EQ(cv_u64->GetBitwidth(), 64);
EXPECT_EQ(cv_u64->GetUnsignedValue(), 6000000000);
}
TEST_F(ConstantValueTest, Add) {
ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
ConstantValue result = lhs.add(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(8388600, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388607);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetSignedValue(), -6);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(-1, 23);
result = lhs.add(rhs);
EXPECT_EQ(result.GetSignedValue(), 4194303);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Sub) {
ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
ConstantValue result = lhs.sub(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388606);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388607);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetSignedValue(), -14);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(1, 23);
result = lhs.sub(rhs);
EXPECT_EQ(result.GetSignedValue(), 4194303);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Div) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.div(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 2);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(4, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetSignedValue(), -2);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
result = lhs.div(rhs);
EXPECT_EQ(result.GetSignedValue(), -2097152);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Mod) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.mod(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(6, 23);
rhs = ConstantValue::GetUnsigned(7, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 6);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(3, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetSignedValue(), -1);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(1, 23);
result = lhs.mod(rhs);
EXPECT_EQ(result.GetSignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, Mul) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
ConstantValue result = lhs.mul(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 4418);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetUnsigned(8388607, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
result = lhs.mul(rhs);
EXPECT_EQ(result.GetUnsignedValue(), 8388606);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_FALSE(result.IsSigned());
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(3, 23);
result = lhs.mul(rhs);
EXPECT_EQ(result.GetSignedValue(), -30);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
  result = lhs.mul(rhs);
EXPECT_EQ(result.GetSignedValue(), 0);
EXPECT_EQ(result.GetBitwidth(), 23);
EXPECT_TRUE(result.IsSigned());
}
TEST_F(ConstantValueTest, LtGtEq) {
ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
EXPECT_FALSE(lhs.lt(rhs));
EXPECT_TRUE(lhs.gt(rhs));
lhs = ConstantValue::GetUnsigned(8388607, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
EXPECT_FALSE(lhs.lt(rhs));
EXPECT_TRUE(lhs.gt(rhs));
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(3, 23);
  EXPECT_TRUE(lhs.lt(rhs));
  EXPECT_FALSE(lhs.gt(rhs));
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
EXPECT_TRUE(lhs.lt(rhs));
EXPECT_FALSE(lhs.gt(rhs));
lhs = ConstantValue::GetUnsigned(43, 23);
rhs = ConstantValue::GetUnsigned(43, 23);
EXPECT_TRUE(lhs.eq(rhs));
EXPECT_TRUE(rhs.eq(lhs));
lhs = ConstantValue::GetSigned(-10, 23);
rhs = ConstantValue::GetSigned(-10, 23);
EXPECT_TRUE(lhs.eq(rhs));
EXPECT_TRUE(rhs.eq(lhs));
lhs = ConstantValue::GetUnsigned(4194304, 23);
rhs = ConstantValue::GetUnsigned(2, 23);
EXPECT_FALSE(lhs.eq(rhs));
EXPECT_FALSE(rhs.eq(lhs));
lhs = ConstantValue::GetSigned(-4194304, 23);
rhs = ConstantValue::GetSigned(2, 23);
EXPECT_FALSE(lhs.eq(rhs));
EXPECT_FALSE(rhs.eq(lhs));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/constant_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/constant_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40d198c4-ab0f-4c3f-b86b-e60a3a7036c9 | cpp | tensorflow/tensorflow | all_reduce_combiner | third_party/xla/xla/service/all_reduce_combiner.cc | third_party/xla/xla/service/all_reduce_combiner_test.cc | #include "xla/service/all_reduce_combiner.h"
#include <algorithm>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
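// Combines the all-reduces in `to_combine` into a single tuple-shaped
// all-reduce and replaces each original instruction with a get-tuple-element
// of the combined result. All instructions must use equivalent reduction
// computations.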
absl::Status CombineAllReduces(absl::Span<HloInstruction* const> to_combine) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " CRS ops";
HloComputation& computation = *to_combine.back()->parent();
HloComputation* reduction = to_combine[0]->to_apply();
const HloOpcode type = reduction->root_instruction()->opcode();
std::vector<HloInstruction*> operands;
std::vector<const Shape*> operand_shapes;
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllReduce);
TF_RET_CHECK(hlo->operands().size() == 1);
TF_RET_CHECK(hlo->to_apply() == reduction ||
(hlo->to_apply()->instruction_count() == 3 &&
hlo->to_apply()->num_parameters() == 2 &&
hlo->to_apply()->root_instruction()->opcode() == type));
TF_RET_CHECK(hlo->shape().IsArray());
for (HloInstruction* operand : hlo->operands()) {
operands.push_back(operand);
operand_shapes.push_back(&operand->shape());
}
}
HloInstruction* combined;
TF_RET_CHECK(operands.size() >= 2);
combined = computation.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes), operands, reduction,
to_combine.front()->device_list(),
/*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloAllReduceInstruction>(to_combine.front())
->use_global_device_ids()));
combined->set_sharding(
hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
auto replace_with = HloInstruction::CreateGetTupleElement(
to_combine[i]->shape(), combined, i);
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
to_combine[i], std::move(replace_with)));
}
return absl::OkStatus();
}
}
AllReduceCombiner::AllReduceCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count) {}
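// Runs the pass: in every non-fusion computation, all-reduces that share a
// combinable key (see GetAllReduceKey) are grouped and combined until the
// byte-size or instruction-count threshold is reached. A usage sketch,
// mirroring the unit tests:
//   AllReduceCombiner combiner(/*combine_threshold_in_bytes=*/30 * 1024 * 1024,
//                              /*combine_threshold_count=*/256);
//   TF_ASSIGN_OR_RETURN(bool changed, combiner.Run(module));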
absl::StatusOr<bool> AllReduceCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip AllReduceCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1) << "Skip AllReduceCombiner because the module contains all-reduce "
"with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn =
[&domain_map](
const HloInstruction* instruction) -> std::optional<AllReduceKey> {
if (instruction->opcode() != HloOpcode::kAllReduce) {
return std::nullopt;
}
return GetAllReduceKey(instruction, domain_map.get());
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<AllReduceKey>(
computation, key_fn, &CombineAllReduces,
combine_threshold_in_bytes_, combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/all_reduce_combiner.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::nullopt;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
constexpr int64_t kMaxCombineCount = 256;
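// Counts the all-reduce instructions across all non-fusion computations.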
int64_t AllReduceCount(const HloModule& module) {
int64_t count = 0;
for (HloComputation* computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kAllReduce) {
++count;
}
}
}
return count;
}
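// Adds one all-reduce per (size, reduction) pair, each fed by a broadcasted
// f32 constant of the requested size in KiB, and returns a tuple of all the
// all-reduce results. The broadcast inputs are also appended to `inputs`.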
HloInstruction* MakeCrossReplicaReductions(
std::vector<int64_t> sizes_in_kib, std::vector<HloComputation*> reductions,
std::vector<HloInstruction*>* inputs, HloComputation::Builder* b) {
CHECK_EQ(reductions.size(), sizes_in_kib.size());
std::vector<HloInstruction*> all_reduces;
for (int i = 0; i < sizes_in_kib.size(); i++) {
int64_t size_in_kib = sizes_in_kib[i];
HloComputation* reduction = reductions[i];
auto constant = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
Shape shape = ShapeUtil::MakeShape(
F32, {static_cast<int32_t>(size_in_kib * 1024 / sizeof(float))});
auto input =
b->AddInstruction(HloInstruction::CreateBroadcast(shape, constant, {}));
inputs->push_back(input);
all_reduces.push_back(b->AddInstruction(HloInstruction::CreateAllReduce(
shape, {input}, reduction, CollectiveDeviceList(),
/*constrain_layout=*/false, /*channel_id=*/nullopt,
/*use_global_device_ids=*/false)));
}
return b->AddInstruction(HloInstruction::CreateTuple(all_reduces));
}
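// Builds a scalar two-parameter reduction computation for the given opcode
// (e.g. add, minimum, maximum) and adds it to the module.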
HloComputation* MakeReduction(const HloOpcode type, HloModule* module) {
HloComputation::Builder sum_builder(HloOpcodeString(type));
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}), type, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
return reduction;
}
using AllReduceCombinerTest = HloTestBase;
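// Combines several compatible all-reduces into one tuple-shaped all-reduce
// and verifies that each original result becomes a get-tuple-element.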
TEST_F(AllReduceCombinerTest, CombineAllReduces) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
auto root = MakeCrossReplicaReductions(
{1, 2, 10, 7, 6}, {sum, sum, sum, sum, sum}, &inputs, &b);
auto computation = module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
ASSERT_EQ(root, computation->root_instruction());
ASSERT_EQ(inputs.size(), root->operands().size());
HloInstruction* combined = nullptr;
for (int64_t i = 0; i < root->operands().size(); ++i) {
HloInstruction* hlo = root->mutable_operand(i);
ASSERT_TRUE(hlo->opcode() == HloOpcode::kGetTupleElement);
EXPECT_EQ(hlo->tuple_index(), i);
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
if (combined == nullptr) {
combined = hlo->mutable_operand(0);
ASSERT_TRUE(combined->opcode() == HloOpcode::kAllReduce);
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), combined->shape()));
ASSERT_EQ(combined->operands().size(), inputs.size());
}
EXPECT_EQ(combined, hlo->operand(0));
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
EXPECT_EQ(combined->operand(i), inputs[i]);
EXPECT_EQ(1, inputs[i]->users().size());
}
ASSERT_NE(combined, nullptr);
}
TEST_F(AllReduceCombinerTest, CombineCrossReplicaReductionsInGroups) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation* min = MakeReduction(HloOpcode::kMinimum, module.get());
HloComputation* max = MakeReduction(HloOpcode::kMaximum, module.get());
HloComputation* sum_2 = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
{sum, sum_2, min, min, min, max, max, max, sum, sum_2}, &inputs, &b);
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 3)
<< "expects 3 groups for 3 reduction types.";
EXPECT_TRUE(changed);
}
TEST_F(AllReduceCombinerTest, RespectThreshold) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions({8, 4}, {sum, sum}, &inputs, &b);
module->AddEntryComputation(b.Build());
{
AllReduceCombiner combine((8 + 4) * 1024 - 1, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), inputs.size());
EXPECT_FALSE(changed);
}
{
AllReduceCombiner combine((8 + 4) * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
}
}
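// An all-reduce that consumes the result of another all-reduce must not be
// combined with it.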
TEST_F(AllReduceCombinerTest, NoDependentCombination) {
auto module = CreateNewVerifiedModule();
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto all_reduce = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList(),
/*constrain_layout=*/false, /*channel_id=*/nullopt,
/*use_global_device_ids=*/false));
b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {all_reduce}, reduction,
CollectiveDeviceList(), /*constrain_layout=*/false,
/*channel_id=*/nullopt, /*use_global_device_ids=*/false));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
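// All-reduces with different replica groups must not be combined.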
TEST_F(AllReduceCombinerTest, GroupAllReduce) {
auto module = CreateNewVerifiedModule(TestName(), 4);
HloComputation::Builder b(TestName());
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto crs0 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 1}, {2, 3}}),
/*constrain_layout=*/false,
/*channel_id=*/nullopt, /*use_global_device_ids=*/false));
auto crs1 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 2}, {1, 3}}),
/*constrain_layout=*/false,
/*channel_id=*/nullopt, /*use_global_device_ids=*/false));
b.AddInstruction(HloInstruction::CreateTuple({crs0, crs1}));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, DomainPreventsCombining) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=1}}
ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
LOG(INFO) << "Original module:\n" << module->ToString();
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
param2 = f32[128] parameter(2), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
crs2 = f32[128] all-reduce(param2),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=1}}
domain2 = f32[128] domain(crs2),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1, domain2),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 3);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
const HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
ASSERT_EQ(param0->user_count(), 1);
const HloInstruction* combined_ar = param0->users().front();
ASSERT_EQ(combined_ar->opcode(), HloOpcode::kAllReduce);
EXPECT_THAT(combined_ar,
op::Sharding("{{maximal device=0}, {maximal device=0}}"));
}
TEST_F(AllReduceCombinerTest, DoNotCombineCrossShardAndCrossReplicaInSPMD) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
cross_shard_ar = f32[128] all-reduce(param0),
replica_groups={{0}}, to_apply=summit, channel_id=1
cross_replica_ar = f32[128] all-reduce(param1),
replica_groups={{0}}, to_apply=summit, sharding={maximal device=1}
ROOT tuple = (f32[128], f32[128]) tuple(cross_shard_ar, cross_replica_ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CrossCoreAllReduce) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs00 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=0}
crs01 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=1}
crs10 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=0}
crs11 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=1}
domain0 = f32[128] domain(crs00),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
ROOT add = f32[128] add(domain0, crs11),
sharding={maximal device=1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 4);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Domain(op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(0), op::Parameter(0)),
op::Shape("(f32[128], f32[128])")))),
op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(1), op::Parameter(1)),
op::Shape("(f32[128], f32[128])")))));
}
TEST_F(AllReduceCombinerTest, CrossCombineGroupCycle) {
const char* const hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
%max {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] maximum(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
crs00 = f32[128] all-reduce(p0), to_apply=add
crs10 = f32[128] all-reduce(p1), to_apply=max
crs01 = f32[128] all-reduce(crs00), to_apply=max
crs11 = f32[128] all-reduce(crs10), to_apply=add
add0 = f32[128] add(crs01, crs11)
crs02 = f32[128] all-reduce(add0), to_apply=add
crs12 = f32[128] all-reduce(crs11), to_apply=add
ROOT tuple = (f32[128], f32[128]) tuple(crs02, crs12)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 6);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 4);
EXPECT_TRUE(changed);
auto crs0 = op::AllReduce(op::Parameter(0), op::AllReduce(op::Parameter(1)));
auto add = op::Add(op::AllReduce(op::GetTupleElement(crs0, 0)),
op::GetTupleElement(crs0, 1));
auto crs1 = op::AllReduce(add, op::GetTupleElement(crs0));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(crs1, 0), op::GetTupleElement(crs1, 1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
656417ff-07ef-40b3-a25e-31c722343732 | cpp | tensorflow/tensorflow | hlo_verifier | third_party/xla/xla/service/hlo_verifier.cc | third_party/xla/xla/service/hlo_verifier_test.cc | #include "xla/service/hlo_verifier.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
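// Returns true if the opcode of `hlo` may call other computations.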
bool IsCallerInstruction(HloInstruction* hlo) {
return HloInstruction::MightHaveCalledComputations(hlo->opcode());
}
absl::Status CheckOperandCount(const HloInstruction* hlo, int expected) {
if (hlo->operand_count() != expected) {
return Internal("Expected %d operands for %s instruction: %s", expected,
HloOpcodeString(hlo->opcode()), hlo->ToString());
}
return absl::OkStatus();
}
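// Returns the number of participants in each subgroup of the collective,
// derived from the instruction's replica groups or, when they are empty,
// from the module's replica/partition counts.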
int64_t GetSubgroupSize(HloCollectiveInstruction* hlo,
CollectiveOpGroupMode group_mode) {
const HloModuleConfig& config = hlo->GetModule()->config();
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
int64_t replica_subgroup_size =
hlo->replica_groups().empty()
? config.replica_count()
: hlo->replica_groups()[0].replica_ids_size();
if (group_mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
replica_subgroup_size *= config.num_partitions();
}
return replica_subgroup_size;
}
case CollectiveOpGroupMode::kFlattenedID:
return hlo->replica_groups()[0].replica_ids_size();
case CollectiveOpGroupMode::kCrossPartition:
return hlo->replica_groups().empty()
? config.num_partitions()
: hlo->replica_groups()[0].replica_ids_size();
}
}
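// Verifies that every computation reachable from `comp` runs on the same
// execution thread, optionally skipping asynchronous ops.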
absl::Status CheckNestedComputationThreadNameEqual(
const HloComputation* comp, bool skip_nested_async_op_check) {
for (const HloInstruction* instr : comp->instructions()) {
if (skip_nested_async_op_check && instr->IsAsynchronous()) {
continue;
}
for (const HloComputation* called_cmp : instr->called_computations()) {
if (called_cmp->execution_thread() != comp->execution_thread()) {
return Internal(
"Nested computations expects same computation's thread name: %s vs "
"%s, in called computation `%s` vs caller computation `%s`",
called_cmp->execution_thread(), comp->execution_thread(),
called_cmp->name(), comp->name());
}
TF_RETURN_IF_ERROR(CheckNestedComputationThreadNameEqual(
called_cmp, skip_nested_async_op_check));
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::CheckParameterCount(
const HloInstruction* calling_instruction,
const HloComputation* computation, int expected) {
if (computation->num_parameters() != expected) {
return Internal(
"Expected computation %s called from %s to have %d parameters, has %d",
computation->name(), calling_instruction->name(), expected,
computation->num_parameters());
}
return absl::OkStatus();
}
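// Opcode-independent checks applied to every instruction: called-computation
// consistency, operand arity, unbounded dynamism, and layout rank.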
absl::Status ShapeVerifier::Preprocess(HloInstruction* hlo) {
if (!hlo->called_computations().empty() && !IsCallerInstruction(hlo)) {
return Internal(
"Called computations specified for non-caller instruction %s",
hlo->ToString());
}
std::optional<int> arity = HloOpcodeArity(hlo->opcode());
if (arity) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, *arity));
}
if (!opts_.allow_unbounded_dynamism && hlo->shape().is_unbounded_dynamic()) {
return InvalidArgument("Unbounded dynamism is disabled for instruction: %s",
hlo->ToString());
}
if (hlo->shape().has_layout()) {
if (hlo->shape().layout().minor_to_major_size() !=
hlo->shape().dimensions_size()) {
return InvalidArgument(
"Instruction has mismatched minor-to-major size and dimension size: "
"%s",
hlo->ToString());
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleElementwiseUnary(HloInstruction* hlo) {
return CheckUnaryShape(hlo);
}
absl::Status ShapeVerifier::HandleElementwiseBinary(HloInstruction* hlo) {
return CheckBinaryShape(hlo);
}
absl::Status ShapeVerifier::HandleClamp(HloInstruction* clamp) {
return CheckTernaryShape(clamp);
}
absl::Status ShapeVerifier::HandleSelect(HloInstruction* select) {
return CheckTernaryShape(select);
}
absl::Status ShapeVerifier::HandleConcatenate(HloInstruction* concatenate) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : concatenate->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(concatenate,
ShapeInference::InferConcatOpShape(
operand_shapes, concatenate->concatenate_dimension()));
}
absl::Status ShapeVerifier::HandleConvert(HloInstruction* convert) {
return CheckShape(convert, ShapeInference::InferConvertShape(
convert->operand(0)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleBitcastConvert(HloInstruction* convert) {
return CheckShape(convert, ShapeInference::InferBitcastConvertShape(
convert->operand(0)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleStochasticConvert(HloInstruction* convert) {
return CheckShape(
convert, ShapeInference::InferStochasticConvertShape(
convert->operand(0)->shape(), convert->operand(1)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleCopy(HloInstruction* copy) {
return CheckUnaryShape(copy);
}
absl::Status ShapeVerifier::HandleDot(HloInstruction* dot) {
auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
TF_RETURN_IF_ERROR(
CheckOperandCount(dot, HloDotInstruction::kOperands + sparsity.size()));
TF_ASSIGN_OR_RETURN(
const Shape expected,
ShapeInference::InferDotOpShape(
dot->operand(0)->shape(), dot->operand(1)->shape(),
dot->dot_dimension_numbers(),
dot->shape().element_type(), sparsity));
if (auto nibble_count =
absl::c_count(dot->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE)) {
if (nibble_count == 1) {
return InvalidArgument("Dot cannot have a single packed nibble argument");
}
if (nibble_count == 2) {
if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(0)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. LHS is "
"%s.",
dot->operand(0)->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(1)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. RHS is "
"%s.",
dot->operand(1)->ToString());
}
}
}
for (int i = 0; i < sparsity.size(); ++i) {
const SparsityDescriptor& descriptor = sparsity[i];
TF_RET_CHECK(descriptor.index() == 0 || descriptor.index() == 1);
TF_ASSIGN_OR_RETURN(const Shape expected_metadata_shape,
ShapeInference::InferSparseDotMetadataShape(
dot->operand(descriptor.index())->shape(),
dot->dot_dimension_numbers(), descriptor));
const Shape actual_metadata_shape =
dot->operand(HloDotInstruction::kOperands + i)->shape();
if (!ShapeUtil::Compatible(actual_metadata_shape,
expected_metadata_shape)) {
return Internal(
"Expected sparse dot metadata to have shape equal to %s, actual "
"shape is %s:\n%s",
StringifyShape(expected_metadata_shape),
StringifyShape(actual_metadata_shape), dot->ToString());
}
}
return CheckShape(dot, expected);
}
absl::Status ShapeVerifier::HandleConvolution(HloInstruction* convolution) {
TF_ASSIGN_OR_RETURN(
Shape expected,
ShapeInference::InferConvolveShape(
convolution->operand(0)->shape(), convolution->operand(1)->shape(),
convolution->feature_group_count(), convolution->batch_group_count(),
convolution->window(), convolution->convolution_dimension_numbers(),
convolution->shape().element_type()));
if (auto nibble_count =
absl::c_count(convolution->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE)) {
if (nibble_count == 1) {
return InvalidArgument(
"Convolution cannot have a single packed nibble argument");
}
if (nibble_count == 2) {
if (convolution->feature_group_count() != 1) {
return InvalidArgument(
"Packed nibble precision does not support feature group count "
"%s.",
convolution->ToString());
}
if (convolution->batch_group_count() != 1) {
return InvalidArgument(
"Packed nibble precision does not support batch group count "
"%s.",
convolution->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(
convolution->operand(0)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. LHS is "
"%s.",
convolution->operand(0)->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(
convolution->operand(1)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. RHS is "
"%s.",
convolution->operand(1)->ToString());
}
}
}
return CheckShape(convolution, expected);
}
absl::Status ShapeVerifier::HandleFft(HloInstruction* fft) {
TF_ASSIGN_OR_RETURN(
const Shape expected,
ShapeInference::InferFftShape(fft->operand(0)->shape(), fft->fft_type(),
fft->fft_length()));
return CheckShape(fft, expected);
}
absl::Status ShapeVerifier::HandleTriangularSolve(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(const Shape expected,
ShapeInference::InferTriangularSolveShape(
hlo->operand(0)->shape(), hlo->operand(1)->shape(),
hlo->triangular_solve_options()));
return CheckShape(hlo, expected);
}
absl::Status ShapeVerifier::HandleCholesky(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
TF_ASSIGN_OR_RETURN(const Shape expected, ShapeInference::InferCholeskyShape(
hlo->operand(0)->shape()));
return CheckShape(hlo, expected);
}
absl::Status ShapeVerifier::HandleOptimizationBarrier(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
return CheckShape(hlo, hlo->operand(0)->shape());
}
bool ShapeVerifier::ShapesSame(const Shape& a, const Shape& b,
Shape::Equal equal) {
if (!opts_.layout_sensitive) {
return ShapeUtil::Compatible(a, b);
}
return equal(a, b);
}
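// Checks that the replica groups are well formed for the given group mode:
// non-empty groups, no duplicated or missing participant IDs, the expected
// total participant count, and (optionally) uniform group sizes.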
static absl::Status CheckReplicaGroups(HloInstruction* hlo,
CollectiveOpGroupMode group_mode,
bool uniform_replica_group_size = true) {
if (!hlo->replica_groups().empty()) {
absl::flat_hash_set<int64_t> replicas_seen;
for (const ReplicaGroup& g : hlo->replica_groups()) {
if (g.replica_ids().empty()) {
return Internal("Instruction cannot have an empty replica group: %s",
hlo->ToString());
}
for (int64_t i : g.replica_ids()) {
if (!replicas_seen.insert(i).second) {
return Internal(
"Replica %d is repeated in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
}
size_t n = replicas_seen.size();
for (int64_t i = 0; i < n; ++i) {
if (!replicas_seen.count(i)) {
return Internal(
"Replica %d is not named in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
int64_t replica_count = hlo->GetModule()->config().replica_count();
int64_t num_partitions = hlo->GetModule()->config().num_partitions();
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
TF_RET_CHECK(replica_count == 1 || n == replica_count)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << replica_count
<< " replicas, but found " << n << ": " << hlo->ToString();
break;
}
case CollectiveOpGroupMode::kCrossPartition: {
TF_RET_CHECK(num_partitions == 1 || n == num_partitions)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << num_partitions
<< " partitions, but found " << n << ": " << hlo->ToString();
break;
}
case CollectiveOpGroupMode::kFlattenedID: {
const int64_t num_flattened_ids = replica_count * num_partitions;
TF_RET_CHECK(num_flattened_ids == 1 || n == num_flattened_ids)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << num_flattened_ids
<< " flattened IDs, but found " << n << ": " << hlo->ToString();
break;
}
}
if (uniform_replica_group_size) {
int64_t size = hlo->replica_groups()[0].replica_ids_size();
for (const ReplicaGroup& g : hlo->replica_groups()) {
TF_RET_CHECK(size == g.replica_ids_size())
<< "Replica groups expected to be of uniform size";
}
}
} else {
TF_RET_CHECK(group_mode != CollectiveOpGroupMode::kFlattenedID)
<< "Replica groups must be specified in flattened-id mode";
}
return absl::OkStatus();
}
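// Checks the invariants shared by all-gather and all-gather-start and
// returns, via `computed_shard_count`, the ceiling ratio of the output to
// the input extent along the all-gather dimension.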
static absl::Status CheckCommonAllGatherInvariants(
HloInstruction* hlo, int64_t* computed_shard_count) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
CHECK_NE(computed_shard_count, nullptr) << "Expected a shard count as input";
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ag->channel_id().has_value(),
ag->use_global_device_ids()));
TF_RETURN_IF_ERROR(CheckReplicaGroups(ag, group_mode));
TF_RET_CHECK(ag->all_gather_dimension() >= 0);
TF_RET_CHECK(ag->operand_count() >= 1);
int64_t shard_count = 0;  // Overwritten on the first iteration; operand_count >= 1 is checked above.
for (int64_t i = 0; i < ag->operand_count(); ++i) {
TF_RET_CHECK(ag->all_gather_dimension() < ag->operand(i)->shape().rank());
Shape output_shape;
if (hlo->opcode() == HloOpcode::kAllGather) {
output_shape = (ag->operand_count() == 1) ? ag->shape()
: ag->shape().tuple_shapes(i);
} else {
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGatherStart);
output_shape = (ag->operand_count() == 1)
? ag->shape().tuple_shapes(1)
: ag->shape().tuple_shapes(1).tuple_shapes(i);
}
TF_RET_CHECK(ag->all_gather_dimension() < output_shape.rank());
if (i == 0) {
shard_count = CeilOfRatio(
output_shape.dimensions(ag->all_gather_dimension()),
ag->operand(i)->shape().dimensions(ag->all_gather_dimension()));
}
}
int64_t subgroup_size = GetSubgroupSize(ag, group_mode);
TF_RET_CHECK(subgroup_size == 1 || shard_count == subgroup_size)
<< "shard_count = " << shard_count
<< ", subgroup_size = " << subgroup_size << ", " << hlo->ToString();
*computed_shard_count = shard_count;
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAllGather(HloInstruction* hlo) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
int64_t shard_count;
TF_RETURN_IF_ERROR(CheckCommonAllGatherInvariants(hlo, &shard_count));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
ag, ShapeInference::InferAllGatherShape(
operand_shapes, ag->all_gather_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllGatherStart(HloInstruction* hlo) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
int64_t shard_count;
TF_RETURN_IF_ERROR(CheckCommonAllGatherInvariants(hlo, &shard_count));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
ag, ShapeInference::InferAllGatherStartShape(
operand_shapes, ag->all_gather_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllGatherDone(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferAllGatherDoneShape(hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleAllReduce(HloInstruction* hlo) {
auto ar = Cast<HloAllReduceInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ar->channel_id().has_value(),
ar->use_global_device_ids()));
TF_RETURN_IF_ERROR(
CheckReplicaGroups(ar, group_mode, /*uniform_replica_group_size=*/false));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo, ShapeInference::InferAllReduceShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleReduceScatter(HloInstruction* hlo) {
auto ars = Cast<HloReduceScatterInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ars->channel_id().has_value(),
ars->use_global_device_ids()));
TF_RETURN_IF_ERROR(CheckReplicaGroups(ars, group_mode));
TF_RET_CHECK(ars->scatter_dimension() >= 0);
TF_RET_CHECK(ars->operand_count() >= 1);
for (int64_t i = 0; i < ars->operand_count(); ++i) {
TF_RET_CHECK(ars->scatter_dimension() < ars->operand(i)->shape().rank());
const Shape& output_shape = (ars->operand_count() == 1)
? ars->shape()
: ars->shape().tuple_shapes(i);
TF_RET_CHECK(ars->scatter_dimension() < output_shape.rank());
}
const Shape& output0_shape =
(ars->operand_count() == 1) ? ars->shape() : ars->shape().tuple_shapes(0);
int64_t shard_count =
CeilOfRatio(ars->operand(0)->shape().dimensions(ars->scatter_dimension()),
output0_shape.dimensions(ars->scatter_dimension()));
int64_t subgroup_size = GetSubgroupSize(ars, group_mode);
TF_RET_CHECK(subgroup_size == 1 || shard_count == subgroup_size)
<< "shard_count = " << shard_count
<< ", subgroup_size = " << subgroup_size << ", " << hlo->ToString();
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(ars,
ShapeInference::InferReduceScatterShape(
operand_shapes, ars->scatter_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllReduceStart(HloInstruction* hlo) {
auto ar = Cast<HloAllReduceInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ar->channel_id().has_value(),
ar->use_global_device_ids()));
TF_RETURN_IF_ERROR(
CheckReplicaGroups(ar, group_mode, /*uniform_replica_group_size=*/false));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo,
ShapeInference::InferAllReduceStartShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleAllReduceDone(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferAllReduceDoneShape(hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleAllToAll(HloInstruction* hlo) {
auto* all_to_all = Cast<HloAllToAllInstruction>(hlo);
// Check the cast result before all_to_all is dereferenced.
TF_RET_CHECK(all_to_all != nullptr);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(
all_to_all->channel_id().has_value(),
/*use_global_device_ids=*/std::nullopt));
TF_RETURN_IF_ERROR(CheckReplicaGroups(hlo, group_mode));
const int64_t split_count = GetSubgroupSize(all_to_all, group_mode);
if (all_to_all->split_dimension()) {
TF_RET_CHECK(hlo->operand_count() == 1);
return CheckShape(
hlo, ShapeInference::InferAllToAllShape(
hlo->operand(0)->shape(), *all_to_all->split_dimension(),
*all_to_all->split_dimension(), split_count));
} else {
TF_RET_CHECK(hlo->operand_count() == split_count);
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo,
ShapeInference::InferAllToAllTupleShape(operand_shapes));
}
}
absl::Status ShapeVerifier::HandlePartitionId(HloInstruction* hlo) {
return CheckShape(hlo, ShapeUtil::MakeShape(U32, {}));
}
absl::Status ShapeVerifier::HandleReplicaId(HloInstruction* hlo) {
return CheckShape(hlo, ShapeUtil::MakeShape(U32, {}));
}
namespace {
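// Checks that an in-place collective-permute buffer offset is a tuple of
// arrays (one element per buffer dimension) or a tuple of such tuples.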
absl::Status CheckBufferOffset(const Shape& buffer_shape,
const Shape& buffer_offset_shape) {
if (!buffer_offset_shape.IsTuple()) {
return Internal("Buffer offset is not tuple.");
}
bool all_is_array =
absl::c_all_of(buffer_offset_shape.tuple_shapes(),
[](const Shape& shape) { return shape.IsArray(); });
bool all_is_tuple =
absl::c_all_of(buffer_offset_shape.tuple_shapes(),
[](const Shape& shape) { return shape.IsTuple(); });
if (!all_is_array && !all_is_tuple) {
return Internal(
"Buffer offset should either be a tuple of arrays or "
" a tuple of tuples.");
}
if (all_is_tuple) {
if (absl::c_any_of(buffer_offset_shape.tuple_shapes(),
[&buffer_shape](const Shape& shape) {
return ShapeUtil::TupleElementCount(shape) !=
buffer_shape.rank();
})) {
return Internal(
"Buffer offset index should have the same number of "
"elements as the buffer's rank.");
}
} else {
if (buffer_offset_shape.tuple_shapes_size() != buffer_shape.rank()) {
return Internal(
"Buffer offset index should have the same number of "
"elements as the buffer's rank.");
}
}
return absl::OkStatus();
}
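// For the 4-operand (in-place) collective-permute, checks that the input and
// output buffers and their offsets have mutually consistent shapes.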
absl::Status CheckInplaceCollectivePermute(HloInstruction* collective_permute) {
if (collective_permute->operand_count() == 1) {
return absl::OkStatus();
}
if (collective_permute->operand_count() != 4) {
return Internal("Unexpected number of operands: %d.",
collective_permute->operand_count());
}
const Shape& input_buffer_shape = collective_permute->operand(0)->shape();
const Shape& output_buffer_shape = collective_permute->operand(1)->shape();
const Shape& input_offset_shape = collective_permute->operand(2)->shape();
const Shape& output_offset_shape = collective_permute->operand(3)->shape();
if (input_buffer_shape.IsArray() && output_buffer_shape.IsArray()) {
absl::Status check_input_buffer_offset =
CheckBufferOffset(input_buffer_shape, input_offset_shape);
if (!check_input_buffer_offset.ok()) {
return check_input_buffer_offset;
}
absl::Status check_output_buffer_offset =
CheckBufferOffset(output_buffer_shape, output_offset_shape);
if (!check_output_buffer_offset.ok()) {
return check_output_buffer_offset;
}
} else if (input_buffer_shape.IsTuple() && output_buffer_shape.IsTuple()) {
if (ShapeUtil::TupleElementCount(input_buffer_shape) !=
ShapeUtil::TupleElementCount(output_buffer_shape)) {
return Internal("Unmatching input buffers and output buffers.");
}
if (!input_offset_shape.IsTuple() ||
ShapeUtil::TupleElementCount(input_offset_shape) !=
ShapeUtil::TupleElementCount(input_buffer_shape)) {
return Internal("Unmatching input buffers and input offset.");
}
for (int i = 0; i < input_buffer_shape.tuple_shapes_size(); ++i) {
absl::Status check_input_buffer_offset =
CheckBufferOffset(input_buffer_shape.tuple_shapes(i),
input_offset_shape.tuple_shapes(i));
if (!check_input_buffer_offset.ok()) {
return check_input_buffer_offset;
}
}
if (!output_offset_shape.IsTuple() ||
ShapeUtil::TupleElementCount(output_offset_shape) !=
ShapeUtil::TupleElementCount(output_buffer_shape)) {
return Internal("Unmatching output buffers and output offset.");
}
for (int i = 0; i < output_buffer_shape.tuple_shapes_size(); ++i) {
absl::Status check_output_buffer_offset =
CheckBufferOffset(output_buffer_shape.tuple_shapes(i),
output_offset_shape.tuple_shapes(i));
if (!check_output_buffer_offset.ok()) {
return check_output_buffer_offset;
}
}
} else {
return Internal("Unmatching input buffers and output buffers.");
}
return absl::OkStatus();
}
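// Checks that every source and target ID in the source-target pairs is in
// range and appears at most once (or at most once per offset slot for the
// in-place form).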
absl::Status CheckDuplicatedSourceOrTarget(HloInstruction* hlo,
CollectiveOpGroupMode group_mode) {
const HloModuleConfig& config = hlo->GetModule()->config();
const int64_t limit = group_mode == CollectiveOpGroupMode::kCrossReplica
? config.replica_count()
: config.num_partitions();
absl::flat_hash_map<int64_t, std::vector<int64_t>> seen_source_to_targets;
absl::flat_hash_map<int64_t, std::vector<int64_t>> seen_target_to_sources;
int allowed_seen_count = 1;
if (hlo->operand_count() == 4) {
if (hlo->operand(0)->shape().IsArray()) {
allowed_seen_count = hlo->operand(2)->shape().tuple_shapes_size();
} else {
allowed_seen_count =
hlo->operand(2)->shape().tuple_shapes(0).tuple_shapes_size();
}
}
for (const auto& p : hlo->source_target_pairs()) {
TF_RET_CHECK(p.first >= 0)
<< "Source " << p.first
<< " in the instruction's source-target pair must be >= 0 : "
<< hlo->ToString();
TF_RET_CHECK(limit == 1 || p.first < limit)
<< "Source " << p.first
<< " in the instruction's source-target pair must be < " << limit
<< " : " << hlo->ToString();
if (seen_source_to_targets.contains(p.first) &&
seen_source_to_targets[p.first].size() == allowed_seen_count) {
if (allowed_seen_count == 1) {
return Internal(
"Source %d appears more than once in instruction's source-target "
"pairs: %s",
p.first, hlo->ToString());
} else {
return Internal(
"Source %d appears more than %d times in instruction's "
"source-target "
"pairs: %s",
p.first, allowed_seen_count, hlo->ToString());
}
} else {
seen_source_to_targets[p.first].push_back(p.second);
}
TF_RET_CHECK(p.second >= 0)
<< "Target " << p.second
<< " in the instruction's source-target pair must be >= 0 : "
<< hlo->ToString();
TF_RET_CHECK(limit == 1 || p.second < limit)
<< "Target " << p.second
<< " in the instruction's source-target pair must be < " << limit
<< " : " << hlo->ToString();
if (seen_target_to_sources.contains(p.second) &&
seen_target_to_sources[p.second].size() == allowed_seen_count) {
if (allowed_seen_count == 1) {
return Internal(
"Target %d appears more than once in instruction's source-target "
"pairs: %s",
p.second, hlo->ToString());
} else {
return Internal(
"Target %d appears more than %d times in instruction's "
"source-target "
"pairs: %s",
p.second, allowed_seen_count, hlo->ToString());
}
} else {
seen_target_to_sources[p.second].push_back(p.first);
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleCollectiveBroadcast(HloInstruction* hlo) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
hlo, ShapeInference::InferCollectiveBroadcastShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermute(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(hlo->channel_id().has_value(),
/*use_global_device_ids=*/std::nullopt));
TF_RETURN_IF_ERROR(CheckInplaceCollectivePermute(hlo));
TF_RETURN_IF_ERROR(CheckDuplicatedSourceOrTarget(hlo, group_mode));
std::vector<const Shape*> operand_shapes;
absl::c_transform(
hlo->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
return CheckShape(
hlo, ShapeInference::InferCollectivePermuteShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermuteStart(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(hlo->channel_id().has_value(),
/*use_global_device_ids=*/std::nullopt));
TF_RETURN_IF_ERROR(CheckInplaceCollectivePermute(hlo));
TF_RETURN_IF_ERROR(CheckDuplicatedSourceOrTarget(hlo, group_mode));
std::vector<const Shape*> operand_shapes;
absl::c_transform(
hlo->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
std::vector<Shape> context_shapes;
if (hlo->shape().tuple_shapes_size() > 2) {
context_shapes = std::vector<Shape>(hlo->shape().tuple_shapes().begin() + 2,
hlo->shape().tuple_shapes().end());
}
return CheckShape(hlo, ShapeInference::InferCollectivePermuteStartShape(
operand_shapes, context_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermuteDone(HloInstruction* hlo) {
return CheckShape(hlo, ShapeInference::InferCollectivePermuteDoneShape(
hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleReducePrecision(
HloInstruction* reduce_precision) {
return CheckShape(reduce_precision, ShapeInference::InferReducePrecisionShape(
reduce_precision->operand(0)->shape(),
reduce_precision->exponent_bits(),
reduce_precision->mantissa_bits()));
}
absl::Status ShapeVerifier::CheckIsTokenOperand(
const HloInstruction* instruction, int64_t operand_no) {
const HloInstruction* token = instruction->operand(operand_no);
if (!ShapeUtil::Equal(token->shape(), ShapeUtil::MakeTokenShape())) {
return Internal(
"Expected operand %d to be token-shaped, actual shape is "
"%s:\n%s",
operand_no, StringifyShape(token->shape()), instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::CheckOperandAndParameter(
const HloInstruction* instruction, int64_t operand_number,
const HloComputation* computation, int64_t parameter_number) {
const HloInstruction* operand = instruction->operand(operand_number);
const HloInstruction* parameter =
computation->parameter_instruction(parameter_number);
if (!ShapesSame(operand->shape(), parameter->shape())) {
return Internal("Operand %s shape does not match parameter's %s in %s",
operand->ToString(), parameter->ToString(),
instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleInfeed(HloInstruction* instruction) {
HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 0));
return CheckShape(infeed,
ShapeUtil::MakeTupleShape(
{infeed->infeed_shape(), ShapeUtil::MakeTokenShape()}),
/*only_compare_minor_to_major_in_layout=*/true);
}
absl::Status ShapeVerifier::HandleOutfeed(HloInstruction* instruction) {
HloOutfeedInstruction* outfeed = Cast<HloOutfeedInstruction>(instruction);
TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 1));
if (!ShapesSame(outfeed->outfeed_shape(), outfeed->operand(0)->shape())) {
return Internal(
"Expected outfeed shape to be equal to operand's shape %s, "
"actual shape is %s:\n%s",
StringifyShape(outfeed->operand(0)->shape()),
StringifyShape(outfeed->outfeed_shape()), outfeed->ToString());
}
return CheckShape(outfeed, ShapeUtil::MakeTokenShape());
}
bool ShapeVerifier::HasCompatibleElementTypes(const Shape& shape_0,
const Shape& shape_1,
const Shape& result_shape) {
return ShapeUtil::SameElementType(shape_0, shape_1) &&
(ShapeUtil::SameElementType(shape_0, result_shape) ||
(opts_.allow_mixed_precision &&
ShapeUtil::SameElementTypeIgnoringFpPrecision(shape_0,
result_shape)));
}
absl::Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(CheckOperandCount(instruction, 2));
const Shape& shape_0 = instruction->operand(0)->shape();
const Shape& shape_1 = instruction->operand(1)->shape();
if (!ShapeUtil::IsScalar(shape_0) || !ShapeUtil::IsScalar(shape_1)) {
return Internal(
"Expected scalar types for the two operands of Rng instruction: %s",
instruction->ToString());
}
if (!HasCompatibleElementTypes(shape_0, shape_1, instruction->shape())) {
return Internal(
"Expected compatible element types for the result and the two operands"
" of Rng instruction: %s",
instruction->ToString());
}
PrimitiveType element_type = shape_0.element_type();
switch (instruction->random_distribution()) {
case RNG_UNIFORM:
if (!primitive_util::IsFloatingPointType(element_type) &&
!primitive_util::IsIntegralType(element_type) &&
element_type != PRED) {
return Internal(
"Element type not supported."
" Expected element to be of floating point type, integral type or"
" predicate type for RngUniform: %s",
instruction->ToString());
}
break;
case RNG_NORMAL:
if (!primitive_util::IsFloatingPointType(element_type)) {
return Internal(
"Element type not supported."
" Expected element to be FloatingPointType for RngNormal: %s",
instruction->ToString());
}
break;
default:
return Internal(
"Invalid Rng distribution %s",
RandomDistribution_Name(instruction->random_distribution()));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleRngBitGenerator(HloInstruction* hlo) {
if (!hlo->shape().IsTuple()) {
return absl::OkStatus();
}
// The non-tuple case returned above, so only the arity needs checking.
if (hlo->shape().tuple_shapes_size() != 2) {
return Internal(
"Expected tuple shape with 2 elements for RngBitGenerator. Got: %s",
hlo->shape().ToString(true));
}
if (!ShapeUtil::Compatible(hlo->operand(0)->shape(),
hlo->shape().tuple_shapes(0))) {
return Internal(
"Expected state shape to match between input and output for "
"RngBitGenerator. Got %s vs. %s",
hlo->operand(0)->shape().ToString(true),
hlo->shape().tuple_shapes(0).ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleRngGetAndUpdateState(
HloInstruction* instruction) {
TF_RETURN_IF_ERROR(CheckOperandCount(instruction, 0));
const Shape& result_shape = instruction->shape();
const Shape expected_shape = ShapeUtil::MakeShape(U64, {2});
if (!ShapeUtil::Compatible(result_shape, expected_shape)) {
return Internal(
"Invalid RngGetAndUpdateState, expect result to have shape %s, got %s ",
StringifyShape(expected_shape), StringifyShape(result_shape));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleReverse(HloInstruction* reverse) {
return CheckShape(
reverse, ShapeInference::InferReverseShape(reverse->operand(0)->shape(),
reverse->dimensions()));
}
absl::Status ShapeVerifier::HandleTopK(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferTopKShape(hlo->operand(0)->shape(),
Cast<HloTopKInstruction>(hlo)->k()));
}
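// Checks the sort comparator (two scalar parameters per operand and a PRED
// result), that all operands share dimensions, and that the sort dimension
// is within the operand rank.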
absl::Status ShapeVerifier::HandleSort(HloInstruction* hlo) {
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
if (sort->operand_count() < 1) {
return Internal("Expected at least 1 operand for %s instruction: %s",
HloOpcodeString(sort->opcode()), sort->ToString());
}
HloComputation* compare = sort->to_apply();
Shape compare_shape = compare->root_instruction()->shape();
if (!ShapeUtil::Compatible(compare_shape, ShapeUtil::MakeShape(PRED, {}))) {
return Internal(
"The Sort compare computation shape does not lead to a scalar "
"predicate shape: %s",
StringifyShape(compare_shape));
}
TF_RETURN_IF_ERROR(
CheckParameterCount(sort, compare, sort->operand_count() * 2));
for (int64_t parameter_idx = 0; parameter_idx < compare->num_parameters();
++parameter_idx) {
int64_t operand_idx = parameter_idx / 2;
Shape expected_scalar_shape = ShapeUtil::MakeShape(
sort->operand(operand_idx)->shape().element_type(), {});
Shape actual_parameter_shape =
compare->parameter_instruction(parameter_idx)->shape();
if (!ShapeUtil::CompatibleIgnoringFpPrecision(expected_scalar_shape,
actual_parameter_shape)) {
return Internal(
"Expected the %lld-th parameter of the compare computation of sort "
"to have shape %s, but got %s",
parameter_idx, StringifyShape(expected_scalar_shape),
StringifyShape(actual_parameter_shape));
}
}
for (int64_t operand = 1; operand < sort->operand_count(); ++operand) {
if (!ShapeUtil::SameDimensions(sort->operand(0)->shape(),
sort->operand(operand)->shape())) {
return Internal(
"Expected sort to have to have the same dimensions for all operands. "
"First operand shape is: %s\n, shape (operand index %lld) is: %s",
StringifyShape(sort->operand(0)->shape()), operand,
StringifyShape(sort->operand(operand)->shape()));
}
}
if (sort->sort_dimension() >= sort->operand(0)->shape().rank()) {
return Internal(
"Expected the sort_dimension %d of sort to be smaller than the rank %d "
"of the operand(s).",
sort->sort_dimension(), sort->operand(0)->shape().rank());
}
return CheckVariadicShape(sort);
}
absl::Status ShapeVerifier::HandleConstant(HloInstruction* constant) {
if (!Cast<HloConstantInstruction>(constant)->HasLiteral()) {
return Internal("Constant is required to have a valid literal: %s",
constant->ToString());
}
return CheckShape(constant, constant->literal().shape(),
/*only_compare_minor_to_major_in_layout=*/true);
}
absl::Status ShapeVerifier::HandleIota(HloInstruction* hlo) {
auto* iota = Cast<HloIotaInstruction>(hlo);
if (!iota->shape().IsArray()) {
return Internal("Iota does not support non-array result.");
}
const int64_t rank = iota->shape().rank();
if (rank == 0) {
return Internal("Iota does not support scalars.");
}
int64_t iota_dimension = iota->iota_dimension();
if (iota_dimension >= rank || iota_dimension < 0) {
return Internal(
"The iota dimension cannot go beyond the operation rank or be "
"negative.");
}
PrimitiveType primitive_type = iota->shape().element_type();
if (!primitive_util::IsIntegralType(primitive_type) &&
!primitive_util::IsFloatingPointType(primitive_type) &&
!primitive_util::IsComplexType(primitive_type)) {
return InvalidArgument(
"Only support iota of integral, floating point or complex primitive "
"types, got %s",
PrimitiveType_Name(primitive_type));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
return CheckShape(get_tuple_element,
ShapeInference::InferGetTupleElementShape(
get_tuple_element->operand(0)->shape(),
get_tuple_element->tuple_index()));
}
namespace {
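// Checks that the first `num_operands_to_check` operands have the same
// element types as the corresponding parameters of the to_apply computation.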
absl::Status SameElementTypesForOperandsAndToApplyParameters(
const HloInstruction& instruction, int64_t num_operands_to_check) {
const ProgramShape& to_apply = instruction.to_apply()->ComputeProgramShape();
for (int i = 0; i < num_operands_to_check; ++i) {
const Shape& parameter_shape = to_apply.parameters(i);
const Shape& operand_shape = instruction.operands()[i]->shape();
if (!ShapeUtil::SameElementType(parameter_shape, operand_shape)) {
return InvalidArgument(
"Shape mismatch between to_apply computation"
" parameter and operand %d in %s.",
i, instruction.ToString().c_str());
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleReduce(HloInstruction* reduce) {
if (reduce->operand_count() % 2 != 0) {
return Internal(
"Expected an even number of operands for %s instruction: %s",
HloOpcodeString(reduce->opcode()), reduce->ToString());
}
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : reduce->operands()) {
operand_shapes.push_back(&operand->shape());
}
TF_RETURN_IF_ERROR(
CheckShape(reduce, ShapeInference::InferReduceShape(
operand_shapes, reduce->dimensions(),
reduce->to_apply()->ComputeProgramShape())));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*reduce, reduce->operand_count());
}
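// In layout-sensitive mode a bitcast must preserve the physical buffer size,
// unless differing sizes are explicitly allowed for static shapes with equal
// array data size.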
absl::Status ShapeVerifier::HandleBitcast(HloInstruction* bitcast) {
const Shape& output_shape = bitcast->shape();
const Shape& operand_shape = bitcast->operand(0)->shape();
if (opts_.layout_sensitive &&
opts_.shape_size(output_shape) != opts_.shape_size(operand_shape)) {
if (!opts_.allow_bitcast_to_have_different_size ||
!(output_shape.is_static() && operand_shape.is_static() &&
(ShapeUtil::ArrayDataSize(output_shape) ==
ShapeUtil::ArrayDataSize(operand_shape)))) {
return Internal(
"%s: Bitcast cannot have different shape sizes of output (%d) and "
"operand "
"(%d) (%s) (%s)",
bitcast->ToString(), opts_.shape_size(output_shape),
opts_.shape_size(operand_shape), output_shape.ToString(true),
operand_shape.ToString(true));
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleBroadcast(HloInstruction* broadcast) {
const Shape& operand_shape = broadcast->operand(0)->shape();
TF_RET_CHECK(SameElementType(broadcast->shape(), operand_shape))
<< broadcast->ToString();
TF_RET_CHECK(operand_shape.rank() == broadcast->dimensions().size())
<< broadcast->ToString();
for (int64_t operand_dimension = 0; operand_dimension < operand_shape.rank();
++operand_dimension) {
int64_t output_dimension = broadcast->dimensions()[operand_dimension];
TF_RET_CHECK((output_dimension < broadcast->shape().rank()) &&
output_dimension >= 0 &&
(broadcast->shape().dimensions(output_dimension) ==
operand_shape.dimensions(operand_dimension)))
<< broadcast->ToString() << " operand shape " << operand_shape;
}
return absl::OkStatus();
}
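// Verifies a dynamic-reshape: operand 0 is the array being reshaped and each
// remaining operand supplies the S32 size of one output dimension, so there
// are rank + 1 operands in total. Illustrative HLO sketch (names
// hypothetical):
//   %dr = f32[<=6,5] dynamic-reshape(f32[30] %data, s32[] %d0, s32[] %d1)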
absl::Status ShapeVerifier::HandleDynamicReshape(
HloInstruction* dynamic_reshape) {
const Shape& operand_shape = dynamic_reshape->operand(0)->shape();
TF_RET_CHECK(SameElementType(dynamic_reshape->shape(), operand_shape));
TF_RET_CHECK(ShapeUtil::ElementsIn(dynamic_reshape->shape()) ==
ShapeUtil::ElementsIn(operand_shape));
TF_RET_CHECK(dynamic_reshape->shape().rank() + 1 ==
dynamic_reshape->operand_count());
for (int64_t i = 1; i < dynamic_reshape->operand_count(); ++i) {
TF_RET_CHECK(dynamic_reshape->operand(i)->shape().element_type() == S32);
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleReshape(HloInstruction* reshape) {
const Shape& operand_shape = reshape->operand(0)->shape();
TF_RET_CHECK(SameElementType(reshape->shape(), operand_shape));
TF_RET_CHECK(ShapeUtil::ElementsIn(reshape->shape()) ==
ShapeUtil::ElementsIn(operand_shape));
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleTranspose(HloInstruction* transpose) {
return CheckShape(
transpose, ShapeInference::InferTransposeShape(
transpose->operand(0)->shape(), transpose->dimensions()));
}
absl::Status ShapeVerifier::HandleParameter(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleFusion(HloInstruction* fusion) {
if (fusion->called_computations().size() != 1) {
return Internal("Fusion has a non-unary number of called computations (%s)",
fusion->ToString().c_str());
}
const Shape& root_computation_shape =
fusion->called_computations()[0]->root_instruction()->shape();
if (!ShapesSame(fusion->shape(), root_computation_shape)) {
return Internal(
"Fused computation shape (%s) is not equal to the fusion shape (%s)",
root_computation_shape.ToString(true), fusion->shape().ToString(true));
}
auto& fused_parameters = fusion->fused_parameters();
if (fused_parameters.size() > fusion->operand_count()) {
return Internal(
"Fused parameter count (%d) is greater than the number of operands (%d)"
" passed to the fusion instruction in: %s.",
fused_parameters.size(), fusion->operand_count(),
fusion->ToString().c_str());
}
for (HloInstruction* fused_param : fused_parameters) {
int64_t param_no = fused_param->parameter_number();
if (!ShapesSame(fused_param->shape(), fusion->operand(param_no)->shape())) {
return Internal(
"Shape mismatch between parameter number %d and its operand in "
"%s.",
param_no, fusion->ToString().c_str());
}
}
const HloFusionInstruction* casted_fusion =
DynCast<const HloFusionInstruction>(fusion);
for (const auto& pair : casted_fusion->output_to_operand_aliasing()) {
TF_RET_CHECK(pair.second.first < casted_fusion->operand_count())
<< "Invalid aliasing operand index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(
casted_fusion->operand(pair.second.first)->shape(), pair.second.second))
<< "Invalid aliasing operand shape index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(casted_fusion->shape(), pair.first))
<< "Invalid aliasing output shape index.";
const Shape& output_subshape =
ShapeUtil::GetSubshape(casted_fusion->shape(), pair.first);
const Shape& operand_subshape = ShapeUtil::GetSubshape(
casted_fusion->operand(pair.second.first)->shape(), pair.second.second);
if (opts_.layout_sensitive) {
if (casted_fusion->IsFused()) {
TF_RET_CHECK(
Shape::Equal().IgnoreTilesInLayout().IgnoreMemorySpaceInLayout()(
operand_subshape, output_subshape))
<< "Different aliasing shapes: "
<< operand_subshape.ToString(true) << " vs "
<< output_subshape.ToString(true);
} else {
TF_RET_CHECK(Shape::Equal()(operand_subshape, output_subshape))
<< "Different aliasing shapes: "
<< operand_subshape.ToString(true) << " vs "
<< output_subshape.ToString(true);
}
} else {
TF_RET_CHECK(ShapeUtil::Compatible(output_subshape, operand_subshape))
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
}
}
return absl::OkStatus();
}
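// Verifies a call. Composite calls must additionally carry frontend
// attributes: a non-empty "composite.name"; a "composite.attributes" entry
// that is either absent or non-empty; and, if present, a "composite.version"
// that parses as a non-negative integer. A minimal sketch of an accepted map:
//   {"composite.name": "my.ns.op", "composite.attributes": "{}",
//    "composite.version": "1"}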
absl::Status ShapeVerifier::HandleCall(HloInstruction* call) {
TF_RETURN_IF_ERROR(
CheckParameterCount(call, call->to_apply(), call->operand_count()));
for (int64_t i = 0; i < call->to_apply()->num_parameters(); ++i) {
TF_RETURN_IF_ERROR(CheckOperandAndParameter(call, i, call->to_apply(), i));
}
if (call->is_composite()) {
TF_RET_CHECK(call->has_frontend_attributes())
<< "A composite call op must have frontend attributes";
auto map = call->frontend_attributes().map();
if (auto name = map.find("composite.name");
name == map.end() || name->second.empty()) {
return InvalidArgument(
"A composite call op must have frontend attributes with key "
"composite.name whose value is non-empty");
}
if (auto attributes = map.find("composite.attributes");
attributes != map.end() && attributes->second.empty()) {
return InvalidArgument(
"A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty");
}
if (auto version_str = map.find("composite.version");
version_str != map.end()) {
int64_t version = 0;
if (!absl::SimpleAtoi(version_str->second, &version) || version < 0) {
return InvalidArgument(
"A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer but got: "
"%s",
version_str->second);
}
}
}
return CheckShape(call, call->to_apply()->root_instruction()->shape());
}
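// Verifies a custom-call. For layout-constrained custom calls (except the
// special "LayoutConstraint" target), the result and every entry of
// operand_shapes_with_layout() must carry a layout, and each operand must be
// compatible with its constrained shape. Output-to-operand aliasing pairs are
// validated the same way as for fusions above.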
absl::Status ShapeVerifier::HandleCustomCall(HloInstruction* instruction) {
const HloCustomCallInstruction* custom_call =
DynCast<const HloCustomCallInstruction>(instruction);
TF_RET_CHECK(custom_call != nullptr);
if (custom_call->layout_constrained() &&
!custom_call->IsCustomCall("LayoutConstraint")) {
TF_RET_CHECK(LayoutUtil::HasLayout(custom_call->shape()));
TF_RET_CHECK(custom_call->operand_count() ==
custom_call->operand_shapes_with_layout().size());
for (int64_t i = 0; i < custom_call->operand_count(); ++i) {
const Shape& operand_shape_with_layout =
custom_call->operand_shapes_with_layout()[i];
TF_RET_CHECK(ShapeUtil::Compatible(custom_call->operand(i)->shape(),
operand_shape_with_layout))
<< custom_call->operand(i)->shape().ToString(true) << " operand "
<< operand_shape_with_layout.ToString();
TF_RET_CHECK(LayoutUtil::HasLayout(operand_shape_with_layout));
}
}
for (const auto& pair : custom_call->output_to_operand_aliasing()) {
TF_RET_CHECK(pair.second.first < custom_call->operand_count())
<< "Invalid aliasing operand index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(
custom_call->operand(pair.second.first)->shape(), pair.second.second))
<< "Invalid aliasing operand shape index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(custom_call->shape(), pair.first))
<< "Invalid aliasing output shape index.";
const Shape& output_subshape =
ShapeUtil::GetSubshape(custom_call->shape(), pair.first);
const Shape& operand_subshape = ShapeUtil::GetSubshape(
custom_call->operand(pair.second.first)->shape(), pair.second.second);
if (opts_.layout_sensitive) {
TF_RET_CHECK(operand_subshape == output_subshape)
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
} else {
TF_RET_CHECK(ShapeUtil::Compatible(output_subshape, operand_subshape))
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleSlice(HloInstruction* slice) {
return CheckShape(slice,
ShapeInference::InferSliceShape(
slice->operand(0)->shape(), slice->slice_starts(),
slice->slice_limits(), slice->slice_strides()));
}
absl::Status ShapeVerifier::HandleDynamicSlice(HloInstruction* dynamic_slice) {
return CheckShape(
dynamic_slice,
ShapeInference::InferDynamicSliceShape(
dynamic_slice->operand(0)->shape(),
Cast<HloDynamicSliceInstruction>(dynamic_slice)->index_shapes(),
dynamic_slice->dynamic_slice_sizes()));
}
absl::Status ShapeVerifier::HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
return CheckShape(
dynamic_update_slice,
ShapeInference::InferDynamicUpdateSliceShape(
dynamic_update_slice->operand(0)->shape(),
dynamic_update_slice->operand(1)->shape(),
Cast<HloDynamicUpdateSliceInstruction>(dynamic_update_slice)
->index_shapes()));
}
absl::Status ShapeVerifier::HandleTuple(HloInstruction* tuple) {
return CheckVariadicShape(tuple);
}
absl::Status ShapeVerifier::HandleMap(HloInstruction* map) {
std::vector<const Shape*> operand_shapes;
int64_t max_operand_rank = 0;
for (const HloInstruction* operand : map->operands()) {
operand_shapes.push_back(&operand->shape());
max_operand_rank = std::max(max_operand_rank, operand->shape().rank());
}
std::vector<int64_t> map_dims(max_operand_rank);
std::iota(map_dims.begin(), map_dims.end(), 0);
TF_RETURN_IF_ERROR(CheckShape(
map,
ShapeInference::InferMapShape(
operand_shapes, map->to_apply()->ComputeProgramShape(), map_dims)));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*map, map->operand_count());
}
absl::Status ShapeVerifier::HandleReduceWindow(HloInstruction* reduce_window) {
auto reduce_window_instr = Cast<HloReduceWindowInstruction>(reduce_window);
auto input_shapes = reduce_window_instr->input_shapes();
auto init_shapes = reduce_window_instr->init_value_shapes();
TF_RETURN_IF_ERROR(CheckShape(
reduce_window, ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, reduce_window->window(),
reduce_window->to_apply()->ComputeProgramShape())));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*reduce_window, reduce_window->operand_count());
}
absl::Status ShapeVerifier::HandleSelectAndScatter(
HloInstruction* instruction) {
return CheckShape(
instruction,
ShapeInference::InferSelectAndScatterShape(
instruction->operand(0)->shape(),
instruction->select()->ComputeProgramShape(), instruction->window(),
instruction->operand(1)->shape(), instruction->operand(2)->shape(),
instruction->scatter()->ComputeProgramShape()));
}
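// Verifies a while loop: condition and body each take a single parameter of
// the loop state type T, the condition produces a scalar PRED, and the body
// produces T again. Illustrative HLO sketch (names hypothetical):
//   %w = (s32[], f32[8]) while((s32[], f32[8]) %init),
//       condition=%cond, body=%body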
absl::Status ShapeVerifier::HandleWhile(HloInstruction* xla_while) {
TF_RETURN_IF_ERROR(
CheckParameterCount(xla_while, xla_while->while_body(), 1));
TF_RETURN_IF_ERROR(
CheckParameterCount(xla_while, xla_while->while_condition(), 1));
TF_RETURN_IF_ERROR(
CheckOperandAndParameter(xla_while, 0, xla_while->while_body(), 0));
TF_RETURN_IF_ERROR(
CheckOperandAndParameter(xla_while, 0, xla_while->while_condition(), 0));
const Shape& conditional_shape =
xla_while->while_condition()->root_instruction()->shape();
if (!ShapeUtil::Compatible(conditional_shape,
ShapeUtil::MakeShape(PRED, {}))) {
return Internal(
"Conditional computation shape does not lead to a scalar predicate "
"shape: %s",
StringifyShape(conditional_shape));
}
return CheckShape(xla_while,
xla_while->while_body()->root_instruction()->shape());
}
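// Verifies a conditional: a PRED selector implies exactly two branches, while
// an S32 selector indexes into one of N >= 1 branches. The instruction takes
// branch_count + 1 operands, the selector followed by one argument per branch
// computation, and every branch root must match the result shape.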
absl::Status ShapeVerifier::HandleConditional(HloInstruction* conditional) {
if (!ShapeUtil::IsScalar(conditional->operand(0)->shape())) {
return InvalidArgument(
"The first operand of conditional must be a scalar. Got %s",
conditional->operand(0)->shape().DebugString());
}
const int num_branches = conditional->branch_count();
PrimitiveType operand0_type = conditional->operand(0)->shape().element_type();
if (operand0_type == PRED) {
TF_RET_CHECK(num_branches == 2);
} else {
if (operand0_type != S32) {
return InvalidArgument(
"The first operand of indexed conditional must be a scalar of S32. "
"Got type %s.",
PrimitiveType_Name(operand0_type));
}
TF_RET_CHECK(num_branches >= 1);
}
TF_RETURN_IF_ERROR(CheckOperandCount(conditional, num_branches + 1));
for (int j = 0; j < num_branches; ++j) {
TF_RETURN_IF_ERROR(CheckParameterCount(
conditional, conditional->branch_computation(j), 1));
TF_RETURN_IF_ERROR(CheckOperandAndParameter(
conditional, j + 1, conditional->branch_computation(j), 0));
TF_RETURN_IF_ERROR(CheckShape(
conditional,
conditional->branch_computation(j)->root_instruction()->shape()));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandlePad(HloInstruction* pad) {
return CheckShape(pad, ShapeInference::InferPadShape(pad->operand(0)->shape(),
pad->operand(1)->shape(),
pad->padding_config()));
}
namespace {
absl::Status CheckAsyncOpOperand(const HloInstruction* async_op) {
const HloInstruction* operand = async_op->operand(0);
if (operand->opcode() != HloOpcode::kAsyncStart &&
operand->opcode() != HloOpcode::kAsyncUpdate) {
return Internal(
"%s expects operand to be async-update or async-done, found "
"%s.",
HloOpcodeString(async_op->opcode()),
HloOpcodeString(operand->opcode()));
}
if (*async_op->async_wrapped_computation() !=
*operand->async_wrapped_computation()) {
return Internal(
"The %s expects its wrapped async computation to be identical to its "
"operand's wrapped async computation (%s vs %s), thread name (%s vs "
"%s).",
HloOpcodeString(async_op->opcode()),
async_op->async_wrapped_instruction()->ToString(),
operand->async_wrapped_instruction()->ToString(),
async_op->async_wrapped_computation()->execution_thread(),
operand->async_wrapped_computation()->execution_thread());
}
return absl::OkStatus();
}
absl::Status CheckAsyncOpComputationThreadName(const HloInstruction* async_op) {
absl::string_view async_execution_thread = async_op->async_execution_thread();
if (async_execution_thread !=
async_op->async_wrapped_computation()->execution_thread()) {
return Internal(
"%s expects same async thread name as wrapped computation's "
"thread name (%s vs %s).",
HloOpcodeString(async_op->opcode()), async_execution_thread,
async_op->async_wrapped_computation()->execution_thread());
}
return CheckNestedComputationThreadNameEqual(
async_op->async_wrapped_computation(),
false);
}
absl::Status CheckCallableInstructionThreadName(
const HloInstruction* instruction, bool skip_nested_async_op_check) {
for (const HloComputation* computation : instruction->called_computations()) {
if (instruction->parent() != nullptr) {
if (instruction->parent()->execution_thread() !=
computation->execution_thread()) {
return Internal(
"callable instruction %s expects parent computation thread name "
"same as called computation's thread name (%s vs %s).",
instruction->ToString(), instruction->parent()->execution_thread(),
computation->execution_thread());
}
}
TF_RETURN_IF_ERROR(CheckNestedComputationThreadNameEqual(
computation, skip_nested_async_op_check));
}
return absl::OkStatus();
}
}
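// Checks the tuple shape threaded through an async start/update/done chain:
// element {0} is a tuple of the wrapped computation's parameter shapes,
// element {1} is its result shape, and any further elements are
// backend-specific context. A minimal sketch for a wrapped computation
// (f32[2], f32[3]) -> f32[5]:
//   ((f32[2], f32[3]), f32[5], s32[])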
absl::Status ShapeVerifier::CheckAsyncOpComputationShapes(
const HloInstruction* async_op, const Shape& async_shape) {
if (!async_shape.IsTuple() || async_shape.tuple_shapes_size() < 2) {
return Internal(
"The %s expects the async shape to be a tuple of at least two "
"elements, found %s.",
HloOpcodeString(async_op->opcode()), async_shape.ToString());
}
ProgramShape computation_shape =
async_op->async_wrapped_computation()->ComputeProgramShape();
Shape param_shape = ShapeUtil::MakeTupleShape(computation_shape.parameters());
if (!ShapesSame(async_shape.tuple_shapes(0), param_shape)) {
return Internal(
"The %s expects the async shape at index {0} to match async "
"computation parameter shape (%s vs %s).",
HloOpcodeString(async_op->opcode()),
async_shape.tuple_shapes(0).ToString(true),
param_shape.ToString(true));
}
if (!ShapesSame(async_shape.tuple_shapes(1), computation_shape.result())) {
return Internal(
"The %s expects the async shape at index {1} to match the async "
"computation root shape (%s vs %s).",
HloOpcodeString(async_op->opcode()),
async_shape.tuple_shapes(1).ToString(true),
computation_shape.result().ToString(true));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAsyncStart(HloInstruction* async_start) {
TF_RETURN_IF_ERROR(
CheckAsyncOpComputationShapes(async_start, async_start->shape()));
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_start));
const Shape& param_shape = async_start->shape().tuple_shapes(0);
for (int i = 0; i < async_start->operand_count(); ++i) {
if (!ShapesSame(param_shape.tuple_shapes(i),
async_start->operand(i)->shape())) {
return Internal(
"The %s expects the shape of operand %d to match the async shape at "
"index {0} (%s vs %s).",
HloOpcodeString(async_start->opcode()), i,
async_start->operand(i)->shape().ToString(true),
param_shape.tuple_shapes(i).ToString(true));
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAsyncUpdate(HloInstruction* async_update) {
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_update));
if (!ShapesSame(async_update->operand(0)->shape(), async_update->shape())) {
return Internal(
"The %s expects the shape of operand and output to match (%s vs %s).",
HloOpcodeString(async_update->opcode()),
async_update->operand(0)->shape().ToString(true),
async_update->shape().ToString(true));
}
TF_RETURN_IF_ERROR(
CheckAsyncOpComputationShapes(async_update, async_update->shape()));
return CheckAsyncOpOperand(async_update);
}
absl::Status ShapeVerifier::HandleAsyncDone(HloInstruction* async_done) {
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_done));
TF_RETURN_IF_ERROR(CheckAsyncOpComputationShapes(
async_done, async_done->operand(0)->shape()));
const Shape& root_shape = async_done->operand(0)->shape().tuple_shapes(1);
if (!ShapesSame(root_shape, async_done->shape())) {
return Internal(
"The %s expects the shape of output to match the async shape at index "
"{1} (%s vs %s).",
HloOpcodeString(async_done->opcode()),
async_done->shape().ToString(true), root_shape.ToString(true));
}
return CheckAsyncOpOperand(async_done);
}
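// Verifies copy-start/copy-done: copy-start yields a tuple of the destination
// buffer, the source buffer (both shaped like operand 0), and a U32 context;
// copy-done requires the two buffers to agree (ignoring memory space and
// split configs in the layout) and returns the destination shape.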
absl::Status ShapeVerifier::HandleCopyStart(HloInstruction* copy_start) {
return CheckShape(copy_start,
ShapeUtil::MakeTupleShape({copy_start->operand(0)->shape(),
copy_start->operand(0)->shape(),
ShapeUtil::MakeShape(U32, {})}),
true);
}
absl::Status ShapeVerifier::HandleCopyDone(HloInstruction* copy_done) {
const Shape& operand_shape = copy_done->operand(0)->shape();
const Shape& dest_shape = ShapeUtil::GetTupleElementShape(operand_shape, 0);
const Shape& src_shape = ShapeUtil::GetTupleElementShape(operand_shape, 1);
if (!ShapesSame(dest_shape, src_shape,
Shape::Equal()
.IgnoreMemorySpaceInLayout()
.IgnoreSplitConfigInLayout())) {
return Internal(
"Source and destination buffers in CopyDone arguments need to be the "
"same shape found %s and %s\n%s",
StringifyShape(dest_shape), StringifyShape(src_shape),
copy_done->ToString());
}
return CheckShape(copy_done, ShapeUtil::GetTupleElementShape(
copy_done->operand(0)->shape(), 0));
}
absl::Status ShapeVerifier::HandleSend(HloInstruction* send) {
return CheckShape(send,
ShapeUtil::MakeTupleShape({send->operand(0)->shape(),
ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()}),
true);
}
absl::Status ShapeVerifier::HandleSendDone(HloInstruction* send_done) {
return CheckShape(send_done, ShapeUtil::MakeTokenShape());
}
absl::Status ShapeVerifier::HandleRecv(HloInstruction* recv) {
return CheckShape(
recv,
ShapeUtil::MakeTupleShape(
{ShapeUtil::GetTupleElementShape(recv->shape(), 0),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()}),
true);
}
absl::Status ShapeVerifier::HandleRecvDone(HloInstruction* recv_done) {
return CheckShape(
recv_done,
ShapeUtil::MakeTupleShape(
{ShapeUtil::GetTupleElementShape(recv_done->operand(0)->shape(), 0),
ShapeUtil::MakeTokenShape()}));
}
absl::Status ShapeVerifier::HandleBatchNormTraining(
HloInstruction* batch_norm_training) {
return CheckShape(batch_norm_training,
ShapeInference::InferBatchNormTrainingShape(
batch_norm_training->operand(0)->shape(),
batch_norm_training->operand(1)->shape(),
batch_norm_training->operand(2)->shape(),
batch_norm_training->feature_index()));
}
absl::Status ShapeVerifier::HandleBatchNormInference(
HloInstruction* batch_norm_inference) {
return CheckShape(batch_norm_inference,
ShapeInference::InferBatchNormInferenceShape(
batch_norm_inference->operand(0)->shape(),
batch_norm_inference->operand(1)->shape(),
batch_norm_inference->operand(2)->shape(),
batch_norm_inference->operand(3)->shape(),
batch_norm_inference->operand(4)->shape(),
batch_norm_inference->feature_index()));
}
absl::Status ShapeVerifier::HandleBatchNormGrad(
HloInstruction* batch_norm_grad) {
return CheckShape(batch_norm_grad, ShapeInference::InferBatchNormGradShape(
batch_norm_grad->operand(0)->shape(),
batch_norm_grad->operand(1)->shape(),
batch_norm_grad->operand(2)->shape(),
batch_norm_grad->operand(3)->shape(),
batch_norm_grad->operand(4)->shape(),
batch_norm_grad->feature_index()));
}
namespace {
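// Rejects operands that mix floating point precisions when mixed precision is
// disallowed. The opcodes listed below are exempt because they legitimately
// combine differently-typed values; for any other opcode, e.g. an add whose
// operands are f32 and bf16, verification fails.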
absl::Status CheckMixedPrecisionOperands(const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kDot:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAsyncDone:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncStart:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kFusion:
case HloOpcode::kGetTupleElement:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReducePrecision:
case HloOpcode::kReduceWindow:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSort:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
break;
default: {
PrimitiveType fp_type = PRIMITIVE_TYPE_INVALID;
for (auto operand : instruction->operands()) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
operand->shape(),
[&](const Shape& subshape,
const ShapeIndex& index) -> absl::Status {
if (!ShapeUtil::ElementIsFloating(subshape)) {
return absl::OkStatus();
}
if (fp_type == PRIMITIVE_TYPE_INVALID) {
fp_type = subshape.element_type();
} else if (fp_type != subshape.element_type()) {
return Internal(
"Seen floating point types of different precisions in "
"%s, but mixed precision is disallowed.",
instruction->ToString());
}
return absl::OkStatus();
}));
}
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleGather(HloInstruction* gather) {
return CheckShape(
gather,
ShapeInference::InferGatherShape(
gather->operand(0)->shape(), gather->operand(1)->shape(),
gather->gather_dimension_numbers(), gather->gather_slice_sizes()));
}
absl::Status ShapeVerifier::HandleScatter(HloInstruction* scatter) {
absl::InlinedVector<const Shape*, 3> arg_shapes;
arg_shapes.reserve(scatter->operand_count());
for (const HloInstruction* operand : scatter->operands()) {
arg_shapes.push_back(&operand->shape());
}
return CheckShape(scatter,
ShapeInference::InferScatterShape(
arg_shapes, scatter->to_apply()->ComputeProgramShape(),
scatter->scatter_dimension_numbers()));
}
absl::Status ShapeVerifier::HandleAfterAll(HloInstruction* token) {
return CheckShape(token, ShapeUtil::MakeTokenShape());
}
absl::Status ShapeVerifier::HandleAddDependency(
HloInstruction* add_dependency) {
TF_RETURN_IF_ERROR(CheckIsTokenOperand(add_dependency, 1));
return CheckShape(add_dependency, add_dependency->operand(0)->shape());
}
absl::Status ShapeVerifier::HandleGetDimensionSize(HloInstruction* get_size) {
return CheckShape(get_size,
ShapeInference::InferGetDimensionSizeShape(
get_size->operand(0)->shape(), get_size->dimension()));
}
absl::Status ShapeVerifier::HandleSetDimensionSize(HloInstruction* set_size) {
return CheckShape(set_size,
ShapeInference::InferSetDimensionSizeShape(
set_size->operand(0)->shape(),
set_size->operand(1)->shape(), set_size->dimension()));
}
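// Compares an instruction's declared shape against the inferred shape. For
// opcodes that merely forward buffers (bitcast, tuple, get-tuple-element,
// while, ...) the shapes must be the same, optionally comparing only
// minor-to-major in the layout; all other opcodes only need compatible
// shapes, ignoring floating point precision when mixed precision is allowed.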
absl::Status ShapeVerifier::CheckShape(
const HloInstruction* instruction, const Shape& inferred_shape,
bool only_compare_minor_to_major_in_layout) {
if (!opts_.allow_mixed_precision) {
TF_RETURN_IF_ERROR(CheckMixedPrecisionOperands(instruction));
}
bool equal = [&] {
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kCustomCall:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kParameter:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kTuple:
case HloOpcode::kWhile: {
Shape::Equal equal;
if (only_compare_minor_to_major_in_layout) {
equal.MinorToMajorOnlyInLayout();
}
return ShapesSame(instruction->shape(), inferred_shape, equal);
}
case HloOpcode::kDynamicUpdateSlice: {
Shape::Equal equal;
if (only_compare_minor_to_major_in_layout) {
equal.MinorToMajorOnlyInLayout();
}
if (instruction->parent()->IsFusionComputation()) {
equal.IgnoreMemorySpaceInLayout().IgnoreTilesInLayout();
}
return ShapesSame(instruction->shape(), inferred_shape, equal);
}
case HloOpcode::kCopy: {
if (opts_.IsLayoutSensitive()) {
if (instruction->shape().has_layout() &&
inferred_shape.has_layout()) {
int64_t instruction_memory_space =
instruction->shape().layout().memory_space();
int64_t operand_memory_space =
inferred_shape.layout().memory_space();
if (instruction_memory_space != operand_memory_space &&
(instruction_memory_space == Layout::kHostMemorySpace ||
operand_memory_space == Layout::kHostMemorySpace)) {
return Shape::Equal().IgnoreMemorySpaceInLayout()(
instruction->shape(), inferred_shape);
}
}
}
[[fallthrough]];
}
default:
if (opts_.allow_mixed_precision) {
return ShapeUtil::CompatibleIgnoringFpPrecision(instruction->shape(),
inferred_shape);
} else {
return ShapeUtil::Compatible(instruction->shape(), inferred_shape);
}
}
}();
if (!equal) {
return Internal(
"Expected instruction to have shape equal to %s, actual "
"shape is %s:\n%s",
StringifyShape(inferred_shape), StringifyShape(instruction->shape()),
instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::CheckShape(
const HloInstruction* instruction,
const absl::StatusOr<Shape>& inferred_shape_status) {
if (!inferred_shape_status.ok()) {
absl::Status s = inferred_shape_status.status();
tsl::errors::AppendToMessage(&s, ", for instruction ",
instruction->ToString());
return s;
}
return CheckShape(instruction, inferred_shape_status.value());
}
absl::Status ShapeVerifier::CheckUnaryShape(const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferUnaryOpShape(instruction->opcode(),
instruction->operand(0)));
}
absl::Status ShapeVerifier::CheckBinaryShape(
const HloInstruction* instruction) {
return CheckShape(
instruction, ShapeInference::InferBinaryOpShape(instruction->opcode(),
instruction->operand(0),
instruction->operand(1)));
}
absl::Status ShapeVerifier::CheckTernaryShape(
const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferTernaryOpShape(
instruction->opcode(), instruction->operand(0),
instruction->operand(1), instruction->operand(2)));
}
absl::Status ShapeVerifier::CheckVariadicShape(
const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferVariadicOpShape(
instruction->opcode(), instruction->operands()));
}
absl::Status ShapeVerifier::VerifyEntryComputationLayout(
const HloModule& module) {
const HloComputation* computation = module.entry_computation();
const auto& layout = module.entry_computation_layout();
const ShapeLayout& result_layout = layout.result_layout();
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(result_layout.shape()));
if (!ShapesSame(computation->root_instruction()->shape(),
result_layout.shape(),
Shape::Equal()
.IgnoreTilesInLayout()
.IgnoreTailPaddingAlignmentInElements()
.IgnoreMemorySpaceInLayout())) {
return Internal(
"Shape of the root instruction of entry computation (%s) should be "
"compatible to one specified in module's entry computation layout (%s)",
StringifyShape(computation->root_instruction()->shape()),
StringifyShape(result_layout.shape()));
}
if (computation->num_parameters() != layout.parameter_count()) {
return Internal(
"Number of parameters in entry computation layout (%d) must be same "
"as number of parameters of entry computation (%d)",
layout.parameter_count(), computation->num_parameters());
}
for (int i = 0; i < computation->num_parameters(); ++i) {
const HloInstruction* parameter = computation->parameter_instruction(i);
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(layout.parameter_shape(i)));
if (!ShapesSame(parameter->shape(), layout.parameter_shape(i),
Shape::Equal()
.IgnoreTilesInLayout()
.IgnoreTailPaddingAlignmentInElements()
.IgnoreMemorySpaceInLayout())) {
return Internal(
"Shape of the entry computation parameter %d is %s should be "
"compatible to the one specified in module's entry computation "
"layout %s",
i, StringifyShape(parameter->shape()),
StringifyShape(layout.parameter_shape(i)));
}
}
const auto& alias_config = module.input_output_alias_config();
TF_RETURN_IF_ERROR(alias_config.ForEachAliasWithStatus(
[&](ShapeIndex result_index,
HloInputOutputAliasConfig::Alias alias) -> absl::Status {
if (!alias.must_alias()) {
return absl::OkStatus();
}
const Shape& result_shape =
ShapeUtil::GetSubshape(result_layout.shape(), result_index);
const Shape& parameter_shape = ShapeUtil::GetSubshape(
layout.parameter_layout(alias.parameter_number).shape(),
alias.parameter_index);
if (result_shape != parameter_shape) {
return Internal(
"Shape and memory space of the result at index %s (%s) "
"must be the same as the shape and memory spaceof aliased "
"parameter %d at index %s (%s)",
result_index.ToString(), StringifyShape(result_shape),
alias.parameter_number, alias.parameter_index.ToString(),
StringifyShape(parameter_shape));
}
return absl::OkStatus();
}));
return absl::OkStatus();
}
std::string ComputationsToString(
absl::Span<HloComputation* const> computations) {
return absl::StrJoin(computations, ",",
[](std::string* s, const HloComputation* computation) {
absl::StrAppend(s, computation->name());
});
}
absl::Status VerifyInstructionNameUnchanged(const HloModule& module,
const HloVerifierOpts& opts) {
if (!opts.verify_instruction_name_unchanged) {
return absl::OkStatus();
}
for (auto* comp : module.computations()) {
for (auto* inst : comp->instructions()) {
if (inst->metadata().scheduling_name().empty()) {
continue;
}
if (inst->metadata().scheduling_name() != inst->name() &&
(!absl::StrContains(inst->name(), ".remat") &&
!absl::StrContains(inst->name(), ".clone"))) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected instruction name to remain the same. Was '",
            inst->metadata().scheduling_name(), "', is now '", inst->name(),
            "'."));
}
}
}
return absl::OkStatus();
}
absl::Status VerifyHloStructure(HloModule* module) {
for (const HloComputation* computation : module->computations()) {
if (computation == nullptr) {
return Internal("Computation in module %s is a null pointer",
module->name());
}
if (computation->parent() == nullptr) {
return Internal("Computation %s has a null parent pointer",
computation->name());
}
if (computation->parent() != module) {
return Internal("Computation %s parent() does not point to parent module",
computation->name());
}
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction == nullptr) {
return Internal("Instruction in computation %s is a null pointer",
computation->name());
}
if (instruction->parent() == nullptr) {
return Internal("Instruction %s has a null parent pointer",
instruction->name());
}
if (instruction->parent() != computation) {
return Internal(
"Instruction %s parent() does not point to parent computation",
instruction->name());
}
}
}
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
for (int i = 0; i < instruction->operand_count(); ++i) {
const HloInstruction* operand = instruction->operand(i);
if (operand == nullptr) {
return Internal(
"Operand %d (out of %d) of instruction: %s is a null pointer", i,
instruction->operand_count(), instruction->name());
}
if (operand->parent() == nullptr) {
return Internal(
"Operand %d (out of %d) of instruction: %s has a null pointer "
"parent",
i, instruction->operand_count(), instruction->name());
}
if (operand->parent() != instruction->parent()) {
return Internal(
"Operand %d (%s) of instruction %s is in a different "
"computation: %s vs %s",
i, operand->name(), instruction->name(),
operand->parent() ? operand->parent()->name() : "(null)",
instruction->parent()->name());
}
}
}
}
return absl::OkStatus();
}
namespace {
bool ShapeContainsToken(const Shape& shape) {
bool contains_token = false;
ShapeUtil::ForEachSubshape(
shape, [&contains_token](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsToken()) {
contains_token = true;
}
});
return contains_token;
}
absl::Status CheckSameChannel(const HloInstruction* instr1,
const HloInstruction* instr2) {
if (instr1->channel_id() != instr2->channel_id()) {
return Internal(
"Expected to have the same channel id, actual channel ids are: %s "
"(%d), %s (%d)",
instr1->ToString(), *instr1->channel_id(), instr2->ToString(),
*instr2->channel_id());
}
return absl::OkStatus();
}
absl::Status CheckSameIsHostTransfer(const HloInstruction* instr1,
const HloInstruction* instr2) {
const HloSendRecvInstruction* send_recv1 =
DynCast<const HloSendRecvInstruction>(instr1);
const HloSendRecvInstruction* send_recv2 =
DynCast<const HloSendRecvInstruction>(instr2);
TF_RET_CHECK(send_recv1 != nullptr);
TF_RET_CHECK(send_recv2 != nullptr);
if (send_recv1->is_host_transfer() != send_recv2->is_host_transfer()) {
return Internal(
"Expected instructions to have the same is-host-transfer property: "
"%s, "
"%s ",
instr1->ToString(), instr2->ToString());
}
return absl::OkStatus();
}
absl::Status VerifySingleUser(
const HloInstruction* instruction,
const absl::flat_hash_set<HloOpcode>& expected_users) {
TF_RET_CHECK(instruction->users().size() == 1)
<< "The " << instruction->opcode()
<< " instruction requires one consumer, found "
<< instruction->users().size();
const HloInstruction* user = instruction->users().front();
TF_RET_CHECK(expected_users.contains(user->opcode()))
<< "The consumer of a " << instruction->opcode()
<< " instruction needs to be one of ("
<< absl::StrJoin(expected_users, ", ",
[](std::string* out, HloOpcode opcode) {
absl::StrAppend(out, HloOpcodeString(opcode));
})
<< "), found " << user->opcode();
return absl::OkStatus();
}
absl::Status VerifySingleOperand(
const HloInstruction* instruction,
const std::vector<HloOpcode>& expected_operands) {
TF_RET_CHECK(instruction->operands().size() == 1)
<< "The " << instruction->opcode()
<< " instruction requires one consumer, found "
<< instruction->users().size();
const HloInstruction* operand = instruction->operand(0);
TF_RET_CHECK(absl::c_find(expected_operands, operand->opcode()) !=
expected_operands.end())
<< "The operand of a " << instruction->opcode()
<< " instruction needs to be "
<< absl::StrJoin(expected_operands, " or ",
[](std::string* out, HloOpcode opcode) {
absl::StrAppend(out, HloOpcodeString(opcode));
})
<< ", found " << operand->opcode();
return absl::OkStatus();
}
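// Checks the pairing discipline of asynchronous instructions: every start has
// exactly one consumer, every done has exactly one producer, and updates may
// chain in between. Illustrative HLO sketch (names hypothetical):
//   %s = async-start(...), %u = async-update(%s), %d = async-done(%u)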
absl::Status VerifyAsynchronousInstructionPairs(const HloModule& module) {
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
switch (instruction->opcode()) {
case HloOpcode::kAsyncStart: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kAsyncUpdate, HloOpcode::kAsyncDone}));
break;
}
case HloOpcode::kAsyncUpdate: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate}));
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kAsyncUpdate, HloOpcode::kAsyncDone}));
break;
}
case HloOpcode::kAsyncDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate}));
break;
}
case HloOpcode::kAllReduceStart: {
TF_RETURN_IF_ERROR(
VerifySingleUser(instruction, {HloOpcode::kAllReduceDone}));
break;
}
case HloOpcode::kAllReduceDone: {
TF_RETURN_IF_ERROR(
VerifySingleOperand(instruction, {HloOpcode::kAllReduceStart}));
break;
}
case HloOpcode::kCopyStart: {
TF_RETURN_IF_ERROR(
VerifySingleUser(instruction, {HloOpcode::kCopyDone}));
break;
}
case HloOpcode::kCopyDone: {
TF_RETURN_IF_ERROR(
VerifySingleOperand(instruction, {HloOpcode::kCopyStart}));
break;
}
case HloOpcode::kCollectivePermuteStart: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kCollectivePermuteDone}));
break;
}
case HloOpcode::kCollectivePermuteDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kCollectivePermuteStart}));
break;
}
case HloOpcode::kSend: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kSendDone, HloOpcode::kTuple}));
break;
}
case HloOpcode::kSendDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kSend, HloOpcode::kGetTupleElement}));
break;
}
case HloOpcode::kRecv: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kRecvDone, HloOpcode::kTuple}));
break;
}
case HloOpcode::kRecvDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kRecv, HloOpcode::kGetTupleElement}));
break;
}
default:
break;
}
}
}
return absl::OkStatus();
}
absl::Status VerifyAsyncComputation(const HloComputation* async_computation) {
if (!async_computation->CanExpandIntoSingleInstruction()) {
return FailedPrecondition(
"Asynchronous computation %s expected to contain only the root and "
"parameter instructions.",
async_computation->name());
}
return absl::OkStatus();
}
absl::Status VerifyLayoutConstrainedAllReduce(const HloModule& module) {
const HloAllReduceInstruction* reference = nullptr;
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if ((instruction->opcode() != HloOpcode::kAllReduce) &&
(instruction->opcode() != HloOpcode::kAllReduceStart)) {
continue;
}
auto all_reduce = DynCast<HloAllReduceInstruction>(instruction);
if (!reference) {
reference = all_reduce;
}
if (reference->constrain_layout() != all_reduce->constrain_layout()) {
return FailedPrecondition(
"HloModule has a mix of layout constrained and unconstrained "
"AllReduce instructions.");
}
}
}
return absl::OkStatus();
}
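// Verifies channel usage across the module: send/send-done and recv/recv-done
// pairs must agree on channel id and host-transfer status, a host-transfer
// channel may carry only one send/recv pair, and, when unique channel ids are
// enforced, a non-send/recv channel id may only be shared by instructions of
// the same opcode.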
absl::Status VerifyChannels(const HloModule& module,
const HloVerifierOpts& opts) {
absl::flat_hash_map<int64_t, std::vector<const HloInstruction*>>
channel_instructions;
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
auto channel_instr = DynCast<HloChannelInstruction>(instruction);
if (!channel_instr || !channel_instr->channel_id()) {
continue;
}
channel_instructions[*channel_instr->channel_id()].push_back(instruction);
switch (instruction->opcode()) {
case HloOpcode::kSend: {
TF_RET_CHECK(instruction->users().size() == 1);
const HloInstruction* send_done = instruction->users().front();
if (send_done->opcode() == HloOpcode::kSendDone) {
TF_RETURN_IF_ERROR(CheckSameChannel(instruction, send_done));
TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, send_done));
}
break;
}
case HloOpcode::kRecv: {
TF_RET_CHECK(instruction->users().size() == 1);
const HloInstruction* recv_done = instruction->users().front();
if (recv_done->opcode() == HloOpcode::kRecvDone) {
TF_RETURN_IF_ERROR(CheckSameChannel(instruction, recv_done));
TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, recv_done));
}
break;
}
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
TF_RET_CHECK(instruction->operands().size() == 1);
break;
default:
break;
}
}
}
for (auto& pair : channel_instructions) {
auto& instructions = pair.second;
const HloInstruction* first = instructions[0];
if (const auto* sendrecv = DynCast<HloSendRecvInstruction>(first)) {
absl::flat_hash_set<HloOpcode> opcodes;
for (const HloInstruction* instr : instructions) {
opcodes.insert(instr->opcode());
auto cast = DynCast<HloSendRecvInstruction>(instr);
TF_RET_CHECK(cast != nullptr)
<< "channel " << pair.first
<< " is used for different types of channel instructions";
}
if (sendrecv->is_host_transfer()) {
TF_RET_CHECK(instructions.size() == 2)
<< "channel " << pair.first
<< " is used for multiple host send/recv instructions";
}
} else {
for (const HloInstruction* instr : instructions) {
if (opts.verify_unique_channel_ids) {
TF_RET_CHECK(first->opcode() == instr->opcode())
<< "channel " << pair.first
<< " is used for different types of channel instructions";
}
}
}
}
return absl::OkStatus();
}
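// Checks the internal consistency of a fusion: the fused computation points
// back at the fusion instruction, the root and each parameter appear exactly
// once, non-root instructions have only internal users, and parameter numbers
// form a dense range 0..N-1 covered by the fusion's operands.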
absl::Status CheckFusionInstruction(HloInstruction* fusion) {
HloComputation* fused_computation = fusion->fused_instructions_computation();
if (fusion != fused_computation->FusionInstruction()) {
return Internal(
"Instruction of fused computation does not match expected "
"instruction "
"%s.",
fusion->ToString());
}
bool root_owned = false;
const auto& fused_parameters = fusion->fused_parameters();
const HloInstruction* fused_root = fusion->fused_expression_root();
std::vector<bool> parameter_owned(fused_parameters.size(), false);
for (auto* instruction : fused_computation->instructions()) {
if (fused_root == instruction) {
if (root_owned) {
return Internal("Root appears more than once in %s.",
fusion->ToString());
}
root_owned = true;
}
for (int i = 0; i < fused_parameters.size(); ++i) {
if (fused_parameters[i] == instruction) {
if (parameter_owned[i]) {
return Internal("Parameter appears more than once in %s.",
fusion->ToString());
}
parameter_owned[i] = true;
}
}
}
if (!root_owned) {
return Internal("Root not found in computation of %s.", fusion->ToString());
}
for (int i = 0; i < parameter_owned.size(); i++) {
if (!parameter_owned[i]) {
return Internal("Parameter %d not found in computation of %s.", i,
fusion->ToString());
}
}
if (fused_root->user_count() != 0) {
return Internal("Root of %s may not have users.", fusion->ToString());
}
for (auto* instruction :
fusion->fused_instructions_computation()->instructions()) {
if (instruction != fused_root) {
if (instruction->user_count() == 0) {
return Internal("Non-root instruction %s in %s must have users.",
instruction->ToString(), fusion->ToString());
}
for (auto& user : instruction->users()) {
if (fused_computation != user->parent()) {
return Internal(
"Non-root instruction %s in %s may not have external users.",
instruction->ToString(), fusion->ToString());
}
}
}
}
CHECK_GE(fusion->operands().size(), fused_parameters.size());
std::vector<bool> parameter_numbers(fused_parameters.size(), false);
for (auto fused_param : fused_parameters) {
int64_t param_no = fused_param->parameter_number();
if (param_no < 0) {
return Internal("Unexpected negative parameter number %d in %s.",
param_no, fusion->ToString());
}
if (param_no >= fused_parameters.size()) {
return Internal(
"Unexpected parameter number %d in %s: higher then number of "
"parameters %lu.",
param_no, fusion->ToString(), fused_parameters.size());
}
if (parameter_numbers[param_no]) {
return Internal(
"Did not expect parameter number %d more than once in %s.", param_no,
fusion->ToString());
}
parameter_numbers[param_no] = true;
}
for (int i = 0; i < parameter_numbers.size(); i++) {
if (!parameter_numbers[i]) {
return Internal("Did not see parameter number %d in %s.", i,
fusion->ToString());
}
}
TF_RET_CHECK(fusion->called_computations() ==
absl::Span<HloComputation* const>(
{fusion->fused_instructions_computation()}))
<< "Fusion HLO calls computations other than the "
"fused_instructions_computation: "
<< fusion->ToString() << " fusion->fused_instructions_computation(): "
<< fusion->fused_instructions_computation()->ToString()
<< " fusion->called_computations(): "
<< ComputationsToString(fusion->called_computations());
for (const auto& fused : fusion->fused_instructions()) {
TF_RET_CHECK(fused->parent() == fusion->fused_instructions_computation())
<< "Fused HLO was missing a parent: " << fused->ToString()
<< " parent: " << fused->parent()
<< " computation: " << fusion->parent();
}
return absl::OkStatus();
}
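// Checks that an elementwise instruction performs no implicit broadcast:
// every operand must already have the output's dimensions, so e.g. adding
// f32[3] to f32[3,2] is rejected until the operand is explicitly broadcast.
// Compare instructions must also use a comparison type consistent with their
// operand element type.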
absl::Status CheckElementwiseInstruction(HloInstruction* instruction) {
const Shape& out_shape = instruction->shape();
for (HloInstruction* operand : instruction->operands()) {
const Shape& operand_shape = operand->shape();
if (!ShapeUtil::CompatibleIgnoringElementType(operand_shape, out_shape)) {
return FailedPrecondition(
"Implicit broadcast is not allowed in HLO."
"Found different shapes for instruction %s.\n"
"output: %s\noperand: %s\n",
HloOpcodeString(instruction->opcode()),
ShapeUtil::HumanString(out_shape),
ShapeUtil::HumanString(operand_shape));
}
}
if (auto* comparison = DynCast<HloCompareInstruction>(instruction)) {
const Shape& operand_shape = comparison->operand(1)->shape();
PrimitiveType operand_element_type = operand_shape.element_type();
Comparison::Type default_comparison_type =
Comparison::DefaultComparisonType(operand_element_type);
if (primitive_util::IsFloatingPointType(operand_element_type)) {
if (comparison->type() != Comparison::Type::kFloat &&
comparison->type() != Comparison::Type::kFloatTotalOrder) {
return FailedPrecondition(
"Expected comparison type %s or %s.\n"
"actual: %s\noperand: %s\n",
ComparisonTypeToString(Comparison::Type::kFloat),
ComparisonTypeToString(Comparison::Type::kFloatTotalOrder),
ComparisonTypeToString(comparison->type()),
ShapeUtil::HumanString(operand_shape));
}
} else if (comparison->type() != default_comparison_type) {
return FailedPrecondition(
"Expected comparison type %s.\n"
"actual: %s\noperand: %s\n",
ComparisonTypeToString(default_comparison_type),
ComparisonTypeToString(comparison->type()),
ShapeUtil::HumanString(operand_shape));
}
}
return absl::OkStatus();
}
class InstructionVerifier : public DfsHloVisitorWithDefault {
public:
InstructionVerifier(const HloModule* module, const HloVerifierOpts& opts)
: opts_(opts) {
const int64_t num_partitions = module->config().num_partitions();
if (module->config().use_spmd_partitioning() &&
opts.verify_sharding_device_numbers && num_partitions > 1) {
num_devices_ = module->config().num_partitions();
}
}
absl::Status DefaultAction(HloInstruction*) override {
return absl::OkStatus();
}
absl::Status HandleFusion(HloInstruction* fusion) override {
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
fusion, false));
return CheckFusionInstruction(fusion);
}
absl::Status HandleBroadcast(HloInstruction* broadcast) override {
TF_RET_CHECK(broadcast->dimensions().size() ==
broadcast->operand(0)->shape().rank())
<< "Broadcast HLO (" << broadcast->ToShortString()
<< ") has invalid number of dimensions: "
<< broadcast->dimensions().size()
<< " != " << broadcast->operand(0)->shape().rank();
if (opts_.verify_broadcast_dimensions_order) {
TF_RET_CHECK(absl::c_is_sorted(broadcast->dimensions()))
<< "Broadcast dimensions should be ordered, got: "
<< broadcast->ToString();
}
return absl::OkStatus();
}
absl::Status HandleBitcastConvert(HloInstruction* c) override {
return absl::OkStatus();
}
absl::Status HandleWhile(HloInstruction* xla_while) override {
auto* while_cond = xla_while->while_condition();
auto* while_body = xla_while->while_body();
if (while_cond->num_parameters() != 1) {
return FailedPrecondition(
"While condition must have exactly 1 parameter; had %d : %s",
while_cond->num_parameters(), while_cond->ToString());
}
if (while_body->num_parameters() != 1) {
return FailedPrecondition(
"While body must have exactly 1 parameter; had %d : %s",
while_body->num_parameters(), while_body->ToString());
}
if (xla_while->operand_count() != 1) {
return FailedPrecondition(
"While loop must have exactly one operand; had %d : %s",
xla_while->operand_count(), xla_while->ToString());
}
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
xla_while, true));
TF_RETURN_IF_ERROR(VerifyConsistentSharding(
xla_while, {xla_while, xla_while->while_body()->root_instruction(),
xla_while->while_body()->parameter_instruction(0),
xla_while->while_condition()->parameter_instruction(0)}));
return absl::OkStatus();
}
absl::Status HandleCall(HloInstruction* call) override {
return CheckCallableInstructionThreadName(
call, true);
}
absl::Status HandleConditional(HloInstruction* conditional) override {
const std::vector<HloComputation*> branch_computations =
conditional->branch_computations();
std::vector<const HloInstruction*> sharding_check_instructions;
sharding_check_instructions.reserve(branch_computations.size() + 1);
sharding_check_instructions.push_back(conditional);
for (const HloComputation* branch_computation : branch_computations) {
if (branch_computation->num_parameters() != 1) {
return FailedPrecondition(
"Branch computation %s of %s must have 1 parameter instead of %d",
branch_computation->name(), conditional->ToString(),
branch_computation->num_parameters());
}
sharding_check_instructions.push_back(
branch_computation->root_instruction());
}
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
conditional, true));
TF_RETURN_IF_ERROR(
VerifyConsistentSharding(conditional, sharding_check_instructions));
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* instruction) override {
return CheckElementwiseInstruction(instruction);
}
absl::Status HandleElementwiseBinary(HloInstruction* instruction) override {
return CheckElementwiseInstruction(instruction);
}
absl::Status HandleGetTupleElement(HloInstruction* gte) override {
TF_RET_CHECK(gte->operand(0)->shape().IsTuple());
return absl::OkStatus();
}
absl::Status HandleTranspose(HloInstruction* transpose) override {
const Shape& shape = transpose->shape();
const HloInstruction* operand = transpose->operand(0);
TF_RET_CHECK(shape.dimensions().size() == transpose->dimensions().size());
TF_RET_CHECK(shape.dimensions().size() ==
transpose->operand(0)->shape().dimensions().size());
TF_RET_CHECK(std::equal(
shape.dimensions().begin(), shape.dimensions().end(),
Permute(operand->shape().dimensions(), transpose->dimensions())
.begin()))
<< "shape: " << shape << ", operand->shape(): " << shape
<< ", dimensions: {" << absl::StrJoin(transpose->dimensions(), ", ")
<< "}";
return absl::OkStatus();
}
absl::Status HandleAllReduce(HloInstruction* crs) override {
if (crs->channel_id().has_value()) {
TF_RET_CHECK(crs->channel_id().value() > 0)
<< "All reduce channel id must be greater than 0 for "
<< crs->ToShortString();
}
return absl::OkStatus();
}
absl::Status HandleReshape(HloInstruction* hlo) override {
if (opts_.verify_reshape_is_bitcast && !hlo->IsFused()) {
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(hlo->operand(0)->shape(), hlo->shape()))
<< "Reshape should be a physical bitcast, got: " << hlo->ToString();
}
return absl::OkStatus();
}
absl::Status HandleCustomCall(HloInstruction* hlo) override {
if (opts_.verify_custom_call_nested_computation_thread_name) {
return CheckCallableInstructionThreadName(
hlo, true);
}
return absl::OkStatus();
}
absl::Status HandleScatter(HloInstruction* scatter) override {
int64_t rank = scatter->operand(0)->shape().rank();
for (int64_t operand_dim :
scatter->scatter_dimension_numbers().scatter_dims_to_operand_dims()) {
if (operand_dim > rank) {
return absl::OutOfRangeError(absl::StrCat(
"The provided scatter_dims_to_operand_dim was out of range.",
" (operand_dim: ", operand_dim, ", rank: ", rank, ")"));
}
}
return absl::OkStatus();
}
absl::Status Preprocess(HloInstruction* instruction) override {
auto [it, inserted] =
instructions_by_name_.emplace(instruction->name(), instruction);
TF_RET_CHECK(inserted) << "HLO has name that is not unique within module:\n"
<< instruction->ToString() << " in computation: "
<< instruction->parent()->name()
<< "\nPrevious HLO with same name:\n"
<< it->second->ToString() << " in computation: "
<< it->second->parent()->name();
if (instruction->has_sharding()) {
absl::Status status =
instruction->sharding().Validate(instruction->shape(), num_devices_);
if (!status.ok()) {
return absl::Status(
status.code(),
absl::StrCat("Invalid sharding for instruction: ",
instruction->ToString(), ": ", status.message()));
}
}
if (instruction->has_to_apply() &&
instruction->to_apply()->execution_thread() !=
instruction->parent()->execution_thread()) {
return Internal(
"%s top_apply computation execution thread does not match (%s vs %s)",
instruction->name(), instruction->to_apply()->execution_thread(),
instruction->parent()->execution_thread());
}
return absl::OkStatus();
}
absl::Status Postprocess(HloInstruction* instruction) override {
if (!opts_.InstructionCanChangeLayout(instruction) &&
LayoutUtil::IsDenseArray(instruction->shape()) &&
instruction->shape().has_layout()) {
const Shape& result_shape = instruction->shape();
const Layout& result_layout = result_shape.layout();
for (HloInstruction* operand : instruction->operands()) {
const Shape& operand_shape = operand->shape();
if (LayoutUtil::IsDenseArray(operand_shape) &&
operand_shape.rank() == result_shape.rank() &&
operand_shape.has_layout()) {
const Layout& operand_layout = operand_shape.layout();
Layout::Equal equal_predicate =
Layout::Equal().IgnoreTiles().IgnoreMemorySpace();
if (instruction->opcode() == HloOpcode::kConvert ||
instruction->opcode() == HloOpcode::kCompare ||
(instruction->opcode() == HloOpcode::kSelect &&
operand_shape.element_type() == PRED)) {
equal_predicate.IgnoreElementSize();
} else if (instruction->opcode() == HloOpcode::kDynamicSlice ||
instruction->opcode() == HloOpcode::kDynamicUpdateSlice ||
instruction->opcode() == HloOpcode::kCopy) {
TF_RETURN_IF_ERROR(HostOffloadInstructionCanChangeMemorySpace(
instruction, operand_layout.memory_space(),
result_layout.memory_space()));
equal_predicate.IgnoreMemorySpace();
}
TF_RET_CHECK(equal_predicate(result_layout, operand_layout))
<< "Instruction shouldn't change layouts "
<< instruction->ToString() << " From " << result_shape << " To "
<< operand_shape;
}
}
}
return absl::OkStatus();
}
private:
static absl::Status VerifyConsistentSharding(
const HloInstruction* parent,
absl::Span<const HloInstruction* const> instructions) {
const HloInstruction* common_sharding_inst = nullptr;
for (const HloInstruction* check_inst : instructions) {
if (!check_inst->has_sharding()) {
continue;
}
if (!common_sharding_inst) {
common_sharding_inst = check_inst;
continue;
}
TF_RET_CHECK(check_inst->sharding() == common_sharding_inst->sharding())
<< "Inconsistent " << parent->opcode()
<< " sharding among instructions: \n"
<< common_sharding_inst->ToString() << "\n"
<< check_inst->ToString();
}
return absl::OkStatus();
}
static absl::Status HostOffloadInstructionCanChangeMemorySpace(
const HloInstruction* instruction, const int64_t operand_memory_space,
const int64_t result_memory_space) {
TF_RET_CHECK(!(operand_memory_space == Layout::kGenericFastMemorySpace &&
result_memory_space != Layout::kGenericFastMemorySpace) ||
(operand_memory_space != Layout::kGenericFastMemorySpace &&
result_memory_space == Layout::kGenericFastMemorySpace))
<< "Instruction shouldn't change layout memory space between generic "
"fast memory space and others for instruction: "
<< instruction->ToString();
if (instruction->opcode() == HloOpcode::kDynamicSlice) {
TF_RET_CHECK(!(operand_memory_space == Layout::kDefaultMemorySpace &&
result_memory_space == Layout::kHostMemorySpace))
<< "DynamicSlice instruction shouldn't change layout memory "
<< "space from device to host: " << instruction->ToString();
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
TF_RET_CHECK(!(operand_memory_space == Layout::kHostMemorySpace &&
result_memory_space == Layout::kDefaultMemorySpace))
<< "DynamicUpdateSlice instruction shouldn't change layout "
<< "memory space from host to device: " << instruction->ToString();
} else if (instruction->opcode() != HloOpcode::kCopy) {
return absl::InvalidArgumentError(
absl::StrCat("Instruction shouldn't change layout memory space: ",
instruction->ToString()));
}
return absl::OkStatus();
}
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name_;
const HloVerifierOpts& opts_;
std::optional<int64_t> num_devices_;
};
}
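// Runs the full verification pipeline: structural checks, async pairing,
// channel checks, per-computation shape and instruction verification, entry
// computation layout, the schedule (if present), aliasing and buffer-donor
// configs, and layout-constrained all-reduce consistency. On success the pass
// always reports the module as unchanged.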
absl::StatusOr<bool> HloVerifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto disabled = module->config().debug_options().xla_disable_hlo_passes();
if (std::find(disabled.begin(), disabled.end(), name()) != disabled.end()) {
return false;
}
auto status_or_changed = [&]() -> absl::StatusOr<bool> {
TF_RET_CHECK(!module->name().empty());
if (module->entry_computation()->IsFusionComputation()) {
return InvalidArgument(
"Module entry computation cannot be a fusion computation");
}
TF_RETURN_IF_ERROR(VerifyHloStructure(module));
TF_RETURN_IF_ERROR(VerifyAsynchronousInstructionPairs(*module));
TF_RETURN_IF_ERROR(
VerifyChannels(*module, target_metadata_->GetVerifierOpts()));
TF_RETURN_IF_ERROR(VerifyInstructionNameUnchanged(
*module, target_metadata_->GetVerifierOpts()));
std::unique_ptr<ShapeVerifier> shape_verifier =
target_metadata_->GetVerifier();
InstructionVerifier instruction_verifier(
module, target_metadata_->GetVerifierOpts());
for (auto* computation : module->computations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(shape_verifier.get()));
TF_RETURN_IF_ERROR(computation->Accept(&instruction_verifier));
if (computation->IsAsyncComputation()) {
TF_RETURN_IF_ERROR(VerifyAsyncComputation(computation));
}
}
TF_RETURN_IF_ERROR(shape_verifier->VerifyEntryComputationLayout(*module));
if (module->has_schedule()) {
TF_RETURN_IF_ERROR(module->schedule().Verify());
}
if (HloInstruction::IsThreadIncluded(
module->entry_computation()->execution_thread(),
execution_threads)) {
TF_RETURN_IF_ERROR(module->input_output_alias_config().Verify(
*module, [this](const Shape& shape) -> int64_t {
if (target_metadata_->GetVerifierOpts().IsLayoutSensitive()) {
return target_metadata_->GetVerifierOpts().ShapeSize(shape);
} else {
return 0;
}
}));
}
TF_RETURN_IF_ERROR(module->buffer_donor_config().Verify(*module));
TF_RETURN_IF_ERROR(VerifyLayoutConstrainedAllReduce(*module));
return false;
}();
if (status_or_changed.ok()) {
return status_or_changed.value();
}
return absl::Status(status_or_changed.status().code(),
absl::StrCat("during context [", context_, "]: ",
status_or_changed.status().message()));
}
MetadataTracker::MetadataTracker(absl::string_view prefix) : prefix_(prefix) {}
MetadataTracker::~MetadataTracker() {
if (instruction_count_ == 0) {
return;
}
const std::map<std::string, double> values = {
{"instruction_count", 1.0 * instruction_count_},
{"op_type_coverage", 1.0 * has_op_type_count_ / instruction_count_},
{"op_name_coverage", 1.0 * has_op_name_count_ / instruction_count_},
{"source_file_coverage",
1.0 * has_source_file_count_ / instruction_count_},
{"dummy_source_file_coverage",
1.0 * has_dummy_source_file_count_ / instruction_count_},
{"source_line_coverage",
1.0 * has_source_line_count_ / instruction_count_},
{"creation_pass_coverage",
1.0 * has_creation_pass_id_count_ / instruction_count_},
{"logical_creation_pass_coverage",
1.0 * has_logical_creation_pass_id_count_ / instruction_count_},
{"size_of_generated_code_in_bytes_coverage",
1.0 * has_size_of_generated_code_in_bytes_count_ / instruction_count_},
{"size_of_memory_working_set_in_bytes_coverage",
1.0 * has_size_of_memory_working_set_in_bytes_count_ /
instruction_count_},
{"profile_info_coverage",
1.0 * has_profile_info_count_ / instruction_count_}};
LOG(INFO) << prefix_ << " "
<< absl::StrJoin(values, ",", absl::PairFormatter("="));
}
void MetadataTracker::HandleMetadata(const OpMetadata& metadata) {
++instruction_count_;
if (!metadata.op_type().empty()) {
++has_op_type_count_;
}
if (!metadata.op_name().empty()) {
++has_op_name_count_;
}
if (!metadata.source_file().empty()) {
++has_source_file_count_;
if (absl::StrContains(metadata.source_file(), "dummy")) {
++has_dummy_source_file_count_;
}
}
if (metadata.source_line() != 0) {
++has_source_line_count_;
}
if (metadata.size_of_generated_code_in_bytes() != 0) {
++has_size_of_generated_code_in_bytes_count_;
}
if (metadata.size_of_memory_working_set_in_bytes() != 0) {
++has_size_of_memory_working_set_in_bytes_count_;
}
if (metadata.has_profile_info()) {
++has_profile_info_count_;
}
}
absl::Status MetadataTracker::DefaultAction(HloInstruction* instruction) {
HandleMetadata(instruction->metadata());
return absl::OkStatus();
}
}  // namespace xla

#include "xla/service/hlo_verifier.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/log_severity.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/layout_assignment.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
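// Builds a module directly so that intentionally invalid HLO can be
// constructed without tripping verification at creation time.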
std::unique_ptr<HloModule> CreateUnverifiedModule() {
return std::make_unique<HloModule>("module", HloModuleConfig());
}
class HloVerifierTest : public HloTestBase {
public:
HloVerifierTest()
: HloTestBase(/*verifier_layout_sensitive=*/false,
/*allow_mixed_precision_in_hlo_verifier=*/false) {}
};
class HloVerifierTestAllowMixedPrecision : public HloTestBase {
public:
HloVerifierTestAllowMixedPrecision()
: HloTestBase(/*verifier_layout_sensitive=*/false,
/*allow_mixed_precision_in_hlo_verifier=*/true) {}
};
class HloVerifierTestLayoutSensitive : public HloTestBase {
public:
HloVerifierTestLayoutSensitive()
: HloTestBase(/*verifier_layout_sensitive=*/true,
/*allow_mixed_precision_in_hlo_verifier=*/false,
LayoutAssignment::InstructionCanChangeLayout) {}
};
class HloVerifierTestLayoutSensitiveAndAllowMixedPrecision
: public HloTestBase {
public:
HloVerifierTestLayoutSensitiveAndAllowMixedPrecision()
: HloTestBase(/*verifier_layout_sensitive=*/true,
/*allow_mixed_precision_in_hlo_verifier=*/true,
LayoutAssignment::InstructionCanChangeLayout) {}
};
class HloVerifierTestLayoutFusion : public HloTestBase {
public:
HloVerifierTestLayoutFusion()
: HloTestBase(/*verifier_layout_sensitive=*/true,
/*allow_mixed_precision_in_hlo_verifier=*/false) {}
};
TEST_F(HloVerifierTest, NullInstructionParent) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
negate->set_parent(nullptr);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
TEST_F(HloVerifierTest, NullComputationParent) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
computation->set_parent(nullptr);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
TEST_F(HloVerifierTest, DifferentOperandParents) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
HloComputation::Builder emb_builder(TestName());
HloInstruction* emb_param = emb_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
module->AddEmbeddedComputation(emb_builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
TF_ASSERT_OK(negate->ReplaceOperandWith(0, emb_param));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("is in a different computation"));
}
TEST_F(HloVerifierTest, ResetsShapeVerifierState) {
HloComputation::Builder builder(TestName());
Shape s1 = ShapeUtil::MakeShape(F32, {1});
Shape s2 = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(s2, HloOpcode::kAdd, param, param));
builder.AddInstruction(
HloInstruction::CreateBinary(s2, HloOpcode::kMultiply, add, add));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_FALSE(verifier().Run(module.get()).status().ok());
EXPECT_FALSE(verifier().Run(module.get()).status().ok());
}
TEST_F(HloVerifierTest, CheckCallOperandParameterShapesMismatch) {
const char* const hlo_string = R"(
HloModule Module
callme {
ROOT param = (s32[], f32[4]) parameter(0)
}
ENTRY entry {
p0 = (f32[4], s32[]) parameter(0)
ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
TEST_F(HloVerifierTest, CheckCallThreadMismatch) {
constexpr absl::string_view hlo = R"(
HloModule Module
callme {
ROOT param = (s32[], f32[4]) parameter(0)
}, execution_thread="parallel_thread"
ENTRY entry {
p0 = (s32[], f32[4]) parameter(0)
ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("mycall top_apply computation execution thread does "
"not match (parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, CompositeCall) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar",composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallMissingFrontendAttributes) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("A composite call op must have frontend attributes"));
}
TEST_F(HloVerifierTest, CompositeCallOptionalAttributesAndVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallOptionalAttributes) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar",composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallMissingName) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("A composite call op must have frontend attributes "
"with key composite.name whose value is non-empty"));
}
TEST_F(HloVerifierTest, CompositeCallOptionalVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallNonNegativeVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="-1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer"));
}
TEST_F(HloVerifierTest, CheckConditionalOperandParameterShapesMismatch) {
const char* const hlo_string = R"(
HloModule Module
true_branch {
tparam = (s32[], f32[4]) parameter(0)
ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
}
false_branch {
fparam = (s32[], f32[4]) parameter(0)
ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1
}
ENTRY entry {
p0 = (f32[4], s32[]) parameter(0)
constant = pred[] constant(true)
ROOT conditional = f32[4] conditional(constant, p0, p0),
true_computation=true_branch, false_computation=false_branch
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchIndexOperandShape) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
TF_ASSERT_OK(verifier().Run(module.get()).status());
HloInstruction* condition = FindInstruction(module.get(), "b0");
*condition->mutable_shape() = ShapeUtil::MakeShape(F32, {});
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"first operand of indexed conditional must be a scalar of S32"));
*condition->mutable_shape() = ShapeUtil::MakeShape(S32, {4});
status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("first operand of conditional must be a scalar"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchThread) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}, execution_thread="parallel_thread"
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
EXPECT_THAT(status.message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchContainsAsyncThread) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
%async-start = ((f32[4]), f32[4], s32[]) custom-call-start(f32[4] fparam), async_execution_thread="parallel_thread", custom_call_target="foo"
ROOT %async-done = f32[4] custom-call-done(((f32[4]), f32[4], s32[]) %async-start)
}
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, RngOpnd0NotScalar) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOpnd0NotScalar {
constant.0 = f32[] constant(0)
constant.1 = f16[2] constant({1, 3})
ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[2] constant.1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected scalar type"));
}
TEST_F(HloVerifierTest, RngOperandElementTypesDoNotMatch) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f16[] constant(1)
ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
TEST_F(HloVerifierTest, RngMixedPrecisionNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngResultElementTypeNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, RngMixedPrecisionAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngResultElementTypeNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, RngElementTypeNotSupported) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngElementTypeNotSupported {
constant.0 = s32[] constant(0)
constant.1 = s32[] constant(1)
ROOT rng.0 = s32[10]{0} rng(s32[] constant.0, s32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Element type not supported"));
}
TEST_F(HloVerifierTest, NegativeInteriorPaddingNotAllowed) {
HloComputation::Builder builder(TestName());
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param"));
PaddingConfig padding_config;
padding_config.add_dimensions()->set_interior_padding(-1);
builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {100}), param,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
padding_config));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Interior padding cannot be negative"));
}
TEST_F(HloVerifierTest, PadNegativeInteriorDilationNotAllowed) {
HloComputation::Builder builder(TestName());
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param"));
PaddingConfig padding_config;
padding_config.add_dimensions()->set_interior_padding(-1);
builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {100}), param,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32).Clone())),
padding_config));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Interior padding cannot be negative"));
}
TEST_F(HloVerifierTest, DotMixedPrecisionAllowed) {
static const char* const kDotHloString = R"(
HloModule module
ENTRY entry_computation {
a = f32[2,10] parameter(0)
b = bf16[10,2] parameter(1)
ROOT dot = f32[2,2] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDotHloString));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok()) << status;
}
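// Convolution used by the window/base dilation tests below; each test
// mutates one window dimension to make it invalid.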
static const char* const kConvHloString = R"(
HloModule module
ENTRY entry_computation {
param0 = f16[128,128,56,56] parameter(0)
param1 = f16[3,3,128,128] parameter(1)
zero_f16 = f16[] constant(0)
ROOT conv = f16[128,128,28,28] convolution(param0, param1),
window={size=3x3 stride=2x2}, dim_labels=bf01_01io->bf01
})";
TEST_F(HloVerifierTest, ConvNegativeWindowDilationNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kConvHloString));
auto* conv = module->entry_computation()->root_instruction();
Window w = conv->window();
w.mutable_dimensions(0)->set_window_dilation(-1);
conv->set_window(w);
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("non-positive window dilation factor"));
}
TEST_F(HloVerifierTest, ConvNegativeBaseDilationNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kConvHloString));
auto* conv = module->entry_computation()->root_instruction();
Window w = conv->window();
w.mutable_dimensions(0)->set_base_dilation(-1);
conv->set_window(w);
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("non-positive base area dilation factor"));
}
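// Adds two parameters whose layouts differ; accepted by the default verifier
// but rejected by the layout-sensitive one.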
static const char* const kAddWithLayoutChangeHlo = R"(
HloModule AddWithLayoutChange
ENTRY AddWithLayoutChange {
par0 = f32[3,4]{1,0} parameter(0)
par1 = f32[3,4]{0,1} parameter(1)
ROOT add0 = f32[3,4]{1,0} add(par0,par1)
}
)";
TEST_F(HloVerifierTest, AddWithLayoutChange) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kAddWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, ScalarIndexDynamicSlice) {
const char* const kScalarIndexDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258] parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258] dynamic-slice(s32[2,2,258] %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kScalarIndexDynamicSlice, config));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, ScalarIndexDynamicUpdateSlice) {
const char* const kScalarIndexDynamicSlice = R"(
HloModule DynamicUpdateSlice_module
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kScalarIndexDynamicSlice, config));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestAllowMixedPrecision, DynamicUpdateSliceMixedPrecision) {
const char* const kDynamicUpdateSliceMixedPrecision = R"(
HloModule kDynamicUpdateSliceMixedPrecision
ENTRY %entry (parameter.0: f32[32,511,2048], parameter.1: bf16[32,511,512], parameter.2: s32[], parameter.3: s32[], parameter.4: s32[]) -> bf16[32,511,2048] {
%parameter.0 = f32[32,511,2048] parameter(0)
%parameter.1 = bf16[32,511,512] parameter(1)
%parameter.2 = s32[] parameter(2)
%parameter.3 = s32[] parameter(3)
%parameter.4 = s32[] parameter(4)
ROOT %dus = bf16[32,511,2048] dynamic-update-slice(f32[32,511,2048] %parameter.0, bf16[32,511,512] %parameter.1, s32[] %parameter.2, s32[] %parameter.3, s32[] %parameter.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(
kDynamicUpdateSliceMixedPrecision));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[32,511,2048], actual shape is bf16[32,511,2048]"));
}
TEST_F(HloVerifierTestLayoutSensitive, AddWithLayoutChangeNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kAddWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, SliceWithLayoutChangeNotAllowed) {
const char* const kSliceWithLayoutChangeHlo = R"(
HloModule SliceWithLayoutChange
ENTRY SliceWithLayoutChange {
par0 = f32[4,5]{0,1} parameter(0)
par1 = s32[] parameter(1)
par2 = s32[] parameter(2)
ROOT dslice0 = f32[3,4]{1,0} dynamic-slice(par0, par1, par2),
dynamic_slice_sizes={3,4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kSliceWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, ConcatWithLayoutChangeNotAllowed) {
const char* const kConcatWithLayoutChangeHlo = R"(
HloModule ConcatWithLayoutChange
ENTRY ConcatWithLayoutChange {
par0 = f32[3,5]{0,1} parameter(0)
par1 = f32[3,3]{1,0} parameter(1)
ROOT concat0 = f32[3,8]{1,0} concatenate(f32[3,5] par0, f32[3,3] par1),
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kConcatWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, BitcastNeedsSameNumberOfElements) {
const char* const hlo_string = R"(
HloModule Module
ENTRY BitcastNeedsToBeNoOp {
constant.0 = f32[2] constant({0.0, 0.0})
ROOT bitcast = f32[3] bitcast(constant.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Bitcast cannot have different shape sizes of output "
"(12) and operand (8)"));
}
TEST_F(HloVerifierTest, SelectMixedPrecisionNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectMixedPrecisionNotAllowed {
p0 = pred[32] parameter(0)
p1 = f32[32] parameter(1)
p2 = bf16[32] parameter(2)
ROOT select = f32[32] select(p0, p1, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Seen floating point types of different precisions"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, SelectMixedPrecisionAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectMixedPrecisionAllowed {
p0 = pred[32] parameter(0)
p1 = f32[32] parameter(1)
p2 = bf16[32] parameter(2)
ROOT select = f32[32] select(p0, p1, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, SelectTupleNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectWithTuple {
p0 = (f32[], f32[]) parameter(0)
p1 = (f32[], f32[]) parameter(1)
p2 = pred[] parameter(2)
ROOT select = (f32[], f32[]) select(p2, p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected array argument for select"));
}
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDoneWrongLayout) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
copy-start = (f32[2,3]{0,1:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to"));
}
TEST_F(HloVerifierTest, CopyStartAndCopyDoneWrongType) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
copy-start = f32[2,3] copy-start(p0)
ROOT copy-done = f32[2,3] copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"(f32[2,3], f32[2,3], u32[])"));
}
TEST_F(HloVerifierTest, CopyStartMultipleCopyDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
copy-start = (f32[2,3], f32[2,3], u32[]) copy-start(p0)
copy-done.1 = f32[2,3] copy-done(copy-start)
copy-done.2 = f32[2,3] copy-done(copy-start)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(copy-done.1, copy-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("copy-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, CopyDoneNoCopyStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = (f32[2,3], f32[2,3], u32[]) tuple(p0, p0, p1)
ROOT copy-done = f32[2,3] copy-done(tuple)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a copy-done instruction needs to be "
"copy-start, found tuple"));
}
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncUpdateAndAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
AsyncStartAndAsyncUpdateAndAsyncDoneWithThreadName) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), async_execution_thread="parallel_thread", custom_call_target="foo"
async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, AsyncStartAndAsyncDoneWrongType) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3] custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-done expects the shape of output to match the "
"async shape at index {1}"));
}
TEST_F(HloVerifierTest, AsyncStartMultipleAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-done.1 = f32[2,3] custom-call-done(async-start)
async-done.2 = f32[2,3] custom-call-done(async-start)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(async-done.1, async-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, AsyncStartNoAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
ROOT async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-start instruction requires one consumer, found 0"));
}
TEST_F(HloVerifierTest, AsyncStartAndAsyncUpdateNoAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-update instruction requires one consumer, found 0"));
}
TEST_F(HloVerifierTest, AsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncDoneNoAsyncStart {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3] custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TF_ASSERT_OK(async_done->ReplaceOperandWith(0, tuple));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
HloComputation* computation =
FindComputation(module.get(), "AsyncDoneNoAsyncStart");
TF_ASSERT_OK(computation->RemoveInstruction(async_start));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a async-done instruction needs to be "
"async-start or async-update, found tuple"));
}
TEST_F(HloVerifierTest, AsyncUpdateAndAsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncUpdateAndAsyncDoneNoAsyncStart {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
ROOT async-done = f32[2,3] custom-call-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
HloInstruction* async_update = FindInstruction(module.get(), "async-update");
TF_ASSERT_OK(async_update->ReplaceOperandWith(0, tuple));
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TF_ASSERT_OK(async_done->ReplaceOperandWith(0, tuple));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
HloComputation* computation =
FindComputation(module.get(), "AsyncUpdateAndAsyncDoneNoAsyncStart");
TF_ASSERT_OK(computation->RemoveInstruction(async_start));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a async-update instruction needs to be "
"async-start or async-update, found tuple"));
}
TEST_F(HloVerifierTest, AsyncOpComputationParamWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p0 = f32[2,3] parameter(0)
ROOT p1 = f32[3,2] parameter(1)
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = f32[3,2] parameter(1)
async-start = ((f32[3,2], f32[3,2]), f32[3,2], u32[]) async-start(p0, p1), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape at index {0} to "
"match async computation parameter shape"));
}
TEST_F(HloVerifierTest, AsyncOpComputationRootWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p0 = f32[2,3] parameter(0)
ROOT p1 = f32[3,2] parameter(1)
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = f32[3,2] parameter(1)
async-start = ((f32[2,3], f32[3,2]), f32[2,3], u32[]) async-start(p0, p1), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape at index {1} to "
"match the async computation root shape"));
}
TEST_F(HloVerifierTest, AsyncOpTupleWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], s32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
async_start->mutable_shape()->clear_tuple_shapes();
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape to be a tuple of "
"at least two elements"));
}
TEST_F(HloVerifierTest, AsyncStartOperandWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[3,2] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the shape of operand 0 to match "
"the async shape at index {0}"));
}
TEST_F(HloVerifierTest, AsyncDoneOutputWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[2,3] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-done expects the shape of output to match the "
"async shape at index {1}"));
}
TEST_F(HloVerifierTest, AsyncUpdateWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
async-update = ((f32[3,2]), f32[3,2], u32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"async-update expects the shape of operand and output to match"));
}
TEST_F(HloVerifierTest, AsyncOpComputationNotTrivial) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
copy = f32[2,3] copy(p)
ROOT custom-call = f32[3,2] custom-call(copy), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"expected to contain only the root and parameter instructions"));
}
TEST_F(HloVerifierTest, IotaNonArrayResult) {
const char* const hlo_string = R"(
HloModule IotaTupleResult
ENTRY kernelEntry {
ROOT iota = () iota(), iota_dimension=24
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("does not support non-array result"));
}
TEST_F(HloVerifierTest, IotaNegativeDimension) {
const char* const hlo_string = R"(
HloModule IotaTupleResult
ENTRY kernelEntry {
ROOT iota = s32[128,1001]{1,0} iota(), iota_dimension=-1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("negative"));
}
TEST_F(HloVerifierTest, IotaPredResultNotAllowed) {
const char* const hlo_string = R"(
HloModule IotaPredResult
ENTRY kernelEntry {
ROOT iota = pred[128] iota(), iota_dimension=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("got PRED"));
}
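// The map operand is f64 while the computation parameter is f32, which is
// an error unless mixed precision is allowed.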
static const char* const kMapOperandComputationMismatchHlo = R"(
HloModule MapOperandComputationMismatch
Computation {
param0 = f32[] parameter(0)
constant = f32[] constant(1)
ROOT add = f32[] add(param0, constant)
}
ENTRY kernelEntry {
param = f64[] parameter(0)
ROOT map = f32[] map(param), dimensions={}, to_apply=Computation
})";
TEST_F(HloVerifierTest, MapOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(
kMapOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"Shape mismatch between to_apply computation parameter and operand"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, MapOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kMapOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
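// The f16 reduce disagrees with its f32 reduction computation, which is an
// error unless mixed precision is allowed.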
static const char* const kReduceOperandComputationMismatchHlo = R"(
HloModule ReduceOperandComputationMismatch
computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY kernelEntry {
arg0 = f16[64,64,224,224]{3,2,1,0} parameter(0)
constant = f16[] constant(0)
reduce = f16[64]{0} reduce(arg0, constant), dimensions={0,2,3}, to_apply=computation
})";
TEST_F(HloVerifierTest, ReduceOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnUnverifiedModule(kReduceOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to f32[64]"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, ReduceOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(kReduceOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
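// Renders replica groups as an HLO attribute string, e.g. {{0,1},{2,3}}
// becomes "{{0,1}, {2,3}}".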
std::string ReplicaGroupsStr(std::vector<std::vector<int64_t>> replica_groups) {
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups.size());
for (const auto& g : replica_groups) {
replica_group_strs.push_back(
absl::StrFormat("{%s}", absl::StrJoin(g, ",")));
}
return absl::StrFormat("{%s}", absl::StrJoin(replica_group_strs, ", "));
}
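// Returns the total number of replica ids named across all groups.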
int64_t ReplicaCount(const std::vector<std::vector<int64_t>>& replica_groups) {
int64_t replica_count = 0;
for (const auto& group : replica_groups) {
replica_count += group.size();
}
return replica_count;
}
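// Instantiates `template_str` by splicing in the replica groups and any
// extra attributes, then parses it with the given replica/partition counts.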
absl::StatusOr<std::unique_ptr<HloModule>> MakeCollectiveCommOpComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count, std::optional<int64_t> num_partitions,
absl::string_view other_attributes, absl::string_view template_str) {
HloModuleConfig config;
config.set_replica_count(
replica_count.value_or(ReplicaCount(replica_groups)));
config.set_num_partitions(num_partitions.value_or(1));
return ParseAndReturnUnverifiedModule(
absl::StrReplaceAll(
template_str,
{{"REPLICA_GROUPS", ReplicaGroupsStr(replica_groups)},
{"OTHER_ATTRIBUTES", other_attributes.empty()
? ""
: absl::StrCat(",", other_attributes)}}),
config);
}
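// Builds an all-reduce module with the given replica groups; the replica
// count defaults to the number of ids named in the groups.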
absl::StatusOr<std::unique_ptr<HloModule>> MakeAllReduceComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count = std::nullopt,
std::optional<int64_t> num_partitions = std::nullopt,
absl::string_view other_attributes = "") {
const char* kTemplate = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p = f32[128]{0} parameter(0)
crs = f32[128]{0} all-reduce(p), to_apply=add, replica_groups=REPLICA_GROUPS
OTHER_ATTRIBUTES
})";
return MakeCollectiveCommOpComputation(replica_groups, replica_count,
num_partitions, other_attributes,
kTemplate);
}
TEST_F(HloVerifierTest, AllReduce_NoReplicaGroupsOK) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({}));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_DifferentGroupSizesOk) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0}, {1, 3}, {2}}));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_EmptyReplicaGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({{0}, {}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("empty replica group"));
}
TEST_F(HloVerifierTest, AllReduce_RepeatedReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}, {4, 0}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 0 is repeated"));
}
TEST_F(HloVerifierTest, AllReduce_MissingReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}, {5, 6}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 4 is not named"));
}
TEST_F(HloVerifierTest, AllReduce_NotEnoughReplicasInGroupConfig) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({{0, 1}}, 8));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossReplica mode, replica groups should contain "
"8 replicas, but found 2"));
}
TEST_F(HloVerifierTest, AllReduce_TooManyReplicasInGroupConfig) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossReplica mode, replica groups should contain "
"2 replicas, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_CrossReplicaAndPartition_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2, 1, "channel_id=1"));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr(
"In kCrossReplicaAndPartition mode, replica groups should contain "
"2 replicas, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_CrossReplicaAndPartition_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 4, 1, "channel_id=1"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_FlattenedID_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 1, 2,
"channel_id=1, use_global_device_ids=true"));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kFlattenedID mode, replica groups should contain "
"2 flattened IDs, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_FlattenedID_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2, 2,
"channel_id=1, use_global_device_ids=true"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduceStartAndDone) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = f32[2,3] all-reduce-start(p0), to_apply=add
ROOT done = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, AllReduceStartAndDoneWrongType) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = (f32[2,3], f32[2,3]) all-reduce-start(p0), to_apply=add
ROOT done = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[2,3]"));
}
TEST_F(HloVerifierTest, AllReduceStartAndMultipleDone) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = (f32[2,3], f32[2,3]) all-reduce-start(p0), to_apply=add
done1 = f32[2,3] all-reduce-done(start)
ROOT done2 = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("all-reduce-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, AllReduceDoneWithoutStart) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = (f32[2,3], f32[2,3], u32[], u32[]) tuple(p0, p0, p1, p1)
ROOT done = f32[2,3] all-reduce-done(tuple)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a all-reduce-done instruction "
"needs to be all-reduce-start, found tuple"));
}
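// Builds an all-to-all module by splicing the given replica groups,
// replica/partition counts, and extra attributes into kTemplate via
// MakeCollectiveCommOpComputation.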
absl::StatusOr<std::unique_ptr<HloModule>> MakeAllToAllComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count = std::nullopt,
std::optional<int64_t> num_partitions = std::nullopt,
absl::string_view other_attributes = "") {
const char* kTemplate = R"(
HloModule test
ENTRY entry {
p0 = f32[128]{0} parameter(0)
p1 = f32[128]{0} parameter(1)
a2a = (f32[128], f32[128]) all-to-all(p0, p1), replica_groups=REPLICA_GROUPS
OTHER_ATTRIBUTES
})";
return MakeCollectiveCommOpComputation(replica_groups, replica_count,
num_partitions, other_attributes,
kTemplate);
}
TEST_F(HloVerifierTest, AllToAll_NoReplicaGroupsOK) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllToAllComputation({}, 2));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllToAll_EmptyReplicaGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllToAllComputation({{0, 1}, {}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("cannot have an empty replica group"));
}
TEST_F(HloVerifierTest, AllToAll_RepeatedReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}, {4, 0}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 0 is repeated"));
}
TEST_F(HloVerifierTest, AllToAll_MissingReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}, {5, 6}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 4 is not named"));
}
TEST_F(HloVerifierTest, AllToAll_UniformSizeOfReplicasInGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2}, {3, 4}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica groups expected to be of uniform size"));
}
TEST_F(HloVerifierTest, AllToAll_CrossPartition_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}}, 1, 2, "channel_id=1"));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossPartition mode, replica groups should "
"contain 2 partitions, but found 4"));
}
TEST_F(HloVerifierTest, AllToAll_CrossPartition_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}}, 1, 4, "channel_id=1"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllToAll_LayoutConstrained) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128,4]{0,1} parameter(0)
p1 = f32[128,4]{1,0} parameter(1)
ROOT a2a = (f32[128,4]{0,1}, f32[128,4]{1,0}) all-to-all(p0, p1),
replica_groups={{0,1}}
}
)";
HloModuleConfig config;
config.set_replica_count(2);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("HLO all-to-all has operands with different shapes"));
}
TEST_F(HloVerifierTest, AllToAll_OperandCountMismatchWithReplicaGroupSize) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128,4] parameter(0)
p1 = f32[128,4] parameter(1)
ROOT a2a = (f32[128,4], f32[128,4], f32[128,4]) all-to-all(p0, p1, p1),
replica_groups={{0,1}}
}
)";
HloModuleConfig config;
config.set_replica_count(2);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("hlo->operand_count() == split_count"));
}
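// Single-operand collective-permute: each device may appear at most once as
// a source and at most once as a target.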
TEST_F(HloVerifierTest, CollectivePermuteSameSourceTwice) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,1}, {0,2}, {1,0}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Source 0 appears more than once"));
}
TEST_F(HloVerifierTest, CollectivePermuteSameTargetTwice) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,2}, {1,2}, {2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Target 2 appears more than once"));
}
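// The combined multi-operand collective-permute form below transfers two
// slices per peer (slice_sizes has two entries), so each source/target may
// appear at most twice; a third appearance is an error.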
TEST_F(HloVerifierTest, CollectivePermuteSameSourceTooManyTimes) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
ROOT collective-permute = u32[2,8,128]{2,1,0:T(2,128)} collective-permute(u32[2,8,128] broadcast.0, u32[2,8,128] broadcast.1, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{0,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Source 0 appears more than 2 times in instruction's "
"source-target pairs:"));
}
TEST_F(HloVerifierTest, CollectivePermuteSameTargetTooManyTimes) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
ROOT collective-permute = u32[2,8,128]{2,1,0:T(2,128)} collective-permute(u32[2,8,128] broadcast.0, u32[2,8,128] broadcast.1, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,3},{1,0}}, slice_sizes={{1,8,128},{1,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Target 3 appears more than 2 times in instruction's "
"source-target pairs:"));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingSourceTarget) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching input buffers and output buffers"));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingInputAndInputOffset) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (s32[],s32[],s32[]) tuple.3, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching input buffers and input offset."));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingOutputAndOutputOffset) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.7 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.2)
tuple.8 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (s32[],s32[],s32[]) tuple.2), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching output buffers and output offset."));
}
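// Out-of-range source/target IDs: the bound is replica_count for
// cross-replica permutes and num_partitions when a channel_id is present.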
TEST_F(HloVerifierTest, CollectivePermuteCrossReplicaSourceOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{5,2}, {1,2}, {2,0}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Source 5"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossReplicaTargetOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,1}, {1,2}, {2,7}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Target 7"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossPartitionSourceOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{5,2}, {1,2}, {2,0}}, channel_id=1
}
)";
HloModuleConfig config;
config.set_num_partitions(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Source 5"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossPartitionTargetOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,2}, {1,7}, {2,0}}, channel_id=1
}
)";
HloModuleConfig config;
config.set_num_partitions(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Target 7"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
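// Fusion checks: surplus operands are tolerated, but missing operands,
// root/fusion shape mismatches, and execution-thread mismatches are errors.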
TEST_F(HloVerifierTest, FusionMoreOperandsThanParameters) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[10] parameter(0)
}
ENTRY entry {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT out = f32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, FusionLessOperandsThanParameters) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT out = f32[10] add(p0, p1)
}
ENTRY entry {
p0 = f32[10] parameter(0)
ROOT out = f32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("greater than the number of operands"));
}
TEST_F(HloVerifierTest, FusionShapeVerifier) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[10,10] parameter(0)
}
ENTRY entry {
p0 = f32[10,10] parameter(0)
ROOT out = f32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Fused computation shape"));
}
TEST_F(HloVerifierTest, FusionThreadVerifier) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[8,12] parameter(0)
}, execution_thread="parallel_thread"
ENTRY entry {
p0 = f32[8,12] parameter(0)
ROOT out = f32[8,12] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, FusionNestedComputationThreadVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}, execution_thread="parallel_thread"
fused_computation {
p0 = f32[8,12] parameter(0)
p1 = f32[8,12] parameter(1)
crs0 = f32[8,12] all-reduce(p1), replica_groups={}, to_apply=add
ROOT result = add(p0, crs0)
}
ENTRY entry {
p0 = f32[8,12] parameter(0)
p1 = f32[8,12] parameter(1)
ROOT out = f32[8,12] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr("crs0 top_apply computation execution thread does not match "
"(parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, AllReduceVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
input = f32[8,12]{0,1} parameter(0)
crs0 = f32[8,12]{0,1} all-reduce(input), replica_groups={}, to_apply=add
crs1 = f32[8,12]{0,1} all-reduce(input), replica_groups={}, to_apply=add,
constrain_layout=true
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(crs0, crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr("mix of layout constrained and unconstrained AllReduce"));
}
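// Channel checks: a channel_id must not be shared between send/recv-style
// and collective channel instructions, and each send/recv must have exactly
// one consumer.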
TEST_F(HloVerifierTest, ChannelVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,12] parameter(0)
%token0 = token[] after-all()
%send = (f32[8,12], u32[], token[]) send(%input, %token0), channel_id=1
%send-done = token[] send-done(%send), channel_id=1
%crs = f32[8,12] all-reduce(%input), replica_groups={}, to_apply=add,
channel_id=1
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(%input, %crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("used for different types of channel instructions"));
}
TEST_F(HloVerifierTest, ChannelVerifierPartiallyPipelinedAsyncRecv) {
const char* const kModuleStr = R"(
HloModule test
while_body {
param = ((f32[16], u32[], token[])) parameter(0)
prev_recv = (f32[16], u32[], token[]) get-tuple-element(param), index=0
recv_done = (f32[16], token[]) recv-done(prev_recv), channel_id=1
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT tuple = ((f32[16], u32[], token[])) tuple(recv)
}
while_condition {
param = ((f32[16], u32[], token[])) parameter(0)
ROOT infinite_loop = pred[] constant(true)
}
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
init = ((f32[16], u32[], token[])) tuple(recv)
while = ((f32[16], u32[], token[])) while(init),
condition=while_condition, body=while_body
recv_ctx = (f32[16], u32[], token[]) get-tuple-element(while), index=0
recv_done = (f32[16], token[]) recv-done(recv_ctx), channel_id=1
ROOT result = f32[16] get-tuple-element(recv_done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierPartiallyPipelinedAsyncSend) {
const char* const kModuleStr = R"(
HloModule test
while_body {
param = ((f32[16], u32[], token[]), f32[16]) parameter(0)
prev_send = (f32[16], u32[], token[]) get-tuple-element(param), index=0
data = f32[16] get-tuple-element(param), index=1
send_done = (f32[16], token[]) send-done(prev_send), channel_id=1
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT tuple = ((f32[16], u32[], token[]), f32[16]) tuple(send, data)
}
while_condition {
param = ((f32[16], u32[], token[]), f32[16]) parameter(0)
ROOT infinite_loop = pred[] constant(true)
}
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
init = ((f32[16], u32[], token[]), f32[16]) tuple(send, data)
while = ((f32[16], u32[], token[]), f32[16]) while(init),
condition=while_condition, body=while_body
send_ctx = (f32[16], u32[], token[]) get-tuple-element(while), index=0
ROOT send_done = (f32[16], token[]) send-done(send_ctx), channel_id=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierAsyncSend) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT send_done = (f32[16], token[]) send-done(send), channel_id=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierAsyncRecv) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
recv_done = (f32[16], token[]) recv-done(recv), channel_id=1
ROOT result = f32[16] get-tuple-element(recv_done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierMultipleSendUsers) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
send_done = (f32[16], token[]) send-done(send), channel_id=1
ROOT result = ((f32[16], u32[], token[]), f32[16]) tuple(send, send_done)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("send instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, ChannelVerifierMultipleRecvUsers) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
recv_done = (f32[16], token[]) recv-done(recv), channel_id=1
ROOT result = (((f32[16], u32[], token[])), f32[16])
tuple(recv, recv_done)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("recv instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, CollectiveChannelVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,12] parameter(0)
%permute = f32[8,12] collective-permute(%input),
source_target_pairs={{0,1},{1,0}}, channel_id=1
%crs = f32[8,12] all-reduce(%input), replica_groups={}, to_apply=add,
channel_id=1
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(%permute, %crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("used for different types of channel instructions"));
}
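// Async collective-permute: -start must produce the
// (input, output, u32[], u32[]) tuple and be consumed exactly once by a
// matching -done.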
TEST_F(HloVerifierTestLayoutSensitive, CollectivePermuteStartAndDone) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = (f32[2,3]{1,0:S(1)}, f32[2,3]{1,0:S(1)}, u32[], u32[]) collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CollectivePermuteStartAndDoneWrongType) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndDoneWrongType {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = f32[2,3]{1,0:S(1)} collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"(f32[2,3], f32[2,3])"));
}
TEST_F(HloVerifierTest, CollectivePermuteStartAndMultipleDone) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndMultipleDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = (f32[2,3]{1,0:S(1)}, f32[2,3]{1,0:S(1)}, u32[], u32[]) collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
ROOT collective-permute-done.2 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("collective-permute-start instruction requires one consumer, "
"found 2"));
}
TEST_F(HloVerifierTest, CollectivePermuteDoneNoCollectivePermuteStart) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteDoneNoCollectivePermuteStart {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
p1 = f32[2,3]{1,0:S(1)} parameter(1)
p2 = u32[] parameter(2)
p3 = u32[] parameter(3)
tuple.1 = (f32[2,3], f32[2,3], u32[], u32[]) tuple(p0, p1, p2, p3)
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(tuple.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a collective-permute-done instruction "
"needs to be collective-permute-start, found tuple"));
}
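// The comparison type must match the operand element type: FLOAT or
// TOTALORDER for floats, SIGNED for signed integers, UNSIGNED for unsigned
// integers and preds.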
TEST_F(HloVerifierTest, ComparisonTypeFloat) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = f32[] parameter(0)
ROOT cmp = pred[] compare(f32[] p0, f32[] p0), direction=LT, type=UNSIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected comparison type FLOAT or TOTALORDER"));
}
TEST_F(HloVerifierTest, ComparisonTypeSigned) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = s32[] parameter(0)
ROOT cmp = pred[] compare(s32[] p0, s32[] p0), direction=LT, type=UNSIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type SIGNED"));
}
TEST_F(HloVerifierTest, ComparisonTypeUnsigned) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = u32[] parameter(0)
ROOT cmp = pred[] compare(u32[] p0, u32[] p0), direction=LT, type=SIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type UNSIGNED"));
}
TEST_F(HloVerifierTest, ComparisonTypePred) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = pred[] parameter(0)
ROOT cmp = pred[] compare(pred[] p0, pred[] p0), direction=LT, type=SIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type UNSIGNED"));
}
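// use_global_device_ids requires explicit flattened-id replica groups and a
// channel_id.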
TEST_F(HloVerifierTest, UseGlobalDeviceIdsEmptyReplicaGroup) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, channel_id=1,
use_global_device_ids=true, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("Replica groups must be specified in flattened-id mode"));
}
TEST_F(HloVerifierTest, InvalidChannelIDandUseGlobalDeviceIDs) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={},
use_global_device_ids=true, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"Invalid combination of has_channel_id and use_global_device_ids"));
}
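// reduce-scatter: the output must be the input scattered along a valid
// dimension across uniformly sized replica groups.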
TEST_F(HloVerifierTest, ReduceScatterInvalidOutputSize0) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} reduce-scatter(input), replica_groups={{0,1}},
to_apply=add, dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("shard_count = 1, subgroup_size = 2"));
}
TEST_F(HloVerifierTest, ReduceScatterInvalidScatterDim) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}},
to_apply=add, dimensions={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("ars->scatter_dimension() < ars->operand(i)->shape().rank()"));
}
TEST_F(HloVerifierTest, ReduceScatterNonUniformGroups) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}, {2,3,4}},
to_apply=add, dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Replica groups expected to be of uniform size"));
}
TEST_F(HloVerifierTest, ScatterInvalidScatterDim) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
Arg_0 = s8[11,6]{1,0} parameter(0)
constant = s32[] constant(1)
broadcast = s32[1,7,9,2,16,2]{5,4,3,2,1,0} broadcast(constant), dimensions={}
Arg_1 = s8[1,7,9,2,9,4,16]{6,5,4,3,2,1,0} parameter(1)
scatter = s8[11,6]{1,0} scatter(Arg_0, broadcast, Arg_1), update_window_dims={4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={1094795585,1}, index_vector_dim=5, to_apply=add
abs = s8[11,6]{1,0} abs(scatter)
ROOT tuple = (s8[11,6]{1,0}) tuple(abs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Invalid scatter_dims_to_operand_dims mapping"));
}
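// Opt-in check: broadcast dimensions must be ordered when
// VerifyBroadcastDimensionsOrder() is enabled.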
TEST_F(HloVerifierTest, VerifyBroadcastDimensionsOrder) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
mul = f32[32,32,32]{2,1,0} parameter(0)
ROOT broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = HloVerifier{HloVerifierOpts{}.VerifyBroadcastDimensionsOrder()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Broadcast dimensions should be ordered"));
}
TEST_F(HloVerifierTest, VerifyBroadcastDimensionsOrderOK) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
mul = f32[4,5] parameter(0)
ROOT broadcast = f32[4,3,2,5] broadcast(mul), dimensions={0,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyBroadcastDimensionsOrder()}
.Run(module.get())
.status());
}
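// Opt-in check: instruction names must match their scheduling_name metadata;
// names carrying .remat/.clone suffixes from rematerialization and cloning
// are exempt.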
TEST_F(HloVerifierTest, VerifyInstructionNameChanged) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
ROOT add0 = f32[32] add(p0,p1), metadata={scheduling_name="add_changed"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction name to remain the same."));
}
TEST_F(HloVerifierTest, VerifyInstructionNameUnchanged) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
ROOT add0 = f32[32] add(p0,p1), metadata={scheduling_name="add0"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyInstructionNameSchedulingNameNotPresent) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
ROOT add0 = f32[32] add(p0,p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyInstructionNameChangedOkWithRematAndClones) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
add0.remat = f32[32] add(p0,p1), metadata={scheduling_name="add0"}
ROOT add1.clone = f32[32] add(add0.remat, p0), metadata={scheduling_name="add1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, ReshapeIsNotBitcast) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[8,3]{1,0} parameter(0)
ROOT r = f32[4,2,3]{0,1,2} reshape(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status =
HloVerifier{
HloVerifierOpts{}.MakeLayoutSensitive().VerifyReshapeIsBitcast()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Reshape should be a physical bitcast"));
}
TEST_F(HloVerifierTest, ReshapeIsBitcast) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[8]{0} parameter(0)
ROOT r = f32[4,2]{1,0} reshape(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{
HloVerifierOpts{}.MakeLayoutSensitive().VerifyReshapeIsBitcast()}
.Run(module.get())
.status());
}
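// Execution-thread rules: called computations must run on the caller's
// thread unless reached through an async start/done pair that names the
// thread via async_execution_thread.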
TEST_F(HloVerifierTest, VerifyCustomCallThread) {
const char* const hlo = R"(
HloModule module
%call_body (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}, execution_thread="parallel_thread"
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %custom = s32[] custom-call(s32[] %constant.2), custom_call_target="MyCustomCall", to_apply=%call_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status =
HloVerifier{
HloVerifierOpts{}.VerifyCustomCallNestedComputationThreadName()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("custom top_apply computation execution thread does "
"not match (parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, CheckWhileThread) {
const char* const hlo_string = R"(
HloModule While, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}, execution_thread="parallel_thread"
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, CheckWhileContainsAsyncThread) {
const char* const hlo_string = R"(
HloModule While, entry_computation_layout={()->s32[]}
%async_add (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}, execution_thread="parallel_thread"
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
%async-start = ((s32[]), s32[], s32[]) custom-call-start(s32[] %prev.2), async_execution_thread="parallel_thread", custom_call_target="async_add"
%async-done = s32[] custom-call-done(((s32[]), s32[], s32[]) %async-start)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %async-done), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutFusion, DynamicUpdateSliceWithMemorySpace) {
const char* const hlo_string = R"(
HloModule fusion, is_scheduled=true
fused_computation {
%parameter.0 = bf16[1,8,1,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(0)
%parameter.1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(1)
%c = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)} copy(parameter.1)
%constant.1 = s32[] constant(0)
ROOT %dynamic-update-slice.1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)}
dynamic-update-slice(%c, %parameter.0, %constant.1, %constant.1,
%constant.1, %constant.1, %constant.1)
}
ENTRY entry (parameter.0: bf16[1,8,1,8,320], parameter.1: bf16[1,8,6,8,320]) -> bf16[1,8,6,8,320]{
%p0 = bf16[1,8,1,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(0)
%p1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(1)
ROOT out = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} fusion(p0, p1), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
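// Sharding checks: tile-assignment rank and device IDs must be consistent
// with the shape and num_partitions, and while/conditional computations must
// agree on sharding.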
TEST_F(HloVerifierTest, InvalidShardingRank) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2] parameter(0), sharding={devices=[1,2,2,1]0,1,2,3}
ROOT r = f32[4,2] copy(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("tile assignment dimensions (excluding subgroups) is "
"different than the input rank."));
}
TEST_F(HloVerifierTest, InvalidShardingDevices) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2] parameter(0), sharding={devices=[2,2]0,1,2,3}
ROOT r = f32[4,2] copy(p)
}
)";
HloModuleConfig config;
config.set_num_partitions(2);
config.set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo, config));
ASSERT_TRUE(module->config().use_spmd_partitioning());
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("device 2 > num_devices (2) in tile assignment"));
}
TEST_F(HloVerifierTest, InconsistentWhileSharding) {
const char* const hlo = R"(
HloModule While
%body.v3 (prev.1: s32[]) -> s32[] {
%prev.1 = s32[] parameter(0), sharding={replicated}
%constant = s32[] constant(1)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%prev.2 = s32[] parameter(0), sharding={maximal device=0}
%constant.1 = s32[] constant(5)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Inconsistent while sharding among instructions"));
}
TEST_F(HloVerifierTest, InconsistentConditionSharding) {
const char* const hlo = R"(
HloModule Module
true_branch {
tparam = (s32[], f32[4]) parameter(0)
ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
}
false_branch {
fparam = (s32[], f32[4]) parameter(0)
ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1, sharding={replicated}
}
ENTRY entry {
p0 = (s32[], f32[4]) parameter(0)
constant = pred[] constant(true)
ROOT conditional = f32[4] conditional(constant, p0, p0),
true_computation=true_branch, false_computation=false_branch,
sharding={maximal device=0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("Inconsistent conditional sharding among instructions"));
}
TEST_F(HloVerifierTest, DisableS4Verification) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
param0 = s32[] parameter(0)
x = s4[] convert(param0)
ROOT add = s4[] add(x, x)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
HloVerifier verifier{HloVerifierOpts{}.WithVerifyS4U4Usage(false)};
auto status = verifier.Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
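// The log expectation below only matches the internal build, so the test
// exits early in open source.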
TEST(MetadataTrackerTest, MetadataTrackerLogsInfo) {
if (tsl::kIsOpenSource) {
return;
}
constexpr absl::string_view hlo = R"(
HloModule Module
ENTRY entry {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
ROOT sum = s32[] add(p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
::absl::ScopedMockLog log(::absl::MockLogDefault::kIgnoreUnexpected);
EXPECT_CALL(
log,
Log(absl::LogSeverity::kInfo, ::testing::EndsWith("/hlo_verifier.cc"),
::testing::StartsWith("TEST PREFIX creation_pass_coverage=0")))
.Times(1);
log.StartCapturingLogs();
{
MetadataTracker tracker("TEST PREFIX");
for (const auto* c : module->computations()) {
TF_ASSERT_OK(c->Accept(&tracker));
}
}
}
TEST_F(HloVerifierTest, TopKOK) {
const char* const hlo = R"(
HloModule topk, entry_computation_layout={(f32[10,10]{0,1})->(f32[10,2]{0,1}, s32[10,2]{0,1})}
ENTRY TopK {
x = f32[10,10]{0,1} parameter(0)
ROOT topk = (f32[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
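// entry_computation_layout mismatches are ignored by the default verifier
// and reported only in layout-sensitive mode.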
TEST_F(HloVerifierTest, InputLayoutMismatchIgnored) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{0,1} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTestLayoutSensitive, InputLayoutMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{0,1} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("should be compatible"));
}
TEST_F(HloVerifierTest, OutputLayoutMismatchIgnored) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{0,1} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTestLayoutSensitive, OutputLayoutMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{0,1} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("should be compatible"));
}
TEST_F(HloVerifierTestLayoutSensitive, AliasedMemorySpaceMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, input_output_alias={{}: (0, {}, must-alias)},
entry_computation_layout={(f32[10]{0:S(5)})->f32[10]{0}}
ENTRY entry {
x = f32[10]{0} parameter(0)
ROOT add = f32[10]{0} add(x, x)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Shape and memory space of the result"));
}
TEST_F(HloVerifierTestLayoutSensitive, LayoutOK) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTest, MixedTypeForAllGatherAllowed) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY entry {
p0 = f32[10] parameter(0)
p1 = bf16[10] parameter(1)
ROOT ag = (f32[20], bf16[20]) all-gather(p0, p1), dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
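// Unbounded dynamic dimensions (e.g. f32[?,784]) are rejected unless
// WithAllowUnboundedDynamism(true) is set.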
TEST_F(HloVerifierTest, UnboundedDynamism) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
ROOT param0 = f32[?,784] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Unbounded dynamism is disabled"));
}
TEST_F(HloVerifierTest, EnableUnboundedDynamism) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
ROOT param0 = f32[?,784] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
HloVerifier verifier{HloVerifierOpts{}.WithAllowUnboundedDynamism(true)};
auto status = verifier.Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, SparseDotMetadataShape) {
const char* const kHlo = R"(
HloModule test
ENTRY entry {
%lhs = f32[10,16] parameter(0)
%rhs = f32[32,20] parameter(1)
%meta = u16[10,4] parameter(2)
ROOT %dot = f32[10,20] dot(%lhs, %rhs, %meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(kHlo));
HloVerifier verifier{HloVerifierOpts{}.WithAllowUnboundedDynamism(true)};
auto status = verifier.Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected sparse dot metadata"));
}
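// Host offloading: copies and dynamic-(update-)slices may move values
// between device and host (S(5)) memory spaces, but a dynamic-slice may not
// move data from device to host, and copies may not change element type or
// layout in the process.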
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingDUSAndDSAreVerifiedWhenChangingLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
constant_f32_0 = f32[] constant(0)
custom-call = f32[2,2048,2048]{2,1,0:S(5)} custom-call(), custom_call_target="AllocateBuffer"
data_param = f32[1,2048,2048]{2,1,0} parameter(0)
index_param = s32[] parameter(1)
constant_s32_0 = s32[] constant(0)
dynamic_update_slice = f32[2,2048,2048]{2,1,0:S(5)} dynamic-update-slice(custom-call, data_param, index_param, constant_s32_0, constant_s32_0)
ROOT dynamic_slice = f32[1,2048,2048]{2,1,0} dynamic-slice(f32[2,2048,2048]{2,1,0:S(5)} dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingCopyIsVerifiedWhenChangingLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048]{0} parameter(0)
copy_0 = f32[2048]{0:S(5)} copy(f32[2048]{0} data_param)
ROOT copy_1 = f32[2048]{0} copy(f32[2048]{0:S(5)} copy_0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingDSCannotChangeLayoutFromDeviceToHost) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
constant_f32_0 = f32[] constant(0)
custom-call = f32[2,2048,2048]{2,1,0} custom-call(), custom_call_target="AllocateBuffer"
data_param = f32[1,2048,2048]{2,1,0} parameter(0)
index_param = s32[] parameter(1)
constant_s32_0 = s32[] constant(0)
dynamic_update_slice = f32[2,2048,2048]{2,1,0} dynamic-update-slice(custom-call, data_param, index_param, constant_s32_0, constant_s32_0)
ROOT dynamic_slice = f32[1,2048,2048]{2,1,0:S(5)} dynamic-slice(f32[2,2048,2048]{2,1,0} dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("DynamicSlice instruction shouldn't change layout "
"memory space from device to host"));
}
TEST_F(HloVerifierTestLayoutSensitiveAndAllowMixedPrecision,
HostOffloadingCopyCannotChangeType) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
param = f32[1024,1024]{1,0:T(8,128)S(5)} parameter(0)
copy = bf16[1024,1024]{1,0:T(8,128)} copy(param)
ROOT dot = f32[1024,1024]{1,0:T(8,128)} dot(copy, copy), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[1024,1024]{1,0:T(8,128)S(5)}, actual shape is "
"bf16[1024,1024]{1,0:T(8,128)}"));
}
TEST_F(HloVerifierTestLayoutSensitiveAndAllowMixedPrecision,
HostOffloadingCopyCannotChangeLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
param = f32[1024,1024]{1,0:T(8,128)S(5)} parameter(0)
ROOT copy = f32[1024,1024]{0,1:T(8,128)} copy(param)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[1024,1024]{1,0:T(8,128)S(5)}, actual shape is "
"f32[1024,1024]{0,1:T(8,128)}"));
}
TEST_F(HloVerifierTestLayoutSensitive,
MismatchedMinorToMajorSizeAndDimensionSize) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048,2048]{1,0} parameter(0)
add = f32[2048,2048]{1,0} add(data_param, data_param)
ROOT const = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* instruction =
module->entry_computation()->parameter_instruction(0)->users().at(0);
Layout* layout = instruction->mutable_shape()->mutable_layout();
layout->add_minor_to_major(2);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction has mismatched minor-to-major size and "
"dimension size: "));
}
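// Reusing channel_id=1 across two collective-permutes is normally an error;
// with verify_unique_channel_ids disabled the verifier must accept it.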
TEST_F(HloVerifierTest, NoErrorOnDuplicateChannelId) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048,2048]{1,0} parameter(0)
cp1 = f32[2048,2048]{1,0} collective-permute(data_param), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
cp2 = f32[2048,2048]{1,0} collective-permute(data_param), source_target_pairs={{0,1}}, channel_id=1
ROOT tuple = (f32[2048,2048]{1,0}, f32[2048,2048]{1,0}) tuple(cp1, cp2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloVerifierOpts opts{};
opts.verify_unique_channel_ids = false;
HloVerifier verifier(std::move(opts));
ASSERT_IS_OK(verifier.Run(module.get()).status());
}
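// s4 arrays carry an explicit element-size annotation in their layout
// (E(4)); layout-sensitive verification should accept them in compare and
// select.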
TEST_F(HloVerifierTestLayoutSensitive, Int4CompareSelect) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main {
a = s4[10]{0:E(4)} parameter(0)
b = s4[10]{0:E(4)} parameter(1)
less = pred[10] compare(a, b), direction=LT
ROOT result = select(less, a, b)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6b9c843-f0da-4d37-9327-62280c16b2b1 | cpp | tensorflow/tensorflow | float_support | third_party/xla/xla/service/float_support.cc | third_party/xla/xla/service/gpu/float_support_test.cc | #include "xla/service/float_support.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
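// Returns true if `hlo` accepts the low-precision type on the given operand
// without an inserted conversion: pass-through ops (call, tuple, while, ...)
// always do, and kConvert does only when its input is already the
// low-precision type.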
bool FloatSupport::SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kConvert:
CHECK_EQ(operand_index, 0);
return hlo.operand(0)->shape().element_type() == low_precision_type_;
default:
break;
}
return false;
}
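// Mirror of the operand check for outputs: pass-through ops may produce the
// low-precision type directly, and kConvert may when its result element type
// is the low-precision type.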
bool FloatSupport::SupportsLowPrecisionOutput(const HloInstruction& hlo) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kConvert:
return hlo.shape().element_type() == low_precision_type_;
default:
break;
}
return false;
}
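// Returns true if `hlo` tolerates operands and results of differing
// precisions, which holds for ops that route or convert data rather than
// compute on it.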
bool FloatSupport::SupportsMixedPrecisions(const HloInstruction& hlo) const {
switch (hlo.opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConvert:
case HloOpcode::kCustomCall:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kOptimizationBarrier:
return true;
default:
break;
}
return false;
}
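// Returns true if the given operand's values reach the output unchanged in
// precision, so lowering the operand's precision lowers the output's in the
// same way. Data-movement ops qualify on every operand; slice, gather,
// select, and dynamic-update-slice qualify only on their data operands; a
// reduction qualifies only if every non-parameter instruction in its
// reduction computation is itself precision-preserving.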
bool FloatSupport::EffectiveOperandPrecisionIsOutputPrecision(
const HloInstruction& hlo, int64_t operand_index) {
switch (hlo.opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAllGather:
case HloOpcode::kAllToAll:
case HloOpcode::kBroadcast:
case HloOpcode::kClamp:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kDomain:
case HloOpcode::kGetTupleElement:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
case HloOpcode::kOptimizationBarrier:
return true;
case HloOpcode::kBitcast:
return hlo.shape().element_type() ==
hlo.operand(0)->shape().element_type();
case HloOpcode::kDynamicSlice:
return operand_index == 0;
case HloOpcode::kDynamicUpdateSlice:
return operand_index == 0 || operand_index == 1;
case HloOpcode::kGather:
return operand_index == 0;
case HloOpcode::kSelect:
return operand_index == 1 || operand_index == 2;
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow: {
HloComputation* reduce_comp = hlo.called_computations()[0];
for (HloInstruction* inst : reduce_comp->instructions()) {
if (inst->opcode() == HloOpcode::kParameter) {
continue;
}
for (int64_t i = 0; i < inst->operand_count(); ++i) {
if (!EffectiveOperandPrecisionIsOutputPrecision(*inst, i)) {
return false;
}
}
}
return true;
}
default:
break;
}
return false;
}
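// Conservative default: no operand is treated as effectively low precision.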
bool FloatSupport::EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const {
return false;
}
} | #include <variant>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
namespace {
class FloatSupportTest : public HloTestBase {
public:
const se::GpuComputeCapability& GetGpuComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
};
class FloatSupportTestWithCublas : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
};
class FloatSupportTestWithTriton : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_triton_gemm_any(true);
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
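// With Triton GEMM disabled, a bf16 x bf16 -> f32 dot should lower straight
// to a cuBLAS call with no converts inserted to upcast the operands.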
TEST_F(FloatSupportTestWithCublas, MixedTypeDotIsNotUpcasted) {
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = bf16[32,32] parameter(0)
p1 = bf16[32,32] parameter(1)
ROOT d = f32[32,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK-NOT: convert
; CHECK: __cublas
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-6, 1e-6}));
}
TEST_F(FloatSupportTestWithTriton, MixedTypeDotWithBF16IsNotUpcasted) {
bool skip_test = std::visit(
VariantVisitor{[](const se::CudaComputeCapability& cc) {
return !cc.IsAtLeast(se::CudaComputeCapability::AMPERE);
},
[](const se::RocmComputeCapability&) { return true; }},
GetGpuComputeCapability());
if (skip_test) {
GTEST_SKIP() << "Not supported on this GPU architecture";
}
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = bf16[32,32] parameter(0)
p1 = bf16[32,32] parameter(1)
ROOT d = f32[32,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK-NOT: convert
; CHECK: __triton
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-6, 1e-6}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/float_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/float_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a38c3f36-9f9c-46a5-b6ed-575ad25977ee | cpp | tensorflow/tensorflow | call_graph | third_party/xla/xla/service/call_graph.cc | third_party/xla/xla/service/call_graph_test.cc | #include "xla/service/call_graph.h"
#include <deque>
#include <memory>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/map_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
using absl::StrAppendFormat;
using absl::StrCat;
std::string CallContextToString(CallContext context) {
switch (context) {
case CallContext::kNone:
return "kNone";
case CallContext::kControlFlow:
return "kControlFlow";
case CallContext::kEmbedded:
return "kEmbedded";
case CallContext::kBoth:
return "kBoth";
}
}
std::ostream& operator<<(std::ostream& out, const CallContext& context) {
out << CallContextToString(context);
return out;
}
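// Maps an opcode to the context in which it calls its computations:
// sequencing ops (call, conditional, while, async) call in a control-flow
// context, while ops whose computations run per element or per replica
// (map, reduce, fusion, collectives, ...) call in an embedded context.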
CallContext GetInstructionCallContext(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kWhile:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
return CallContext::kControlFlow;
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kTopK:
case HloOpcode::kFusion:
case HloOpcode::kCustomCall:
return CallContext::kEmbedded;
default:
return CallContext::kNone;
}
}
std::string CallSite::ToString() const {
return StrCat(
instruction()->name(), " calls in context ",
CallContextToString(context()), ": ",
absl::StrJoin(called_computations(), ", ",
[](std::string* out, const HloComputation* computation) {
absl::StrAppend(out, computation->name());
}));
}
CallGraphNode::CallGraphNode(HloComputation* computation)
: computation_(computation) {}
const CallSite* CallGraphNode::GetCallSite(
const HloInstruction* instruction) const {
auto it = callsite_instructions_.find(instruction);
if (it == callsite_instructions_.end()) {
return nullptr;
}
return &callsites_[it->second];
}
absl::string_view CallGraphNode::ToString() const {
return computation_->name();
}
void CallGraphNode::AddCallerCallSite(const CallSite& caller_callsite) {
caller_callsites_.push_back(caller_callsite);
HloComputation* caller = caller_callsite.instruction()->parent();
if (!ContainsKey(caller_set_, caller)) {
callers_.push_back(caller);
caller_set_.insert(caller);
}
}
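// Records `instruction` as a callsite of this node if it calls any
// computations, and adds each callee on an included execution thread to the
// deduplicated callee list.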
void CallGraphNode::AddCallSiteForInstruction(
HloInstruction* instruction,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
CHECK_EQ(instruction->parent(), computation());
const CallContext context = GetInstructionCallContext(instruction->opcode());
if (!instruction->called_computations().empty()) {
CHECK(context == CallContext::kControlFlow ||
context == CallContext::kEmbedded);
callsite_instructions_.insert({instruction, callsites_.size()});
callsites_.push_back(
CallSite(instruction, instruction->called_computations(), context));
for (auto* callee : callsites_.back().called_computations()) {
if (HloInstruction::IsThreadIncluded(callee->execution_thread(),
execution_threads) &&
!ContainsKey(callee_set_, callee)) {
callees_.push_back(callee);
callee_set_.insert(callee);
}
}
}
}
CallGraph::CallGraph(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: module_(module), execution_threads_(execution_threads) {}
const CallGraphNode& CallGraph::GetNode(
const HloComputation* computation) const {
DCHECK(node_indices_.contains(computation));
return nodes_[node_indices_.find(computation)->second];
}
CallGraphNode& CallGraph::GetNode(const HloComputation* computation) {
DCHECK(node_indices_.contains(computation));
return nodes_[node_indices_.find(computation)->second];
}
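// Recursive helper for Dominates: true if every chain of callers from `b`
// leads through `a`; `visited` breaks caller cycles and avoids re-walking
// shared callers.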
bool CallGraph::DominatesHelper(
const HloComputation* a, const HloComputation* b,
absl::flat_hash_set<const HloComputation*>* visited) const {
if (a == b || ContainsKey(*visited, b)) {
return true;
}
const CallGraphNode& b_node = GetNode(b);
if (b_node.callers().empty()) {
return false;
}
visited->insert(b);
for (const HloComputation* b_caller : b_node.callers()) {
if (!DominatesHelper(a, b_caller, visited)) {
return false;
}
}
return true;
}
bool CallGraph::Dominates(const HloComputation* a,
const HloComputation* b) const {
absl::flat_hash_set<const HloComputation*> visited;
return DominatesHelper(a, b, &visited);
}
bool CallGraph::CanReach(const HloComputation* a,
const HloComputation* b) const {
if (a == b) {
return true;
}
const CallGraphNode& b_node = GetNode(b);
for (const HloComputation* b_caller : b_node.callers()) {
if (CanReach(a, b_caller)) {
return true;
}
}
return false;
}
namespace {
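// Combines two call contexts: kNone is the identity, equal contexts stay
// put, and any disagreement widens to kBoth.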
CallContext UnionContexts(CallContext a, CallContext b) {
if (a == CallContext::kNone) {
return b;
} else if (b == CallContext::kNone) {
return a;
} else if (a == b) {
return a;
} else {
return CallContext::kBoth;
}
}
}
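// Propagates contexts from root (caller-less) computations through the graph
// with a worklist: an embedded callsite forces kEmbedded on the callee,
// while a control-flow callsite passes the caller's own context along; a
// node is re-queued whenever the union changes its context.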
void CallGraph::SetCallContexts() {
std::queue<CallGraphNode*> worklist;
for (const HloComputation* computation :
module_->computations(execution_threads_)) {
CallGraphNode& node = GetNode(computation);
if (node.callers().empty()) {
node.set_context(CallContext::kControlFlow);
worklist.push(&node);
}
}
while (!worklist.empty()) {
CallGraphNode* node = worklist.front();
worklist.pop();
for (const CallSite& callsite : node->callsites()) {
for (const HloComputation* callee : callsite.called_computations()) {
if (!HloInstruction::IsThreadIncluded(callee->execution_thread(),
execution_threads_)) {
continue;
}
CallGraphNode& callee_node = GetNode(callee);
CallContext context_to_add;
if (callsite.context() == CallContext::kEmbedded) {
context_to_add = CallContext::kEmbedded;
} else {
CHECK_EQ(callsite.context(), CallContext::kControlFlow);
context_to_add = node->context();
}
CallContext new_context =
UnionContexts(context_to_add, callee_node.context());
if (new_context != callee_node.context()) {
callee_node.set_context(new_context);
worklist.push(&callee_node);
}
}
}
}
for (const HloComputation* computation :
module_->computations(execution_threads_)) {
CHECK_NE(GetNode(computation).context(), CallContext::kNone);
}
}
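// Computes each node's depth as the length of the longest caller chain from
// a root computation, relaxing and re-queuing callees whenever a deeper path
// is found.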
void CallGraph::SetNodeDepths() {
std::queue<CallGraphNode*> worklist;
for (CallGraphNode& node : nodes_) {
node.set_depth(-1);
}
for (const HloComputation* computation :
module_->computations(execution_threads_)) {
CallGraphNode& node = GetNode(computation);
if (node.callers().empty()) {
node.set_depth(0);
worklist.push(&node);
}
}
while (!worklist.empty()) {
CallGraphNode* node = worklist.front();
worklist.pop();
for (const HloComputation* callee : node->callees()) {
CallGraphNode& callee_node = GetNode(callee);
if (callee_node.depth() < node->depth() + 1) {
callee_node.set_depth(node->depth() + 1);
worklist.push(&callee_node);
}
}
}
for (CallGraphNode& node : nodes_) {
CHECK_NE(node.depth(), -1);
}
}
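// Two passes over the module: first create one node per computation and
// record its outgoing callsites, then wire every callsite into its callees
// as caller edges. Contexts and depths are derived afterwards.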
std::unique_ptr<CallGraph> CallGraph::Build(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto call_graph =
absl::WrapUnique<CallGraph>(new CallGraph(module, execution_threads));
VLOG(3) << "Building call graph for:";
XLA_VLOG_LINES(3, module->ToString());
for (HloComputation* computation : module->computations(execution_threads)) {
auto it_added = call_graph->node_indices_.insert(
{computation, call_graph->nodes_.size()});
CHECK(it_added.second);
call_graph->nodes_.emplace_back(computation);
for (HloInstruction* instruction : computation->instructions()) {
call_graph->nodes_.back().AddCallSiteForInstruction(instruction,
execution_threads);
}
}
for (const HloComputation* computation :
module->computations(execution_threads)) {
for (const CallSite& callsite :
call_graph->GetNode(computation).callsites()) {
for (auto* callee : callsite.called_computations()) {
if (!HloInstruction::IsThreadIncluded(callee->execution_thread(),
execution_threads)) {
continue;
}
call_graph->GetNode(callee).AddCallerCallSite(callsite);
}
}
}
call_graph->SetCallContexts();
call_graph->SetNodeDepths();
XLA_VLOG_LINES(2, call_graph->ToString());
return call_graph;
}
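// Post-order DFS: all callees of `node` are visited before `visitor_func`
// runs on the node itself, and `visited` ensures each node is visited once.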
absl::Status CallGraph::VisitNodesInternal(
VisitorFunction visitor_func, const CallGraphNode& node,
absl::flat_hash_set<const CallGraphNode*>* visited) const {
auto pair = visited->insert(&node);
if (!pair.second) {
return absl::OkStatus();
}
for (const HloComputation* computation : node.callees()) {
TF_RETURN_IF_ERROR(
VisitNodesInternal(visitor_func, GetNode(computation), visited));
}
return visitor_func(node);
}
absl::Status CallGraph::VisitNodes(VisitorFunction visitor_func,
bool visit_unreachable_nodes) const {
absl::flat_hash_set<const CallGraphNode*> visited;
if (visit_unreachable_nodes) {
for (const CallGraphNode& node : nodes()) {
if (node.callers().empty()) {
TF_RETURN_IF_ERROR(VisitNodesInternal(visitor_func, node, &visited));
}
}
} else {
TF_RETURN_IF_ERROR(VisitNodesInternal(
visitor_func, GetNode(module_->entry_computation()), &visited));
}
return absl::OkStatus();
}
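// The graph is flat when no computation is used in both contexts and no
// non-async computation has more than one control-flow caller.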
bool CallGraph::IsFlattened() const {
for (const CallGraphNode& node : nodes_) {
if (node.context() == CallContext::kBoth) {
return false;
}
if (node.context() == CallContext::kControlFlow &&
!node.computation()->IsAsyncComputation() &&
node.caller_callsites().size() > 1) {
return false;
}
}
return true;
}
std::vector<HloInstruction*> CallGraph::GetComputationCallers(
const HloComputation* c) const {
std::vector<HloInstruction*> callers;
for (const auto& callsite : GetNode(c).caller_callsites()) {
callers.push_back(callsite.instruction());
}
return callers;
}
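// Walks `a` and `b` up through their callers, first equalizing call-graph
// depths and then stepping in lockstep until both land in the same
// computation. Returns {nullptr, nullptr} if a step hits a computation with
// an ambiguous (non-unique, non-async) caller or no common ancestor exists.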
std::pair<HloInstruction*, HloInstruction*>
CallGraph::NearestAncestorsInSameComputation(HloInstruction* a,
HloInstruction* b) const {
auto next_caller = [this](HloInstruction* instruction) -> HloInstruction* {
const CallGraphNode& node = GetNode(instruction->parent());
if (node.caller_callsites().size() != 1) {
if (instruction->parent()->IsAsyncComputation()) {
return node.caller_callsites()[0].instruction();
}
return nullptr;
}
return node.caller_callsites()[0].instruction();
};
HloInstruction* a_ancestor = a;
HloInstruction* b_ancestor = b;
int a_depth = GetNode(a->parent()).depth();
int b_depth = GetNode(b->parent()).depth();
if (a_depth > b_depth) {
for (int i = 0; i < a_depth - b_depth; ++i) {
a_ancestor = next_caller(a_ancestor);
if (a_ancestor == nullptr) {
return {nullptr, nullptr};
}
}
} else if (b_depth > a_depth) {
for (int i = 0; i < b_depth - a_depth; ++i) {
b_ancestor = next_caller(b_ancestor);
if (b_ancestor == nullptr) {
return {nullptr, nullptr};
}
}
}
while ((a_ancestor != nullptr) && (b_ancestor != nullptr)) {
if (a_ancestor->parent() == b_ancestor->parent()) {
return {a_ancestor, b_ancestor};
}
a_ancestor = next_caller(a_ancestor);
b_ancestor = next_caller(b_ancestor);
}
return {nullptr, nullptr};
}
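// Finds the nearest common ancestors of `starting_nodes` by running one BFS
// per node in lockstep and intersecting the visited sets after every round.
// For HloInstruction the frontier expands to users and control successors;
// for HloComputation it expands to calling computations. If any starting
// node is itself a common ancestor, only starting nodes are kept in the
// result.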
template <typename T>
absl::flat_hash_set<const T*> CallGraph::NearestCommonAncestorsHelper(
std::vector<const T*>& starting_nodes) {
CHECK(
(std::is_same_v<T, HloInstruction> || std::is_same_v<T, HloComputation>));
if (starting_nodes.empty()) {
return absl::flat_hash_set<const T*>();
}
if (starting_nodes.size() == 1) {
return absl::flat_hash_set<const T*>({starting_nodes[0]});
}
absl::flat_hash_set<const T*> nearest_common_ancestors;
std::vector<absl::flat_hash_set<const T*>> visited_ancestors;
visited_ancestors.reserve(starting_nodes.size());
for (int idx = 0; idx < starting_nodes.size(); ++idx) {
visited_ancestors.push_back(
absl::flat_hash_set<const T*>({starting_nodes[idx]}));
}
std::vector<std::deque<const T*>> bfs_queues;
bfs_queues.reserve(starting_nodes.size());
for (int idx = 0; idx < starting_nodes.size(); ++idx) {
bfs_queues.push_back(std::deque<const T*>({starting_nodes[idx]}));
}
auto is_bfs_finished = [&bfs_queues]() -> bool {
    return absl::c_all_of(
        bfs_queues,
        [](const std::deque<const T*>& queue) { return queue.empty(); });
};
auto find_common_nodes = [&visited_ancestors,
&nearest_common_ancestors]() -> bool {
absl::flat_hash_set<const T*> common_nodes(visited_ancestors[0]);
for (int idx = 1; idx < visited_ancestors.size(); ++idx) {
absl::erase_if(common_nodes, [&](auto k) {
return !visited_ancestors[idx].contains(k);
});
}
nearest_common_ancestors = common_nodes;
return !nearest_common_ancestors.empty();
};
while (!is_bfs_finished() && !find_common_nodes()) {
for (int idx = 0; idx < bfs_queues.size(); ++idx) {
auto cur_queue = bfs_queues[idx];
std::deque<const T*> next_queue;
auto& visited_ancestor = visited_ancestors[idx];
while (!cur_queue.empty()) {
const T* node = cur_queue.back();
cur_queue.pop_back();
std::vector<T*> ancestors_to_visit;
if constexpr (std::is_same_v<T, HloInstruction>) {
ancestors_to_visit = node->users();
ancestors_to_visit.insert(ancestors_to_visit.end(),
node->control_successors().begin(),
node->control_successors().end());
} else if constexpr (std::is_same_v<T, HloComputation>) {
for (auto caller_instruction : GetComputationCallers(node)) {
ancestors_to_visit.push_back(caller_instruction->parent());
}
}
for (auto ancestor : ancestors_to_visit) {
if (!visited_ancestor.contains(ancestor)) {
next_queue.push_back(ancestor);
visited_ancestor.insert(ancestor);
}
}
}
bfs_queues[idx] = next_queue;
}
}
  CHECK(!nearest_common_ancestors.empty())
      << "Expected at least one nearest common ancestor";
if (absl::c_any_of(starting_nodes, [&nearest_common_ancestors](const T* nca) {
return nearest_common_ancestors.contains(nca);
})) {
absl::erase_if(nearest_common_ancestors, [&starting_nodes](const T* nca) {
return std::find(starting_nodes.begin(), starting_nodes.end(), nca) ==
starting_nodes.end();
});
}
return nearest_common_ancestors;
}
absl::flat_hash_set<const HloComputation*>
CallGraph::NearestCommonAncestorComputations(
std::vector<const HloComputation*> computations) {
return NearestCommonAncestorsHelper<HloComputation>(computations);
}
absl::flat_hash_set<const HloInstruction*>
CallGraph::NearestCommonAncestorInstructions(
std::vector<const HloInstruction*> instructions) {
if (instructions.empty()) {
return absl::flat_hash_set<const HloInstruction*>();
}
auto computation = instructions[0]->parent();
CHECK(absl::c_all_of(instructions, [&computation](
const HloInstruction* instruction) {
return instruction->parent() == computation;
})) << "All provided instructions should be in the same computation";
return NearestCommonAncestorsHelper<HloInstruction>(instructions);
}
std::string CallGraph::ToString() const {
std::string out;
StrAppendFormat(&out, "Call graph for module %s:\n", module_->name());
for (const CallGraphNode& node : nodes()) {
StrAppendFormat(&out, "Computation %s:\n", node.computation()->name());
StrAppendFormat(&out, " calls:\n");
for (const HloComputation* callee : node.callees()) {
StrAppendFormat(&out, " %s\n", callee->name());
}
StrAppendFormat(&out, " called by:\n");
for (const HloComputation* caller : node.callers()) {
StrAppendFormat(&out, " %s\n", caller->name());
}
StrAppendFormat(&out, " callsites:\n");
for (const CallSite& callsite : node.callsites()) {
StrAppendFormat(&out, " %s\n", callsite.ToString());
}
}
return out;
}
} | #include "xla/service/call_graph.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
class CallGraphTest : public HloTestBase {
protected:
std::unique_ptr<HloComputation> MakeScalarComputation(
HloOpcode opcode = HloOpcode::kNegate) {
HloComputation::Builder builder(TestName() + ".ScalarComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(kScalarShape, opcode, param0));
return builder.Build();
}
std::unique_ptr<HloComputation> MakeMappingComputation(
HloComputation* map_computation, int64_t callsites) {
HloComputation::Builder builder(TestName() + ".MappingComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateMap(
kScalarShape, {last_value}, map_computation));
}
return builder.Build();
}
std::unique_ptr<HloComputation> MakeCallingComputation(
HloComputation* callee_computation, int64_t callsites,
const std::string& suffix = ".CallingComputation") {
HloComputation::Builder builder(TestName() + suffix);
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateCall(
kScalarShape, {last_value}, callee_computation));
}
return builder.Build();
}
std::unique_ptr<HloComputation> MakeConditionComputation() {
HloComputation::Builder builder(TestName() + ".ConditionComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
zero, ComparisonDirection::kGt));
return builder.Build();
}
const Shape kScalarShape = ShapeUtil::MakeShape(F32, {});
};
TEST_F(CallGraphTest, SingletonComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeScalarComputation());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(1, call_graph->nodes().size());
EXPECT_TRUE(call_graph->IsFlattened());
const CallGraphNode& node = call_graph->GetNode(computation);
EXPECT_EQ(computation, node.computation());
EXPECT_EQ(node.depth(), 0);
EXPECT_TRUE(node.callsites().empty());
EXPECT_TRUE(node.callees().empty());
EXPECT_TRUE(node.caller_callsites().empty());
EXPECT_TRUE(node.callers().empty());
EXPECT_EQ(CallContext::kControlFlow, node.context());
}
TEST_F(CallGraphTest, UnreachableComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(MakeScalarComputation());
HloComputation* unreachable_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(2, call_graph->nodes().size());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
EXPECT_EQ(entry_node.depth(), 0);
EXPECT_EQ(entry_computation, entry_node.computation());
EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
const CallGraphNode& unreachable_node =
call_graph->GetNode(unreachable_computation);
EXPECT_EQ(unreachable_node.depth(), 0);
EXPECT_EQ(unreachable_computation, unreachable_node.computation());
EXPECT_EQ(CallContext::kControlFlow, unreachable_node.context());
}
TEST_F(CallGraphTest, ParallelComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* map_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* entry_computation = module->AddEntryComputation(
MakeMappingComputation(map_computation, 5));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(2, call_graph->nodes().size());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
EXPECT_EQ(entry_computation, entry_node.computation());
EXPECT_EQ(entry_node.depth(), 0);
EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
EXPECT_EQ(5, entry_node.callsites().size());
EXPECT_EQ(1, entry_node.callees().size());
EXPECT_TRUE(entry_node.caller_callsites().empty());
EXPECT_TRUE(call_graph->GetComputationCallers(entry_computation).empty());
EXPECT_TRUE(entry_node.callers().empty());
const CallGraphNode& map_node = call_graph->GetNode(map_computation);
EXPECT_EQ(map_computation, map_node.computation());
EXPECT_EQ(map_node.depth(), 1);
EXPECT_EQ(CallContext::kEmbedded, map_node.context());
EXPECT_TRUE(map_node.callsites().empty());
EXPECT_TRUE(map_node.callees().empty());
EXPECT_EQ(5, map_node.caller_callsites().size());
EXPECT_EQ(5, call_graph->GetComputationCallers(map_computation).size());
EXPECT_EQ(1, map_node.callers().size());
}
TEST_F(CallGraphTest, SequentialComputations) {
auto module = CreateNewVerifiedModule();
HloComputation* called_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* entry_computation = module->AddEntryComputation(
MakeCallingComputation(called_computation, 3));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(2, call_graph->nodes().size());
EXPECT_FALSE(call_graph->IsFlattened());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
EXPECT_EQ(entry_computation, entry_node.computation());
EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
EXPECT_EQ(3, entry_node.callsites().size());
EXPECT_EQ(1, entry_node.callees().size());
EXPECT_TRUE(entry_node.caller_callsites().empty());
EXPECT_TRUE(call_graph->GetComputationCallers(entry_computation).empty());
EXPECT_TRUE(entry_node.callers().empty());
const CallGraphNode& called_node = call_graph->GetNode(called_computation);
EXPECT_EQ(called_computation, called_node.computation());
EXPECT_EQ(CallContext::kControlFlow, called_node.context());
EXPECT_TRUE(called_node.callsites().empty());
EXPECT_TRUE(called_node.callees().empty());
EXPECT_EQ(3, called_node.caller_callsites().size());
EXPECT_EQ(3, call_graph->GetComputationCallers(called_computation).size());
EXPECT_EQ(1, called_node.callers().size());
}
TEST_F(CallGraphTest, ContextBothComputations) {
auto module = CreateNewVerifiedModule();
HloComputation* subcomputation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, subcomputation));
HloInstruction* map = builder.AddInstruction(
HloInstruction::CreateMap(kScalarShape, {call}, subcomputation));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(2, call_graph->nodes().size());
EXPECT_FALSE(call_graph->IsFlattened());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
EXPECT_EQ(entry_computation, entry_node.computation());
EXPECT_EQ(2, entry_node.callsites().size());
const CallSite& call_callsite = entry_node.callsites()[0];
EXPECT_EQ(call, call_callsite.instruction());
EXPECT_THAT(call_callsite.called_computations(),
UnorderedElementsAre(subcomputation));
EXPECT_EQ(CallContext::kControlFlow, call_callsite.context());
EXPECT_EQ(entry_node.GetCallSite(call), &call_callsite);
const CallSite& map_callsite = entry_node.callsites()[1];
EXPECT_EQ(map, map_callsite.instruction());
EXPECT_THAT(map_callsite.called_computations(),
UnorderedElementsAre(subcomputation));
EXPECT_EQ(CallContext::kEmbedded, map_callsite.context());
EXPECT_EQ(entry_node.GetCallSite(map), &map_callsite);
const CallGraphNode& sub_node = call_graph->GetNode(subcomputation);
EXPECT_EQ(sub_node.depth(), 1);
EXPECT_EQ(CallContext::kBoth, sub_node.context());
}
TEST_F(CallGraphTest, ComputationWithConditional) {
auto module = CreateNewVerifiedModule();
HloComputation* true_computation =
module->AddEmbeddedComputation(MakeScalarComputation(HloOpcode::kCeil));
HloComputation* false_computation =
module->AddEmbeddedComputation(MakeScalarComputation(HloOpcode::kFloor));
HloComputation::Builder builder(TestName());
HloInstruction* pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloInstruction* const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
HloInstruction* const2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.6f)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
kScalarShape, pred, const1, true_computation, const2,
false_computation));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(3, call_graph->nodes().size());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
EXPECT_EQ(entry_node.depth(), 0);
EXPECT_EQ(entry_computation, entry_node.computation());
EXPECT_EQ(1, entry_node.callsites().size());
const CallSite& conditional_callsite = entry_node.callsites()[0];
EXPECT_EQ(conditional, conditional_callsite.instruction());
EXPECT_THAT(conditional_callsite.called_computations(),
UnorderedElementsAre(true_computation, false_computation));
EXPECT_EQ(CallContext::kControlFlow, conditional_callsite.context());
EXPECT_EQ(entry_node.GetCallSite(conditional), &conditional_callsite);
const CallGraphNode& true_node = call_graph->GetNode(true_computation);
EXPECT_EQ(true_node.depth(), 1);
EXPECT_TRUE(true_node.callees().empty());
EXPECT_EQ(1, true_node.callers().size());
EXPECT_EQ(entry_computation, true_node.callers()[0]);
const CallGraphNode& false_node = call_graph->GetNode(false_computation);
EXPECT_EQ(false_node.depth(), 1);
EXPECT_TRUE(false_node.callees().empty());
EXPECT_EQ(1, false_node.callers().size());
EXPECT_EQ(entry_computation, false_node.callers()[0]);
}
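// entry and a are while bodies; a calls c directly (control flow) while b
// maps over c (embedded), so c's context must widen to kBoth and the depths
// are entry=0, a=1, b=cond=2, c=3.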
TEST_F(CallGraphTest, ComplexGraph) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation =
module->AddEmbeddedComputation(MakeConditionComputation());
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeMappingComputation(c_computation, 1));
HloComputation* a_computation;
{
HloComputation::Builder builder(TestName() + ".a");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, b_computation, call));
a_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, a_computation, param0));
entry_computation = module->AddEntryComputation(builder.Build());
}
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(5, call_graph->nodes().size());
EXPECT_FALSE(call_graph->IsFlattened());
const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
const CallGraphNode& a_node = call_graph->GetNode(a_computation);
const CallGraphNode& b_node = call_graph->GetNode(b_computation);
const CallGraphNode& c_node = call_graph->GetNode(c_computation);
const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
EXPECT_EQ(entry_node.depth(), 0);
EXPECT_EQ(a_node.depth(), 1);
EXPECT_EQ(b_node.depth(), 2);
EXPECT_EQ(c_node.depth(), 3);
EXPECT_EQ(cond_node.depth(), 2);
ASSERT_EQ(1, entry_node.callsites().size());
auto called_computations = entry_node.callsites()[0].called_computations();
EXPECT_THAT(called_computations,
UnorderedElementsAre(cond_computation, a_computation));
EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
EXPECT_TRUE(c_node.callsites().empty());
EXPECT_THAT(c_node.callers(),
UnorderedElementsAre(a_computation, b_computation));
EXPECT_EQ(CallContext::kBoth, c_node.context());
std::vector<const HloComputation*> visited;
TF_ASSERT_OK(call_graph->VisitNodes([&visited](const CallGraphNode& node) {
visited.push_back(node.computation());
return absl::OkStatus();
}));
EXPECT_EQ(visited.size(), 5);
EXPECT_EQ(
absl::flat_hash_set<const HloComputation*>(visited.begin(), visited.end())
.size(),
5);
auto index_of = [&visited](const HloComputation* comp) {
auto it = absl::c_find(visited, comp);
EXPECT_NE(it, visited.end());
return std::distance(visited.begin(), it);
};
EXPECT_EQ(4, index_of(entry_computation));
EXPECT_LT(index_of(cond_computation), index_of(a_computation));
EXPECT_LT(index_of(c_computation), index_of(b_computation));
EXPECT_LT(index_of(b_computation), index_of(a_computation));
EXPECT_TRUE(call_graph->Dominates(entry_computation, entry_computation));
EXPECT_TRUE(call_graph->Dominates(entry_computation, a_computation));
EXPECT_TRUE(call_graph->Dominates(entry_computation, b_computation));
EXPECT_TRUE(call_graph->Dominates(entry_computation, c_computation));
EXPECT_TRUE(call_graph->Dominates(entry_computation, cond_computation));
EXPECT_FALSE(call_graph->Dominates(a_computation, entry_computation));
EXPECT_FALSE(call_graph->Dominates(b_computation, entry_computation));
EXPECT_FALSE(call_graph->Dominates(c_computation, entry_computation));
EXPECT_FALSE(call_graph->Dominates(cond_computation, entry_computation));
EXPECT_TRUE(call_graph->Dominates(a_computation, a_computation));
EXPECT_TRUE(call_graph->Dominates(a_computation, b_computation));
EXPECT_TRUE(call_graph->Dominates(a_computation, c_computation));
EXPECT_FALSE(call_graph->Dominates(b_computation, a_computation));
EXPECT_FALSE(call_graph->Dominates(c_computation, a_computation));
EXPECT_FALSE(call_graph->Dominates(a_computation, cond_computation));
EXPECT_TRUE(call_graph->Dominates(b_computation, b_computation));
EXPECT_FALSE(call_graph->Dominates(b_computation, c_computation));
EXPECT_FALSE(call_graph->Dominates(b_computation, cond_computation));
EXPECT_TRUE(call_graph->Dominates(c_computation, c_computation));
EXPECT_FALSE(call_graph->Dominates(c_computation, cond_computation));
EXPECT_FALSE(call_graph->Dominates(cond_computation, c_computation));
EXPECT_TRUE(call_graph->Dominates(cond_computation, cond_computation));
}
TEST_F(CallGraphTest, ComplexGraphNearestAncestors) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation =
module->AddEmbeddedComputation(MakeConditionComputation());
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeMappingComputation(c_computation, 1));
HloInstruction* b_map = b_computation->root_instruction();
HloComputation* a_computation;
HloInstruction* a_call;
HloInstruction* a_while;
{
HloComputation::Builder builder(TestName() + ".a");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
a_call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
a_while = builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, b_computation, a_call));
a_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
HloInstruction* entry_while;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
entry_while = builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, a_computation, param0));
entry_computation = module->AddEntryComputation(builder.Build());
}
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(5, call_graph->nodes().size());
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_call, a_call),
std::make_pair(a_call, a_call));
std::pair<HloInstruction*, HloInstruction*> null_pair = {nullptr, nullptr};
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(
b_map, c_computation->root_instruction()),
null_pair);
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(b_map, entry_while),
std::make_pair(entry_while, entry_while));
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(b_map, a_call),
std::make_pair(a_while, a_call));
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_while, a_call),
std::make_pair(a_while, a_call));
EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_while, b_map),
std::make_pair(a_while, a_while));
}
TEST_F(CallGraphTest, NearestCommonAncestorInstructions) {
const std::string& hlo_string = R"(
HloModule module
ENTRY computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
p.2 = f32[10] parameter(2)
mul.0 = f32[10] multiply(p.1, p.2)
sub.0 = f32[10] subtract(add.0, mul.0)
add.1 = f32[10] add(add.0, p.2)
ROOT add.2 = f32[10] add(sub.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
namespace op = testing::opcode_matchers;
auto p0 = FindInstruction(hlo_module.get(), "p.0");
EXPECT_THAT(p0, op::Parameter());
auto p1 = FindInstruction(hlo_module.get(), "p.1");
EXPECT_THAT(p1, op::Parameter());
auto p2 = FindInstruction(hlo_module.get(), "p.2");
EXPECT_THAT(p2, op::Parameter());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto mul0 = FindInstruction(hlo_module.get(), "mul.0");
EXPECT_THAT(mul0, op::Multiply());
auto sub0 = FindInstruction(hlo_module.get(), "sub.0");
EXPECT_THAT(sub0, op::Subtract());
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto add2 = FindInstruction(hlo_module.get(), "add.2");
EXPECT_THAT(add2, op::Add());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
EXPECT_EQ(1, call_graph->nodes().size());
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p0})),
absl::flat_hash_set<const HloInstruction*>({p0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1})),
absl::flat_hash_set<const HloInstruction*>({add0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1, p2})),
absl::flat_hash_set<const HloInstruction*>({sub0, add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, add1})),
absl::flat_hash_set<const HloInstruction*>({add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1, add0})),
absl::flat_hash_set<const HloInstruction*>({add0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p2})),
absl::flat_hash_set<const HloInstruction*>({sub0, add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, add2})),
absl::flat_hash_set<const HloInstruction*>({add2}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p2, mul0, sub0})),
absl::flat_hash_set<const HloInstruction*>({sub0}));
}
TEST_F(CallGraphTest, NearestCommonAncestorComputations) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation =
module->AddEmbeddedComputation(MakeConditionComputation());
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeMappingComputation(c_computation, 1));
HloComputation* a_computation;
{
HloComputation::Builder builder(TestName() + ".a");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* a_call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, b_computation, a_call));
a_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, a_computation, param0));
entry_computation = module->AddEntryComputation(builder.Build());
}
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(5, call_graph->nodes().size());
EXPECT_EQ(
call_graph->NearestCommonAncestorComputations(
std::vector<const HloComputation*>({a_computation, a_computation})),
absl::flat_hash_set<const HloComputation*>({a_computation}));
EXPECT_EQ(
call_graph->NearestCommonAncestorComputations(
std::vector<const HloComputation*>({b_computation, c_computation})),
absl::flat_hash_set<const HloComputation*>({b_computation}));
EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
std::vector<const HloComputation*>(
{a_computation, b_computation, c_computation})),
absl::flat_hash_set<const HloComputation*>({a_computation}));
EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
std::vector<const HloComputation*>(
{c_computation, cond_computation})),
absl::flat_hash_set<const HloComputation*>({a_computation}));
EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
std::vector<const HloComputation*>(
{b_computation, cond_computation})),
absl::flat_hash_set<const HloComputation*>({a_computation}));
}
TEST_F(CallGraphTest, VisitSingletonComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeScalarComputation());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
std::vector<HloComputation*> visited;
TF_ASSERT_OK(call_graph->VisitNodes([&visited](const CallGraphNode& node) {
visited.push_back(node.computation());
return absl::OkStatus();
}));
EXPECT_THAT(visited, UnorderedElementsAre(computation));
}
TEST_F(CallGraphTest, VisitUnreachableComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(MakeScalarComputation());
HloComputation* unreachable_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
{
std::vector<const HloComputation*> visited;
TF_ASSERT_OK(call_graph->VisitNodes(
[&visited](const CallGraphNode& node) {
visited.push_back(node.computation());
return absl::OkStatus();
},
false));
EXPECT_EQ(visited.size(), 1);
EXPECT_EQ(visited[0], entry_computation);
}
{
std::vector<HloComputation*> visited;
TF_ASSERT_OK(call_graph->VisitNodes(
[&visited](const CallGraphNode& node) {
visited.push_back(node.computation());
return absl::OkStatus();
},
true));
EXPECT_EQ(visited.size(), 2);
EXPECT_THAT(visited, UnorderedElementsAre(entry_computation,
unreachable_computation));
}
}
TEST_F(CallGraphTest, VisitWithError) {
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(MakeScalarComputation());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
absl::Status status = call_graph->VisitNodes(
[](const CallGraphNode&) { return Internal("Visitation failed"); });
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), tsl::error::INTERNAL);
ASSERT_THAT(status.message(), ::testing::HasSubstr("Visitation failed"));
}
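// Building with an execution-thread filter should restrict the graph to
// computations on the included threads only.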
TEST_F(CallGraphTest, ExecutionThread) {
HloComputation::Builder builder(TestName());
constexpr char kParallelThreadName[] = "parallel_thread";
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
kScalarShape, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* main_thread_computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
main_thread_computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, kParallelThreadName));
auto* parallel_thread_computation = async_done->async_wrapped_computation();
{
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(call_graph->nodes().size(), 2);
const CallGraphNode& main_thread_node =
call_graph->GetNode(main_thread_computation);
const CallGraphNode& parallel_thread_node =
call_graph->GetNode(parallel_thread_computation);
EXPECT_EQ(main_thread_node.callers().size(), 0);
EXPECT_EQ(main_thread_node.callees().size(), 1);
EXPECT_EQ(main_thread_node.depth(), 0);
EXPECT_EQ(parallel_thread_node.callers().size(), 1);
EXPECT_EQ(parallel_thread_node.callees().size(), 0);
EXPECT_EQ(parallel_thread_node.depth(), 1);
}
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module.get(), {HloInstruction::kMainExecutionThread});
EXPECT_EQ(call_graph->nodes().size(), 1);
const CallGraphNode& main_thread_node =
call_graph->GetNode(main_thread_computation);
EXPECT_EQ(main_thread_node.callers().size(), 0);
EXPECT_EQ(main_thread_node.callees().size(), 0);
EXPECT_EQ(main_thread_node.depth(), 0);
}
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module.get(), {kParallelThreadName});
EXPECT_EQ(call_graph->nodes().size(), 1);
const CallGraphNode& parallel_thread_node =
call_graph->GetNode(parallel_thread_computation);
EXPECT_EQ(parallel_thread_node.callers().size(), 0);
EXPECT_EQ(parallel_thread_node.callees().size(), 0);
EXPECT_EQ(parallel_thread_node.depth(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d4cc1ad9-6c79-4431-a236-1dfb153bfd58 | cpp | tensorflow/tensorflow | flatten_call_graph | third_party/xla/xla/service/flatten_call_graph.cc | third_party/xla/xla/service/flatten_call_graph_test.cc | #include "xla/service/flatten_call_graph.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/call_graph.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
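// Repoints one called-computation edge of `instruction` from `computation`
// to `new_computation`, using the accessor appropriate to the calling
// opcode (while condition/body, call target, conditional branch, async
// computation).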
void ReplaceCalledComputation(HloInstruction* instruction,
HloComputation* computation,
HloComputation* new_computation) {
switch (instruction->opcode()) {
case HloOpcode::kWhile: {
if (computation == instruction->while_condition()) {
instruction->set_while_condition(new_computation);
} else {
CHECK_EQ(computation, instruction->while_body());
instruction->set_while_body(new_computation);
}
break;
}
case HloOpcode::kCall: {
CHECK_EQ(instruction->to_apply(), computation);
instruction->set_to_apply(new_computation);
break;
}
case HloOpcode::kConditional: {
for (int b = 0; b < instruction->branch_count(); ++b) {
if (b == instruction->branch_count() - 1) {
CHECK_EQ(computation, instruction->branch_computation(b));
}
if (computation == instruction->branch_computation(b)) {
instruction->set_branch_computation(b, new_computation);
break;
}
}
break;
}
case HloOpcode::kAsyncStart: {
CHECK(computation->IsAsyncComputation());
computation->RemoveAsyncStart();
instruction->ReplaceCalledComputations(
[&](HloComputation*) { return new_computation; });
new_computation->AddAsyncStart(instruction);
break;
}
default:
LOG(FATAL) << "unexpected opcode: " << instruction->opcode();
}
}
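// Ensures `node`'s computation has at most one control-flow caller: every
// control-flow callsite beyond the first (or every one, if the computation
// is also used in an embedded context) is redirected to a fresh clone, and
// the clone's own control-flow callees are cloned transitively via a
// worklist. Embedded callsites are left untouched.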
absl::Status FlattenNode(const CallGraphNode& node) {
HloComputation* computation = node.computation();
HloModule* module = computation->parent();
for (int i = 0; i < node.caller_callsites().size(); ++i) {
CallSite call_site = node.caller_callsites()[i];
if (call_site.context() == CallContext::kEmbedded) {
continue;
}
CHECK_EQ(call_site.context(), CallContext::kControlFlow);
if (node.context() != CallContext::kBoth && i == 0) {
continue;
}
HloComputation* clone =
module->AddEmbeddedComputation(computation->Clone());
ReplaceCalledComputation(call_site.instruction(), computation, clone);
std::vector<HloComputation*> worklist;
worklist.push_back(clone);
while (!worklist.empty()) {
auto current = worklist.back();
worklist.pop_back();
for (auto* instruction : current->instructions()) {
if (GetInstructionCallContext(instruction->opcode()) !=
CallContext::kControlFlow) {
continue;
}
for (auto callee : instruction->called_computations()) {
HloComputation* callee_clone =
module->AddEmbeddedComputation(callee->Clone());
ReplaceCalledComputation(instruction, callee, callee_clone);
worklist.push_back(callee_clone);
}
}
}
}
return absl::OkStatus();
}
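// After flattening, records the now-unique calling instruction on each
// called computation (fusion, custom-call, collective, while body,
// conditional branch) so it can be traced back from the computation.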
absl::Status AnnotateNode(const CallGraphNode& node) {
for (auto& callsite : node.callsites()) {
HloInstruction* instruction = callsite.instruction();
if (instruction->opcode() == HloOpcode::kFusion) {
for (HloComputation* computation : instruction->called_computations()) {
computation->SetFusionInstruction(instruction);
}
} else if (instruction->opcode() == HloOpcode::kCustomCall) {
for (HloComputation* computation : instruction->called_computations()) {
computation->SetCustomCallInstruction(instruction);
}
} else if (hlo_query::IsCollectiveCommunicationOp(instruction->opcode())) {
for (HloComputation* computation : instruction->called_computations()) {
computation->SetCollectiveCallInstruction(instruction);
}
} else if (instruction->opcode() == HloOpcode::kWhile) {
instruction->while_body()->SetWhileCallInstruction(instruction);
} else if (instruction->opcode() == HloOpcode::kConditional) {
for (HloComputation* branch : instruction->branch_computations()) {
branch->SetConditionalCallInstruction(instruction);
}
}
}
return absl::OkStatus();
}
}
absl::StatusOr<bool> FlattenCallGraph::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(3, "Before flatten call graph:\n" + module->ToString());
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(FlattenNode));
}
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(AnnotateNode));
}
XLA_VLOG_LINES(3, "After flatten call graph:\n" + module->ToString());
return true;
}
} | #include "xla/service/flatten_call_graph.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class FlattenCallGraphTest : public HloTestBase {
protected:
std::unique_ptr<HloComputation> MakeScalarComputation() {
HloComputation::Builder builder(TestName() + ".ScalarComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(kScalarShape, HloOpcode::kNegate, param0));
return builder.Build();
}
std::unique_ptr<HloComputation> MakeMappingComputation(
HloComputation* map_computation, int64_t callsites) {
HloComputation::Builder builder(TestName() + ".MappingComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateMap(
kScalarShape, {last_value}, map_computation));
}
return builder.Build();
}
std::unique_ptr<HloComputation> MakeCallingComputation(
HloComputation* callee_computation, int64_t callsites,
const std::string& suffix = ".CallingComputation") {
HloComputation::Builder builder(TestName() + suffix);
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateCall(
kScalarShape, {last_value}, callee_computation));
}
return builder.Build();
}
std::unique_ptr<HloComputation> MakeConditionComputation() {
HloComputation::Builder builder(TestName() + ".ConditionComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
zero, ComparisonDirection::kGt));
return builder.Build();
}
absl::StatusOr<bool> RunFlattenCallGraph(HloModule* module) {
FlattenCallGraph flatten;
TF_ASSIGN_OR_RETURN(bool result, flatten.Run(module));
return result;
}
const Shape kScalarShape = ShapeUtil::MakeShape(F32, {});
};
TEST_F(FlattenCallGraphTest, ComplexGraph) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation =
module->AddEmbeddedComputation(MakeConditionComputation());
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeMappingComputation(c_computation, 1));
HloComputation* a_computation;
{
HloComputation::Builder builder(TestName() + ".a");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, b_computation, call));
a_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, a_computation, param0));
entry_computation = module->AddEntryComputation(builder.Build());
}
{
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> flat_call_graph = CallGraph::Build(module.get());
const CallGraphNode& c_node = flat_call_graph->GetNode(c_computation);
EXPECT_EQ(1, c_node.caller_callsites().size());
}
}
TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation;
{
HloComputation::Builder builder(TestName() + ".cond");
HloInstruction* param0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(PRED, {}), "param0"));
HloInstruction* false_constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), param0, false_constant,
ComparisonDirection::kEq));
cond_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* false_constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateWhile(
ShapeUtil::MakeShape(PRED, {}), cond_computation, cond_computation,
false_constant));
entry_computation = module->AddEntryComputation(builder.Build());
}
{
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
EXPECT_EQ(2, cond_node.caller_callsites().size());
}
{
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
EXPECT_EQ(1, cond_node.caller_callsites().size());
}
}
TEST_F(FlattenCallGraphTest, FlattenCalls) {
auto module = CreateNewVerifiedModule();
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeCallingComputation(c_computation, 2, ".B"));
module->AddEntryComputation(
MakeCallingComputation(b_computation, 2, ".Entry"));
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(7, module->computation_count());
const CallGraphNode& c_node = call_graph->GetNode(c_computation);
EXPECT_EQ(1, c_node.caller_callsites().size());
const CallGraphNode& b_node = call_graph->GetNode(b_computation);
EXPECT_EQ(1, b_node.caller_callsites().size());
}
TEST_F(FlattenCallGraphTest, FlattenCallsInConditional) {
auto module = CreateNewVerifiedModule();
HloComputation* sub_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation::Builder builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
builder.AddInstruction(HloInstruction::CreateConditional(
kScalarShape, pred, constant1, sub_computation, constant2,
sub_computation));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, module->computation_count());
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(3, module->computation_count());
const CallGraphNode& sub_node = call_graph->GetNode(sub_computation);
EXPECT_EQ(1, sub_node.caller_callsites().size());
}
TEST_F(FlattenCallGraphTest, AsyncCall) {
std::string hlo_string = R"(
HloModule AsyncCall
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %param_0, f32[4096]{0} %param_1)
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%call-start.0 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation
%call-done.0 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %call-start.0)
%call-start.1 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %call-done.0, f32[4096]{0} %b), to_apply=%called_computation
%call-done.1 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %call-start.1)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %a, f32[4096]{0} %call-done.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
EXPECT_EQ(5, module->computation_count());
EXPECT_EQ(FindInstruction(module.get(), "call-start.0")
->async_wrapped_computation(),
FindInstruction(module.get(), "call-done.0")
->async_wrapped_computation());
EXPECT_EQ(FindInstruction(module.get(), "call-start.1")
->async_wrapped_computation(),
FindInstruction(module.get(), "call-done.1")
->async_wrapped_computation());
EXPECT_NE(FindInstruction(module.get(), "call-start.0")
->async_wrapped_computation(),
FindInstruction(module.get(), "call-start.1")
->async_wrapped_computation());
EXPECT_NE(FindInstruction(module.get(), "call-start.0")
->async_wrapped_instruction()
->called_computations()[0],
FindInstruction(module.get(), "call-start.1")
->async_wrapped_instruction()
->called_computations()[0]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/flatten_call_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/flatten_call_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4498a8c2-e52a-4d92-ab97-db20211d4a22 | cpp | tensorflow/tensorflow | custom_call_status | third_party/xla/xla/service/custom_call_status.cc | third_party/xla/xla/service/custom_call_status_test.cc | #include "xla/service/custom_call_status_internal.h"
namespace xla {
std::optional<absl::string_view> CustomCallStatusGetMessage(
const XlaCustomCallStatus* status) {
return status->message;
}
}
void XlaCustomCallStatusSetSuccess(XlaCustomCallStatus* status) {
status->message = std::nullopt;
}
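// Records a failure message, truncating it at `message_len` bytes or at the
// first NUL byte, whichever comes first.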
void XlaCustomCallStatusSetFailure(XlaCustomCallStatus* status,
const char* message, size_t message_len) {
status->message = std::string(message, 0, message_len);
} | #include "xla/service/custom_call_status_internal.h"
#include "xla/service/custom_call_status_test_c_caller.h"
#include "tsl/platform/test.h"
TEST(XlaCustomCallStatusTest, DefaultIsSuccess) {
XlaCustomCallStatus status;
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccessAfterFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureAfterSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtGivenLength) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 4);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "erro");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtNullTerminator) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 100);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, CSetSuccess) {
XlaCustomCallStatus status;
CSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, CSetFailure) {
XlaCustomCallStatus status;
CSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64ec5410-da1a-423b-b41f-7765620e772f | cpp | tensorflow/tensorflow | while_loop_invariant_code_motion | third_party/xla/xla/service/while_loop_invariant_code_motion.cc | third_party/xla/xla/service/while_loop_invariant_code_motion_test.cc | #include "xla/service/while_loop_invariant_code_motion.h"
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::InlinedVector;
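// Clones `to_hoist` and its transitive operands from the body of
// `while_instr` into the computation containing `while_instr`. Uses of the
// while-body parameter are rewritten to use the while's init operand.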
static void CreateLoopInvariantCopy(
flat_hash_map<HloInstruction*, HloInstruction*>* hoisted_instructions,
flat_hash_set<HloInstruction*>* unhoisted_invariant_instructions,
HloInstruction* while_instr, HloInstruction* to_hoist) {
HloComputation* parent_of_while = while_instr->parent();
HloComputation* while_body = while_instr->while_body();
struct DFSFrame {
HloInstruction* instruction;
int64_t operand_index;
};
InlinedVector<DFSFrame, 8> dfs_stack;
dfs_stack.push_back({to_hoist, 0});
HloInstruction* while_body_param = while_body->parameter_instruction(0);
HloInstruction* while_operand = while_instr->mutable_operand(0);
do {
DFSFrame* frame = &dfs_stack.back();
if (frame->operand_index == frame->instruction->operand_count()) {
HloInstruction* old_instruction = frame->instruction;
auto get_new_operand = [&](HloInstruction* old_operand) {
return old_operand == while_body_param
? while_operand
: FindOrDie(*hoisted_instructions, old_operand);
};
InlinedVector<HloInstruction*, 4> new_operands;
absl::c_transform(old_instruction->operands(),
std::back_inserter(new_operands), get_new_operand);
HloInstruction* new_instruction =
parent_of_while->AddInstruction(old_instruction->CloneWithNewOperands(
old_instruction->shape(), new_operands));
InsertOrDie(hoisted_instructions, old_instruction, new_instruction);
CHECK_EQ(unhoisted_invariant_instructions->erase(old_instruction),
to_hoist != old_instruction &&
old_instruction->opcode() != HloOpcode::kConstant);
dfs_stack.pop_back();
continue;
}
HloInstruction* next_operand =
frame->instruction->mutable_operand(frame->operand_index++);
if (hoisted_instructions->contains(next_operand) ||
next_operand == while_body_param) {
continue;
}
dfs_stack.push_back({next_operand, 0});
} while (!dfs_stack.empty());
}
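// Returns true for instructions that are too cheap to be worth hoisting on
// their own; they are still cloned along with any profitable user that does
// get hoisted.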
bool WhileLoopInvariantCodeMotion::NotWorthHoistingIndividually(
const HloInstruction& instruction) {
if (instruction.IsCustomCall("Sharding")) {
return true;
}
switch (instruction.opcode()) {
default:
return false;
case HloOpcode::kConstant:
return !hoist_constants_;
case HloOpcode::kReshape:
return !hoist_reshapes_;
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kIota:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
return true;
}
}
absl::StatusOr<bool>
WhileLoopInvariantCodeMotion::TryHoistingInvariantInstructionsFromWhileBody(
HloInstruction* while_instr, BoundNonLinearCompilerAnalysis* allowance) {
auto print_no_metadata = HloPrintOptions{}.set_print_metadata(false);
if (!while_instr->shape().IsTuple()) {
return false;
}
std::string while_instr_name = while_instr->ToString(print_no_metadata);
VLOG(2) << "Trying to hoist from " << while_instr_name;
auto maybe_upper_bound = ComputeWhileLoopTripCountUpperBound(while_instr);
if (maybe_upper_bound && *maybe_upper_bound <= 1) {
VLOG(2) << "Loop has a trip count of at most 1, skipping.";
return false;
}
HloComputation* while_body = while_instr->while_body();
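  // Maps instructions in the while body to their hoisted copies in the
  // enclosing computation. `unhoisted_invariant_instructions` tracks
  // invariant instructions that were not profitable enough to hoist alone.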
flat_hash_map<HloInstruction*, HloInstruction*> hoisted_instructions;
flat_hash_set<HloInstruction*> unhoisted_invariant_instructions;
for (auto* instr : WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
if (instr->shape().IsArray()) {
InsertOrDie(&unhoisted_invariant_instructions, instr);
}
}
if (unhoisted_invariant_instructions.empty() && !hoist_constants_) {
return false;
}
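  // Hoisting across domain boundaries or manual-sharding regions is not
  // safe, so bail out if the body contains any.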
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kDomain ||
instruction->IsCustomCall("SPMDFullToShardShape") ||
instruction->IsCustomCall("SPMDShardShapeToFull")) {
return false;
}
}
std::vector<HloInstruction*> instructions_to_replace;
std::vector<HloInstruction*> replacement_instructions;
for (auto* instruction : while_body->MakeInstructionPostOrder()) {
allowance->DeductCost(1);
if (!allowance->ContinueAnalysis()) {
return false;
}
if (instruction->HasSideEffect() ||
instruction->opcode() == HloOpcode::kAfterAll ||
instruction->opcode() == HloOpcode::kParameter ||
!instruction->control_predecessors().empty() ||
!instruction->control_successors().empty()) {
continue;
}
if (!hoist_other_ && instruction->opcode() != HloOpcode::kConstant &&
instruction->opcode() != HloOpcode::kReshape) {
continue;
}
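    // Skip instructions whose output is much larger than their combined
    // inputs; hoisting them would keep a large value live for the whole loop.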
if (hoist_size_inflation_ratio_ &&
instruction->opcode() != HloOpcode::kConstant) {
int64_t input_size = 0, output_size = 0;
for (auto* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(
operand->shape(), [&input_size, this](const Shape& subshape,
                                                  const ShapeIndex& /*index*/) {
if (subshape.IsArray()) {
input_size += shape_size_function_(subshape);
}
});
}
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&output_size, this](const Shape& subshape,
                               const ShapeIndex& /*index*/) {
if (subshape.IsArray()) {
output_size += shape_size_function_(subshape);
}
});
if (output_size > input_size * *hoist_size_inflation_ratio_) {
continue;
}
}
auto is_invariant = [&](HloInstruction* op) {
return hoisted_instructions.find(op) != hoisted_instructions.end() ||
unhoisted_invariant_instructions.contains(op) ||
op->opcode() == HloOpcode::kConstant;
};
if (!absl::c_all_of(instruction->operands(), is_invariant)) {
continue;
}
if (NotWorthHoistingIndividually(*instruction)) {
VLOG(2) << "Adding " << instruction->ToString(print_no_metadata)
<< " to unhoisted invariant set.";
if (instruction->opcode() != HloOpcode::kConstant) {
InsertOrDie(&unhoisted_invariant_instructions, instruction);
}
continue;
}
VLOG(2) << "Hoisting " << instruction->ToString(print_no_metadata);
CreateLoopInvariantCopy(&hoisted_instructions,
&unhoisted_invariant_instructions, while_instr,
instruction);
instructions_to_replace.push_back(instruction);
replacement_instructions.push_back(
FindOrDie(hoisted_instructions, instruction));
}
if (instructions_to_replace.empty()) {
return false;
}
TF_ASSIGN_OR_RETURN(
WhileUtil::MakeInstructionsLiveInResult live_in_instructions_result,
WhileUtil::MakeInstructionsLiveIn(while_instr, replacement_instructions));
HloComputation* new_while_body =
live_in_instructions_result.new_while_instr->while_body();
for (int i = 0; i < instructions_to_replace.size(); i++) {
HloInstruction* instruction_to_replace_in_new_while =
FindOrDie(live_in_instructions_result.while_body_instruction_map,
instructions_to_replace[i]);
TF_RETURN_IF_ERROR(new_while_body->ReplaceInstruction(
instruction_to_replace_in_new_while,
live_in_instructions_result.while_body_live_in_values[i]));
}
VLOG(1) << "Hoisted " << instructions_to_replace.size()
<< " instructions from " << while_instr_name;
return true;
}
absl::StatusOr<bool> WhileLoopInvariantCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before WhileLoopInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
BoundNonLinearCompilerAnalysis allowance(module, name(), 10);
for (HloInstruction* while_instr : while_instrs) {
if (!allowance.ContinueAnalysis()) {
break;
}
TF_ASSIGN_OR_RETURN(
bool result,
TryHoistingInvariantInstructionsFromWhileBody(while_instr, &allowance));
changed |= result;
}
if (changed) {
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module).status());
}
if (changed) {
VLOG(2) << "HLO module after WhileLoopInvariantCodeMotion:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after WhileLoopInvariantCodeMotion";
}
return changed;
}
} | #include "xla/service/while_loop_invariant_code_motion.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class WhileLoopInvariantCodeMotionTest : public HloTestBase {
public:
HloComputation* MakeAlwaysTrueComputation(const Shape& param_shape,
HloModule* module);
};
static void FindOnlyWhileInstruction(HloComputation* computation,
HloInstruction** while_instruction) {
*while_instruction = nullptr;
for (auto* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(*while_instruction, nullptr);
*while_instruction = instr;
}
}
ASSERT_NE(*while_instruction, nullptr);
}
HloComputation* WhileLoopInvariantCodeMotionTest::MakeAlwaysTrueComputation(
const Shape& param_shape, HloModule* module) {
HloComputation::Builder builder(TestName() + ".always_true");
builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return module->AddEmbeddedComputation(builder.Build());
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistOneInvariantOperation) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(entry_computation->instructions(), Contains(op::Add()));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Add())));
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistInvariantOperationTree) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* gte_2_loop_variant = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 2));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
HloInstruction* mul_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kMultiply, add_result, gte_1));
HloInstruction* negate_result =
builder.AddInstruction(HloInstruction::CreateUnary(
scalar_s32, HloOpcode::kNegate, mul_result));
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(4)));
HloInstruction* sub_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kSubtract, negate_result, constant));
HloInstruction* divide_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kDivide, sub_result, gte_2_loop_variant));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, divide_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(entry_computation->instructions(),
AllOf(Contains(op::Add()), Contains(op::Multiply()),
Contains(op::Negate()), Contains(op::Subtract()),
Contains(op::Constant()),
Not(Contains(op::Divide()))));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(AnyOf(op::Add(), op::Multiply(), op::Negate(),
op::Subtract(), op::Constant()))));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Contains(op::Divide()));
}
TEST_F(WhileLoopInvariantCodeMotionTest,
DontHoistTriviallyLoopVaryingComputation) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(HloInstruction::CreateTuple({gte_0, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(), Contains(op::Add()));
}
TEST_F(WhileLoopInvariantCodeMotionTest,
DontHoistLoopVaryingComputationWithAlternatingTuples) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_1, gte_0, add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(), Contains(op::Add()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistInstructionWithSideEffects) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto token_shape = ShapeUtil::MakeTokenShape();
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* in_token = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(token_shape, param, 2));
HloInstruction* out_token = builder.AddInstruction(
HloInstruction::CreateOutfeed(scalar_s32, gte_0, in_token, ""));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* scalar_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_s32, "param"));
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
ASSERT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Outfeed()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistBitcastAlone) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto effective_scalar_s32 = ShapeUtil::MakeShape(S32, {1});
auto token_shape = ShapeUtil::MakeTokenShape();
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, token_shape});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* in_token = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(token_shape, param, 2));
HloInstruction* bitcast_inst =
builder.AddInstruction(HloInstruction::CreateUnary(
effective_scalar_s32, HloOpcode::kBitcast, gte_0));
HloInstruction* out_token =
builder.AddInstruction(HloInstruction::CreateOutfeed(
effective_scalar_s32, bitcast_inst, in_token, ""));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, out_token}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* scalar_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_s32, "param"));
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateTuple({scalar_param, scalar_param, token}));
auto* while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, while_inst, 0));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Outfeed()));
EXPECT_THAT(while_inst->while_body()->instructions(),
Contains(op::Bitcast()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, HoistBitcastIfNeeded) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
auto effective_scalar_s32 = ShapeUtil::MakeShape(S32, {1});
Shape while_shape = ShapeUtil::MakeTupleShape(
{scalar_s32, effective_scalar_s32, effective_scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(effective_scalar_s32, param, 1));
HloInstruction* bitcast_inst =
builder.AddInstruction(HloInstruction::CreateUnary(
effective_scalar_s32, HloOpcode::kBitcast, gte_0));
HloInstruction* add_inst =
builder.AddInstruction(HloInstruction::CreateBinary(
effective_scalar_s32, HloOpcode::kAdd, bitcast_inst, gte_1));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_inst}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
HloComputation* entry_computation = m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloInstruction* transformed_while;
FindOnlyWhileInstruction(entry_computation, &transformed_while);
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Add())));
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::Bitcast())));
EXPECT_THAT(entry_computation->instructions(), Contains(op::Add()));
EXPECT_THAT(entry_computation->instructions(), Contains(op::Bitcast()));
}
TEST_F(WhileLoopInvariantCodeMotionTest, DontHoistControlDependencies) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape =
ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32, scalar_s32});
HloComputation* while_body;
{
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd, gte_0, gte_1));
TF_ASSERT_OK(param->AddControlDependencyTo(add_result));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, add_result}));
while_body = m->AddEmbeddedComputation(builder.Build());
}
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, BodyHasNonTupleRoot) {
auto m = CreateNewVerifiedModule();
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
Shape while_shape = ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".passthrough");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloComputation* result = m->AddEmbeddedComputation(builder.Build());
result->AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_s32, param, 1));
return result;
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
const char* const kConstantHoistingTestCase = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
const = f32[2]{0} constant({3, 4})
add.0 = f32[2]{0} add(p_body.1, const)
ROOT root = (f32[2]{0}) tuple(add.0)
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const_0)
ROOT while = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopInvariantCodeMotionTest, HoistsConstantWhenAsked) {
auto m = ParseAndReturnVerifiedModule(kConstantHoistingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopInvariantCodeMotion{/*hoist_constants=*/true}.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
auto wide_param_1 = op::Parameter(0);
auto get_tuple_element_1 = op::GetTupleElement(wide_param_1, 0);
auto tuple_1 = op::Tuple(get_tuple_element_1);
auto get_tuple_element_4 = op::GetTupleElement(tuple_1, 0);
auto get_tuple_element_7 = op::GetTupleElement(wide_param_1, 1);
auto add_1 = op::Add(get_tuple_element_4, get_tuple_element_7);
auto tuple_3 = op::Tuple(add_1);
auto get_tuple_element_8 = op::GetTupleElement(tuple_3, 0);
auto get_tuple_element_9 = op::GetTupleElement(wide_param_1, 1);
auto tuple_4 = op::Tuple(get_tuple_element_8, get_tuple_element_9);
EXPECT_THAT(while_body->root_instruction(), tuple_4);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistConstantByDefault) {
auto m = ParseAndReturnVerifiedModule(kConstantHoistingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoNotHoistOutOfSingleIteration) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param.0, param.0, param.0, param.1)
ROOT while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
const char* const kInflatingTestCase = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[]) parameter(0)
iota = f32[1024, 1024] iota(), iota_dimension=0
add = f32[1024, 1024] add(iota, iota)
constant = f32[] constant(1.0)
reduce = f32[] reduce(f32[1024, 1024] add, f32[] constant), dimensions={0,1}, to_apply=mul
ROOT root = (f32[]) tuple(reduce)
}
condition {
p_cond = (f32[]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param = f32[] parameter(0)
while_init = (f32[]) tuple(param)
ROOT while = (f32[]) while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopInvariantCodeMotionTest, HoistsInflatingByDefault) {
auto m = ParseAndReturnVerifiedModule(kInflatingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopInvariantCodeMotion(/*hoist_constants=*/true).Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Iota())));
}
TEST_F(WhileLoopInvariantCodeMotionTest, NoHoistInflating) {
auto m = ParseAndReturnVerifiedModule(kInflatingTestCase).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
      WhileLoopInvariantCodeMotion(/*hoist_constants=*/false,
                                   /*hoist_reshapes=*/true,
                                   /*hoist_size_inflation_ratio=*/1.0)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistSPMDFullToShardShape) {
auto m = CreateNewVerifiedModule();
auto array_s32 = ShapeUtil::MakeShape(S32, {4});
Shape while_shape =
ShapeUtil::MakeTupleShape({array_s32, array_s32, array_s32});
HloComputation* while_body = [&]() {
HloComputation::Builder builder(TestName() + ".while_body");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "param"));
HloInstruction* gte_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(array_s32, param, 0));
HloInstruction* gte_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(array_s32, param, 1));
HloInstruction* sharded_gte_1 = builder.AddInstruction(
HloInstruction::CreateCustomCall(array_s32, {gte_1}, "Sharding"));
sharded_gte_1->set_sharding(HloSharding::Tile1D(array_s32, 4));
HloInstruction* manually_sharded_gte_1 =
builder.AddInstruction(HloInstruction::CreateCustomCall(
array_s32, {sharded_gte_1}, "SPMDFullToShardShape"));
manually_sharded_gte_1->set_sharding(HloSharding::Manual());
HloInstruction* add_result =
builder.AddInstruction(HloInstruction::CreateBinary(
array_s32, HloOpcode::kAdd, gte_0, manually_sharded_gte_1));
HloInstruction* manually_sharded_add_result = builder.AddInstruction(
HloInstruction::CreateCustomCall(array_s32, {add_result}, "Sharding"));
manually_sharded_add_result->set_sharding(HloSharding::Manual());
HloInstruction* sharded_add_result =
builder.AddInstruction(HloInstruction::CreateCustomCall(
array_s32, {manually_sharded_add_result}, "SPMDShardShapeToFull"));
sharded_add_result->set_sharding(HloSharding::Tile1D(array_s32, 4));
builder.AddInstruction(
HloInstruction::CreateTuple({gte_0, gte_1, sharded_add_result}));
return m->AddEmbeddedComputation(builder.Build());
}();
HloComputation::Builder builder(TestName());
auto* init_value = builder.AddInstruction(
HloInstruction::CreateParameter(0, while_shape, "init_value"));
builder.AddInstruction(HloInstruction::CreateWhile(
while_shape, MakeAlwaysTrueComputation(while_shape, m.get()), while_body,
init_value));
m->AddEntryComputation(builder.Build());
LOG(INFO) << "my_test: " << m->ToString();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopInvariantCodeMotionTest, DoesNotHoistShardingCustomCalls) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], f32[2], s32[]) parameter(0)
gte.0 = f32[2] get-tuple-element(p_body), index=0
gte.1 = f32[2] get-tuple-element(p_body), index=1
sharding.0 = f32[2] custom-call(gte.0), custom_call_target="Sharding", sharding={devices=[2]<=[2]}
sharding.1 = f32[2] custom-call(gte.1), custom_call_target="Sharding", sharding={replicated}
add.0 = f32[2] add(sharding.0, sharding.1)
gte.2 = s32[] get-tuple-element(p_body), index=2
const = s32[] constant(1)
add.1 = s32[] add(gte.2, const)
ROOT root = (f32[2], f32[2], s32[]) tuple(gte.0, add.0, add.1)
}
condition {
p_cond = (f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=2
const = s32[] constant(5)
ROOT result = pred[] compare(gte, const), direction=LT
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], f32[2], s32[]) tuple(param.0, param.0, param.1)
ROOT while = (f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopInvariantCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_invariant_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_invariant_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
526cb160-0996-4b79-93f6-c2dc2427d0a2 | cpp | tensorflow/tensorflow | reduce_scatter_combiner | third_party/xla/xla/service/reduce_scatter_combiner.cc | third_party/xla/xla/service/reduce_scatter_combiner_test.cc | #include "xla/service/reduce_scatter_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
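// Returns the most frequent scatter dimension among `to_combine`, falling
// back to dimension 0 when that dimension is out of range for the
// lowest-rank op in the set.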
int64_t FindMostFrequentScatterDim(
absl::Span<HloInstruction* const> to_combine) {
assert(!to_combine.empty());
int64_t min_rank = std::numeric_limits<int64_t>::max();
std::vector<int64_t> frequency;
for (const HloInstruction* it : to_combine) {
int64_t dim = Cast<HloReduceScatterInstruction>(it)->scatter_dimension();
frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())),
0);
frequency[dim]++;
min_rank = std::min(min_rank, it->shape().rank());
}
int64_t most_frequent_dim = std::distance(
frequency.begin(), std::max_element(frequency.begin(), frequency.end()));
return most_frequent_dim < min_rank ? most_frequent_dim : 0;
}
using ReduceScatterKey = std::tuple<AllReduceKey, /*scatter_dimension*/ int64_t>;
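// Combines the reduce-scatter ops in `to_combine` into one variadic
// reduce-scatter. Operands whose scatter dimension differs from the most
// frequent one are first bitcast so their dimensions line up.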
absl::Status CombineReduceScatters(
absl::Span<HloInstruction* const> to_combine) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " reduce-scatter ops";
HloComputation& computation = *to_combine.back()->parent();
HloComputation* reduction = to_combine[0]->to_apply();
std::optional<ReductionKind> first_reduction_kind =
MatchReductionComputation(reduction);
TF_RET_CHECK(first_reduction_kind);
std::vector<HloInstruction*> operands;
std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
std::vector<Shape> output_shapes;
int64_t most_frequent_dim = FindMostFrequentScatterDim(to_combine);
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kReduceScatter);
const auto* rs = Cast<HloReduceScatterInstruction>(hlo);
TF_RET_CHECK(hlo->operands().size() == 1);
std::optional<ReductionKind> reduction_kind =
MatchReductionComputation(hlo->to_apply());
TF_RET_CHECK(reduction_kind);
TF_RET_CHECK(*reduction_kind == *first_reduction_kind);
TF_RET_CHECK(hlo->shape().IsArray());
HloInstruction* operand = hlo->operands().front();
operands.push_back(operand);
operand_permutations.emplace_back();
output_shapes.push_back(hlo->shape());
if (rs->scatter_dimension() != most_frequent_dim) {
const Shape& operand_shape = operand->shape();
auto& perm = operand_permutations.back();
perm = std::vector<int64_t>(operand_shape.rank());
std::iota(perm->begin(), perm->end(), 0);
std::swap((*perm)[most_frequent_dim], (*perm)[rs->scatter_dimension()]);
operands.back() =
computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
}
}
HloInstruction* combined;
TF_RET_CHECK(operands.size() >= 2);
combined = computation.AddInstruction(HloInstruction::CreateReduceScatter(
ShapeUtil::MakeTupleShape(output_shapes), operands, reduction,
to_combine.front()->device_list(),
      /*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloReduceScatterInstruction>(to_combine.front())
->use_global_device_ids(),
most_frequent_dim));
if (to_combine.front()->has_sharding()) {
combined->set_sharding(to_combine.front()->sharding());
}
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
HloInstruction* replacement = computation.AddInstruction(
HloInstruction::CreateGetTupleElement(combined, i));
if (operand_permutations[i]) {
replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*operand_permutations[i],
replacement->shape()),
replacement));
}
TF_RETURN_IF_ERROR(
computation.ReplaceInstruction(to_combine[i], replacement));
}
return absl::OkStatus();
}
}
ReduceScatterCombiner::ReduceScatterCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count,
bool combine_by_dim)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count),
combine_by_dim_(combine_by_dim) {}
absl::StatusOr<bool> ReduceScatterCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running ReduceScatterCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip ReduceScatterCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedCollective(
*module, HloOpcode::kReduceScatter)) {
VLOG(1) << "Skip ReduceScatterCombiner because the module contains "
"reduce-scatter with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
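    // Two reduce-scatters may be combined only if they agree on the
    // all-reduce key (replica groups, channel, reduction kind) and, when
    // combining by dimension, on the scatter dimension.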
auto key_fn = [&domain_map, this](const HloInstruction* instruction)
-> std::optional<ReduceScatterKey> {
auto* rs = DynCast<HloReduceScatterInstruction>(instruction);
std::optional<AllReduceKey> key =
GetAllReduceKey(instruction, domain_map.get());
if (!rs || !key) {
return std::nullopt;
}
if (!MatchReductionComputation(rs->to_apply())) {
return std::nullopt;
}
int64_t rs_dim_key = this->combine_by_dim_ ? rs->scatter_dimension() : -1;
return ReduceScatterKey{std::move(*key), rs_dim_key};
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<ReduceScatterKey>(
computation, key_fn, &CombineReduceScatters,
combine_threshold_in_bytes_, combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/reduce_scatter_combiner.h"
#include <cstddef>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
constexpr int64_t kMaxCombineCount = 256;
constexpr int64_t kMaxByteCount = 10 * 1024 * 1024;
class ReduceScatterCombinerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change,
int64_t byte_threshold = kMaxByteCount,
int64_t count_threshold = kMaxCombineCount, bool combine_by_dim = true) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
VLOG(1) << "Before running ReduceScatterCombiner: "
<< ReduceScatterCount(module.get()) << " reduce-scatter ops";
auto changed =
ReduceScatterCombiner(byte_threshold, count_threshold, combine_by_dim)
.Run(module.get());
if (!changed.ok()) {
return changed.status();
}
VLOG(1) << "After running ReduceScatterCombiner: "
<< ReduceScatterCount(module.get()) << " reduce-scatter ops";
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t ReduceScatterCount(HloModule *module) {
int64_t sum = 0;
for (auto comp : module->computations()) {
sum += absl::c_count_if(comp->instructions(),
HloPredicateIsOp<HloOpcode::kReduceScatter>);
}
return sum;
}
};
TEST_F(ReduceScatterCombinerTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, SimpleMultipleGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8, 8] parameter(1)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
tuple(rs0, rs1, rs2, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_EQ(ReduceScatterCount(module.get()), 2);
}
TEST_F(ReduceScatterCombinerTest, DifferentDimensions) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8, 8] parameter(1)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
tuple(rs0, rs1, rs2, rs3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, /*expect_change=*/true, kMaxByteCount,
                           kMaxCombineCount, /*combine_by_dim=*/false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, DifferentDimensionsAndRanks) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs1 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
to_apply=sum
rs2 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[8, 4], f32[8, 4], f32[4])
tuple(rs0, rs1, rs2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunPass(hlo_string, true, kMaxByteCount,
kMaxCombineCount, false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
TEST_F(ReduceScatterCombinerTest, DependentReduceScatter) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8, 8] parameter(0)
rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[2, 8] reduce-scatter(rs0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4, 8], f32[2, 8]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, DoNotCombineMismatched) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
to_apply=sum
rs1 = f32[4] reduce-scatter(p1), replica_groups={{1,0}}, dimensions={0},
to_apply=sum
ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, DoNotCombineWithoutReductionKind) {
absl::string_view hlo_string = R"(
HloModule TestModule
region_0 {
Arg_1 = bf16[] parameter(1)
Arg_0 = bf16[] parameter(0)
convert_1 = f32[] convert(Arg_1)
convert_0 = f32[] convert(Arg_0)
add0 = f32[] add(convert_1, convert_0)
ROOT convert_2 = bf16[] convert(add0)
}
region_1 {
Arg_1 = bf16[] parameter(1)
Arg_0 = bf16[] parameter(0)
convert_1 = f32[] convert(Arg_1)
convert_0 = f32[] convert(Arg_0)
add0 = f32[] add(convert_1, convert_0)
ROOT convert_2 = bf16[] convert(add0)
}
ENTRY entry{
param0 = bf16[512,256]{1,0} parameter(0)
param1 = bf16[512,256]{1,0} parameter(1)
reduce-scatter.0 = bf16[512,256]{1,0} reduce-scatter(param0),
replica_groups={{0}}, dimensions={0}, to_apply=region_0
reduce-scatter.1 = bf16[512,256]{1,0} reduce-scatter(param1),
replica_groups={{0}}, dimensions={0}, to_apply=region_1
ROOT add.0 = tuple(reduce-scatter.0, reduce-scatter.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(ReduceScatterCombinerTest, HighThreshold) {
absl::string_view hlo_string = R"(
HloModule m
sum_reduce {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
ENTRY main {
param.0 = bf16[1024,32768]{1,0} parameter(0)
param.1 = bf16[4096,8192]{1,0} parameter(1)
param.2 = bf16[3,128,64,1024]{2,1,0,3}parameter(2)
param.3 = bf16[1024,128,64]{2,1,0} parameter(3)
reduce-scatter.19 = bf16[1024,32768]{1,0} reduce-scatter(param.0),
channel_id=132, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
reduce-scatter.21 = bf16[4096,8192]{1,0} reduce-scatter(param.1),
channel_id=134, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
reduce-scatter.23 = bf16[3,128,64,1024]{2,1,0,3} reduce-scatter(param.2),
channel_id=136, replica_groups={{0}}, dimensions={3}, to_apply=sum_reduce
reduce-scatter.25 = bf16[1024,128,64]{2,1,0} reduce-scatter(param.3),
channel_id=138, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
ROOT tuple = tuple(reduce-scatter.19, reduce-scatter.21, reduce-scatter.23,
reduce-scatter.25)
})";
int64_t combined_bytes = 67108864 + 67108864 + 50331648 + 16777216;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunPass(hlo_string, true,
combined_bytes,
kMaxCombineCount, false));
EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da9cd0b1-a718-4bc5-a59f-dcc55283c76a | cpp | tensorflow/tensorflow | collective_ops_utils | third_party/xla/xla/service/collective_ops_utils.cc | third_party/xla/xla/service/collective_ops_utils_test.cc | #include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
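// Maps a scalar binary HLO op to the reduction it implements. kAnd and kOr
// only qualify on PRED, where they coincide with MIN and MAX.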
std::optional<ReductionKind> MatchReductionInstruction(
const HloInstruction* hlo) {
PrimitiveType type = hlo->shape().element_type();
switch (hlo->opcode()) {
case HloOpcode::kAdd:
return ReductionKind::SUM;
case HloOpcode::kMultiply:
return ReductionKind::PRODUCT;
case HloOpcode::kMinimum:
return ReductionKind::MIN;
case HloOpcode::kMaximum:
return ReductionKind::MAX;
case HloOpcode::kAnd:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MIN)
: std::nullopt;
case HloOpcode::kOr:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MAX)
: std::nullopt;
default:
return std::nullopt;
}
}
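// Recognizes a reduction computation: the root must be a supported binary op
// applied to the two parameters (in either operand order) with an
// effective-scalar result.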
std::optional<ReductionKind> MatchReductionComputation(
const HloComputation* computation) {
namespace m = match;
const HloInstruction* root = computation->root_instruction();
auto kind = MatchReductionInstruction(root);
if (kind && !Match(root, m::Op()
.WithBinaryOperandsAnyOrder(m::Parameter(0),
m::Parameter(1))
.WithShape(m::Shape().IsEffectiveScalar()))) {
kind = std::nullopt;
}
return kind;
}
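// Returns the identity literal for a reduction, e.g. zero for SUM and the
// type's maximum value for MIN.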
std::optional<Literal> GetReductionIdentity(ReductionKind kind,
PrimitiveType type) {
switch (kind) {
case ReductionKind::SUM:
return LiteralUtil::Zero(type);
case ReductionKind::PRODUCT:
return LiteralUtil::One(type);
case ReductionKind::MIN:
return LiteralUtil::MaxValue(type);
case ReductionKind::MAX:
return LiteralUtil::MinValue(type);
default:
return std::nullopt;
}
}
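// Returns the IDs that participate together with `current_id`. An empty
// `groups` span means all `total_participant_count` IDs form one group;
// otherwise `current_id` must appear in exactly one of the groups.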
absl::StatusOr<std::vector<int>> GetParticipatingIDs(
CollectiveOpGroupMode group_mode, int current_id,
std::optional<int> total_participant_count,
absl::Span<const ReplicaGroup> groups) {
if (groups.empty()) {
TF_RET_CHECK(total_participant_count.has_value());
std::vector<int> all_participants(*total_participant_count);
absl::c_iota(all_participants, 0);
return all_participants;
}
auto group_formatter = [](std::string* out, const ReplicaGroup& group) {
out->append("[");
out->append(absl::StrJoin(group.replica_ids(), ", "));
out->append("]");
};
std::optional<ReplicaGroup> group;
for (const ReplicaGroup& g : groups) {
if (absl::c_linear_search(g.replica_ids(), current_id)) {
TF_RET_CHECK(!group.has_value())
<< "Replica ID " << current_id << " appears twice in replica groups"
<< "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
<< "; groups_size=" << groups.size()
<< "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
group = g;
}
}
TF_RET_CHECK(group.has_value())
<< "Replica ID " << current_id << " doesn't appear in replica groups"
<< "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
<< "; groups_size=" << groups.size()
<< "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
return std::vector<int>(group->replica_ids().begin(),
group->replica_ids().end());
}
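// Derives the collective's group mode from (has_channel_id,
// use_global_device_ids):
// no channel + ids unset/false -> kCrossReplica
// no channel + ids true -> invalid
// channel + ids unset -> kCrossPartition
// channel + ids false -> kCrossReplicaAndPartition
// channel + ids true -> kFlattenedID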
absl::StatusOr<CollectiveOpGroupMode> GetCollectiveOpGroupMode(
bool has_channel_id, std::optional<bool> use_global_device_ids) {
if (!has_channel_id) {
if (!use_global_device_ids.has_value() || !*use_global_device_ids) {
return CollectiveOpGroupMode::kCrossReplica;
} else {
return InvalidArgument(
"Invalid combination of has_channel_id and use_global_device_ids");
}
} else {
if (!use_global_device_ids.has_value()) {
return CollectiveOpGroupMode::kCrossPartition;
} else if (!*use_global_device_ids) {
return CollectiveOpGroupMode::kCrossReplicaAndPartition;
} else {
return CollectiveOpGroupMode::kFlattenedID;
}
}
}
absl::string_view CollectiveOpGroupModeToString(
CollectiveOpGroupMode group_mode) {
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
return "kCrossReplica";
case CollectiveOpGroupMode::kCrossPartition:
return "kCrossPartition";
case CollectiveOpGroupMode::kCrossReplicaAndPartition:
return "kCrossReplicaAndPartition";
case CollectiveOpGroupMode::kFlattenedID:
return "kFlattenedID";
}
}
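// Expands replica groups into concrete device groups via the device
// assignment. Empty replica groups stand for "all replicas" (or "all
// partitions" under kCrossPartition) and are rejected for kFlattenedID.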
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
GetParticipatingDevicesGroups(const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
int replica_count = device_assignment.replica_count();
int partition_count = device_assignment.computation_count();
std::vector<ReplicaGroup> participating_replica_groups =
SpanToVector(replica_groups);
if (replica_groups.empty()) {
if (group_mode == CollectiveOpGroupMode::kFlattenedID) {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
}
int total_participant_count;
if (group_mode == CollectiveOpGroupMode::kCrossPartition) {
total_participant_count = partition_count;
} else {
total_participant_count = replica_count;
}
ReplicaGroup replica_group = ReplicaGroup();
for (int id = 0; id < total_participant_count; id++) {
replica_group.add_replica_ids(id);
}
participating_replica_groups.push_back(replica_group);
}
std::vector<std::vector<GlobalDeviceId>> groups;
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
for (const auto& replica_group : participating_replica_groups) {
for (int partition_id = 0; partition_id < partition_count;
partition_id++) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int replica_id : replica_group.replica_ids()) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
}
return groups;
}
case CollectiveOpGroupMode::kCrossPartition: {
for (const auto& replica_group : participating_replica_groups) {
for (int replica_id = 0; replica_id < replica_count; replica_id++) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int partition_id : replica_group.replica_ids()) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
}
return groups;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
for (const auto& replica_group : participating_replica_groups) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size() *
partition_count);
for (int replica_id : replica_group.replica_ids()) {
for (int partition_id = 0; partition_id < partition_count;
partition_id++) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
}
groups.push_back(participants);
}
return groups;
}
case CollectiveOpGroupMode::kFlattenedID: {
for (const auto& replica_group : participating_replica_groups) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int flattened_id : replica_group.replica_ids()) {
int replica_id = flattened_id / partition_count;
int partition_id = flattened_id % partition_count;
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
return groups;
}
}
}
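// Like GetParticipatingDevicesGroups, but expresses each group in flattened
// IDs, where flattened_id = replica_id * partition_count + partition_id.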
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
absl::flat_hash_map<GlobalDeviceId, int64_t> device_id_to_flattened_id;
for (int r = 0; r < device_assignment.replica_count(); ++r) {
for (int c = 0; c < device_assignment.computation_count(); ++c) {
GlobalDeviceId device_id = GlobalDeviceId(device_assignment(r, c));
int64_t flattened_id = r * device_assignment.computation_count() + c;
device_id_to_flattened_id[device_id] = flattened_id;
}
}
std::vector<ReplicaGroup> flattened_id_groups;
TF_ASSIGN_OR_RETURN(std::vector<std::vector<GlobalDeviceId>> device_groups,
GetParticipatingDevicesGroups(
device_assignment, replica_groups, group_mode));
for (const auto& device_group : device_groups) {
ReplicaGroup flattened_id_group;
flattened_id_group.mutable_replica_ids()->Reserve(device_group.size());
for (const GlobalDeviceId& device_id : device_group) {
flattened_id_group.add_replica_ids(device_id_to_flattened_id[device_id]);
}
flattened_id_groups.push_back(flattened_id_group);
}
return flattened_id_groups;
}
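// Overload that computes the flattened-ID groups purely arithmetically from
// the replica and partition counts, without a device assignment.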
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode replica_group_mode, int replica_count,
int partition_count) {
std::vector<ReplicaGroup> filled_empty_replica_group;
absl::Span<const ReplicaGroup> original_replica_groups = replica_groups;
std::vector<ReplicaGroup> flattened_replica_groups;
if (replica_groups.empty()) {
filled_empty_replica_group.emplace_back();
const int64_t id_count =
replica_group_mode == CollectiveOpGroupMode::kCrossPartition
? partition_count
: replica_count;
for (int i = 0; i < id_count; ++i) {
filled_empty_replica_group.back().add_replica_ids(i);
}
original_replica_groups = filled_empty_replica_group;
}
if (replica_group_mode == CollectiveOpGroupMode::kFlattenedID) {
flattened_replica_groups.insert(flattened_replica_groups.end(),
original_replica_groups.begin(),
original_replica_groups.end());
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossReplica) {
flattened_replica_groups.resize(original_replica_groups.size() *
partition_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += partition_count) {
for (int64_t replica_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t partition_id = 0; partition_id < partition_count;
++partition_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[current_group_offset + partition_id]
.add_replica_ids(flattened_id);
}
}
}
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossPartition) {
flattened_replica_groups.resize(original_replica_groups.size() *
replica_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += replica_count) {
for (int64_t partition_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t replica_id = 0; replica_id < replica_count; ++replica_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[current_group_offset + replica_id]
.add_replica_ids(flattened_id);
}
}
}
} else {
CHECK(replica_group_mode ==
CollectiveOpGroupMode::kCrossReplicaAndPartition);
flattened_replica_groups.resize(original_replica_groups.size());
for (int64_t i = 0; i < original_replica_groups.size(); ++i) {
for (int64_t replica_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t partition_id = 0; partition_id < partition_count;
++partition_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[i].add_replica_ids(flattened_id);
}
}
}
}
return flattened_replica_groups;
}
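// Returns the devices that participate in the same collective group as
// `device_id`, resolved according to the semantics of the given group mode.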
absl::StatusOr<std::vector<GlobalDeviceId>> GetParticipatingDevices(
GlobalDeviceId device_id, const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
int replica_count = device_assignment.replica_count();
int partition_count = device_assignment.computation_count();
TF_ASSIGN_OR_RETURN(const DeviceAssignment::LogicalID logical_id,
device_assignment.LogicalIdForDevice(device_id));
int current_replica_id = logical_id.replica_id;
int current_partition_id = logical_id.computation_id;
TF_RET_CHECK(0 <= current_replica_id && current_replica_id < replica_count)
<< current_replica_id << " " << replica_count;
TF_RET_CHECK(0 <= current_partition_id &&
current_partition_id < partition_count)
<< current_partition_id << " " << partition_count;
std::vector<GlobalDeviceId> participants;
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_replicas,
GetParticipatingIDs(group_mode, current_replica_id,
replica_count, replica_groups));
participants.reserve(participating_replicas.size());
for (int replica_id : participating_replicas) {
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
participants.emplace_back(
device_assignment(replica_id, current_partition_id));
}
return participants;
}
case CollectiveOpGroupMode::kCrossPartition: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_partitions,
GetParticipatingIDs(group_mode, current_partition_id,
partition_count, replica_groups));
participants.reserve(participating_partitions.size());
for (int partition_id : participating_partitions) {
TF_RET_CHECK(0 <= partition_id && partition_id < partition_count)
<< partition_id << " " << partition_count;
participants.emplace_back(
device_assignment(current_replica_id, partition_id));
}
return participants;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_replicas,
GetParticipatingIDs(group_mode, current_replica_id,
replica_count, replica_groups));
participants.reserve(participating_replicas.size() * partition_count);
for (int replica_id : participating_replicas) {
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
for (int partition_id = 0; partition_id < partition_count;
++partition_id) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
}
return participants;
}
case CollectiveOpGroupMode::kFlattenedID: {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
int current_flattened_id =
current_replica_id * partition_count + current_partition_id;
TF_ASSIGN_OR_RETURN(
std::vector<int> participating_flattened_ids,
GetParticipatingIDs(group_mode, current_flattened_id,
std::nullopt,
replica_groups));
participants.reserve(participating_flattened_ids.size());
for (int flattened_id : participating_flattened_ids) {
int replica_id = flattened_id / partition_count;
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
int partition_id = flattened_id % partition_count;
participants.emplace_back(device_assignment(replica_id, partition_id));
}
return participants;
}
}
}
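// Computes the participant count of each device group implied by the replica
// groups and group mode, without materializing the device lists themselves.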
absl::StatusOr<std::vector<int64_t>> GetPariticipantCountsForReplicaGroups(
int64_t num_replicas, int64_t num_partitions,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
std::vector<int64_t> participant_counts;
std::vector<ReplicaGroup> participating_replica_groups =
SpanToVector(replica_groups);
if (replica_groups.empty()) {
if (group_mode == CollectiveOpGroupMode::kFlattenedID) {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
}
int total_participant_count;
if (group_mode == CollectiveOpGroupMode::kCrossPartition) {
total_participant_count = num_partitions;
} else {
total_participant_count = num_replicas;
}
ReplicaGroup replica_group = ReplicaGroup();
for (int id = 0; id < total_participant_count; id++) {
replica_group.add_replica_ids(id);
}
participating_replica_groups.push_back(replica_group);
}
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
for (const auto& replica_group : participating_replica_groups) {
for (int partition_id = 0; partition_id < num_partitions;
++partition_id) {
participant_counts.push_back(replica_group.replica_ids().size());
}
}
return participant_counts;
}
case CollectiveOpGroupMode::kCrossPartition: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size());
}
return participant_counts;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size() *
num_partitions);
}
return participant_counts;
}
case CollectiveOpGroupMode::kFlattenedID: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size());
}
return participant_counts;
}
}
}
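// Two replica-group lists are orthogonal when they are transposes of each
// other, i.e. first[i][j] == second[j][i]; both spans must be non-empty.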
bool ReplicaGroupsOrthogonal(absl::Span<const ReplicaGroup> first,
absl::Span<const ReplicaGroup> second) {
if (first.size() != second[0].replica_ids_size()) {
return false;
}
if (first[0].replica_ids_size() != second.size()) {
return false;
}
for (int64_t i = 0; i < first.size(); ++i) {
for (int64_t j = 0; j < first[i].replica_ids_size(); ++j) {
if (first[i].replica_ids(j) != second[j].replica_ids(i)) {
return false;
}
}
}
return true;
}
bool ReplicaGroupsEqual(absl::Span<const ReplicaGroup> first,
absl::Span<const ReplicaGroup> second) {
if (first.size() != second.size()) {
return false;
}
for (int64_t i = 0; i < first.size(); ++i) {
if (first[i].replica_ids_size() != second[i].replica_ids_size()) {
return false;
}
for (int j = 0; j < first[i].replica_ids_size(); ++j) {
if (first[i].replica_ids(j) != second[i].replica_ids(j)) {
return false;
}
}
}
return true;
}
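// True for collective opcodes themselves, including async-wrapped ones;
// IsCollective below additionally looks inside custom fusions.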
bool IsNonFusionCollective(const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kReduceScatter:
return true;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
return IsNonFusionCollective(instruction->async_wrapped_instruction());
default:
return false;
}
}
bool IsCollective(const HloInstruction* instruction) {
if (IsNonFusionCollective(instruction)) {
return true;
}
if (instruction->opcode() == HloOpcode::kFusion &&
instruction->IsCustomFusion()) {
for (const auto* inner_inst : instruction->fused_instructions()) {
if (IsCollective(inner_inst)) {
return true;
}
}
}
return false;
}
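// If `instruction` is (or, for fusions, contains) a collective carrying a
// channel id, returns that collective; otherwise returns nullptr.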
HloInstruction* IsOrHasCollectiveWithChannelId(HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
for (auto* inner_inst : instruction->fused_instructions()) {
if (IsOrHasCollectiveWithChannelId(inner_inst) != nullptr) {
return inner_inst;
}
}
return nullptr;
}
if (DynCast<HloChannelInstruction>(instruction) == nullptr) {
return nullptr;
}
if (IsCollective(instruction) && instruction->channel_id().has_value()) {
return instruction;
}
return nullptr;
}
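// A collective counts as "sync" when its GPU backend config marks it as such.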
bool IsSyncCollective(const HloInstruction* instr) {
auto backend_config = instr->backend_config<xla::gpu::GpuBackendConfig>();
if (!backend_config.ok()) {
return false;
}
return backend_config->collective_backend_config().is_sync();
}
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
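// A forward cycle is the canonical list {0,1},{1,2},...,{n-1,0}; a backward
// cycle is {0,n-1},{1,0},{2,1},...,{n-1,n-2}. Lists of size <= 1 are never
// cycles.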
bool IsForwardCycle(const SourceTargetPairs& pairs) {
int64_t size = pairs.size();
if (size <= 1) return false;
const SourceTargetPair& last_pair = pairs[size - 1];
if (last_pair.first != size - 1 || last_pair.second != 0) {
return false;
}
for (int64_t i = 0; i < size - 1; ++i) {
const SourceTargetPair& pair = pairs[i];
if (pair.first != i || pair.second != i + 1) {
return false;
}
}
return true;
}
bool IsBackwardCycle(const SourceTargetPairs& pairs) {
int64_t size = pairs.size();
if (size <= 1) return false;
const SourceTargetPair& first_pair = pairs[0];
if (first_pair.first != 0 || first_pair.second != size - 1) {
return false;
}
for (int64_t i = 1; i < size; ++i) {
const SourceTargetPair& pair = pairs[i];
if (pair.first != i || pair.second != i - 1) {
return false;
}
}
return true;
}
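// A collective is exclusively cross-module when it carries a channel id and
// every group stays within one replica: singleton groups when global ids are
// not used, or groups whose global ids all map to the same replica otherwise.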
bool IsExclusivelyCrossModule(absl::Span<const ReplicaGroup> replica_groups,
bool use_global_ids, bool has_channel_id,
const DeviceAssignment& device_assignment) {
if (!has_channel_id) {
return false;
}
if (!use_global_ids) {
for (const ReplicaGroup& replica_group : replica_groups) {
if (replica_group.replica_ids_size() != 1) {
return false;
}
}
return true;
}
int64_t partition_count = device_assignment.computation_count();
for (const ReplicaGroup& replica_group : replica_groups) {
std::optional<int64_t> first_replica_id;
for (int64_t global_id : replica_group.replica_ids()) {
int64_t replica_id = global_id / partition_count;
if (!first_replica_id.has_value()) {
first_replica_id = replica_id;
} else if (replica_id != first_replica_id) {
return false;
}
}
}
return true;
}
} | #include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <sstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
std::vector<ReplicaGroup> CreateReplicaGroups(
const std::vector<std::vector<int64_t>> &replica_groups) {
std::vector<ReplicaGroup> result;
result.reserve(replica_groups.size());
for (const auto &replica_group : replica_groups) {
ReplicaGroup &group = result.emplace_back();
for (auto id : replica_group) {
group.add_replica_ids(id);
}
}
return result;
}
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_NoReplicaGroups) {
std::vector<int> actual =
GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
0, 3,
{})
.value();
std::vector<int> expected = {0, 1, 2};
EXPECT_EQ(actual, expected);
}
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_ReplicaGroups) {
std::vector<ReplicaGroup> replica_groups(3);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(4);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(5);
replica_groups[2].add_replica_ids(2);
replica_groups[2].add_replica_ids(3);
std::vector<int> actual =
GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
1,
std::nullopt,
replica_groups)
.value();
std::vector<int> expected = {1, 5};
EXPECT_EQ(actual, expected);
}
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %cluster {
%param0 = f32[512]{0} parameter(0)
%copy0 = f32[512]{0} copy(param0)
%reshape0 = f32[1,1,512]{2,0,1} reshape(f32[512]{0} %copy0)
%all-gather = f32[1,4,512]{2,0,1} all-gather(f32[1,1,512]{2,0,1} %reshape0), channel_id=3621, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
%copy1 = f32[1,4,512]{2,0,1} copy(all-gather)
ROOT root = f32[1,4,512]{2,1,0} copy(%copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction *all_gather =
module->entry_computation()->GetInstructionWithName("all-gather");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(all_gather), all_gather);
}
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId2) {
ReplicaGroup group;
for (int64_t i = 0; i < 8; i++) {
group.add_replica_ids(i);
}
auto builder = HloComputation::Builder("CollectiveWithChannelId2");
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * param_0,
builder.AddParameter(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p0")));
HloInstruction *instr =
builder.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_0}, 1,
CollectiveDeviceList({group}), true, 231, true));
auto computation = builder.Build(
builder.AddInstruction(HloInstruction::CreateTuple({instr})));
auto fusion =
HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
HloInstruction::FusionKind::kOutput,
{param_0}, computation.get(), "fusion");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion.get()), instr);
auto builder2 = HloComputation::Builder("CollectiveWithChannelId2");
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * param_1,
builder2.AddParameter(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p1")));
HloInstruction *instr_without_channel_id =
builder2.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_1}, 1, {group},
true, std::nullopt, true));
auto computation2 = builder2.Build(builder2.AddInstruction(
HloInstruction::CreateTuple({instr_without_channel_id})));
auto fusion2 =
HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
HloInstruction::FusionKind::kOutput,
{param_1}, computation2.get(), "fusion2");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion2.get()), nullptr);
}
TEST(CollectiveOpsUtilsTest, IsForwardCycle) {
EXPECT_TRUE(IsForwardCycle({{0, 1}, {1, 0}}));
EXPECT_TRUE(IsForwardCycle({{0, 1}, {1, 2}, {2, 3}, {3, 0}}));
EXPECT_FALSE(IsForwardCycle({{0, 0}})) << "Self link is not a cycle!";
EXPECT_FALSE(IsForwardCycle({{}})) << "Self link due to initialization to 0";
EXPECT_FALSE(IsForwardCycle({}));
EXPECT_FALSE(IsForwardCycle({{0, 1}}));
EXPECT_FALSE(IsForwardCycle({{0, 1}, {2, 0}})) << "No link between 1 and 2";
EXPECT_FALSE(IsForwardCycle({{1, 0}, {0, 1}})) << "Backward cycle";
EXPECT_FALSE(IsForwardCycle({{3, 0}, {0, 1}, {1, 2}, {2, 3}}))
<< "Unordered pairs are not a cycle";
EXPECT_FALSE(IsForwardCycle({{0, 1}, {1, 2}, {2, 3}, {4, 5}, {3, 0}}))
<< "Out of order pairs are not a cycle";
}
TEST(CollectiveOpsUtilsTest, IsBackwardCycle) {
EXPECT_TRUE(IsBackwardCycle({{0, 1}, {1, 0}}));
EXPECT_TRUE(IsBackwardCycle({{0, 3}, {1, 0}, {2, 1}, {3, 2}}));
EXPECT_FALSE(IsBackwardCycle({{0, 0}})) << "Self link is not a cycle!";
EXPECT_FALSE(IsBackwardCycle({{}})) << "Self link due to initialization to 0";
EXPECT_FALSE(IsBackwardCycle({}));
EXPECT_FALSE(IsBackwardCycle({{1, 0}}));
EXPECT_FALSE(IsBackwardCycle({{2, 1}, {0, 2}})) << "No link between 1 and 2";
EXPECT_FALSE(IsBackwardCycle({{3, 2}, {0, 3}, {1, 0}, {2, 1}}))
<< "Unordered pairs are not a cycle";
EXPECT_FALSE(IsBackwardCycle({{0, 1}, {1, 2}, {4, 5}, {3, 0}}))
<< "Out of order pairs are not a cycle";
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaNoChannelSet) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
DeviceAssignment device_assignment(num_replicas, num_partitions);
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, false,
false, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaAndCrossModuleNoGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
DeviceAssignment device_assignment(num_replicas, num_partitions);
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, false,
true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossModuleNoGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0}, {1}, {2}, {3}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, false,
true, device_assignment);
EXPECT_TRUE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaWithGlobalIds) {
int64_t num_replicas = 8;
int64_t num_partitions = 1;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1, 2, 3, 4, 5, 6, 7}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, true,
true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaAndCrossModuleWithGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1, 2, 3, 4, 5, 6, 7}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, true,
true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossModuleWithGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}, {4, 5}, {6, 7}});
bool is_exclusively_cross_module =
IsExclusivelyCrossModule(replica_groups, true,
true, device_assignment);
EXPECT_TRUE(is_exclusively_cross_module);
}
}
namespace GetCollectiveOpGroupModeTest {
struct TestCase {
bool has_channel_id;
std::optional<bool> use_global_device_ids;
std::optional<xla::CollectiveOpGroupMode> expected;
std::string ToString() const {
std::ostringstream s;
s << (has_channel_id ? "chnl" : "nochnl");
s << "_"
<< (use_global_device_ids
? (*use_global_device_ids ? "ugdi_true" : "ugdi_false")
: "nougdi");
return s.str();
}
};
std::vector<TestCase> GetTestCases() {
const std::vector<TestCase> test_cases = {
{false, std::nullopt, CollectiveOpGroupMode::kCrossReplica},
{false, false, CollectiveOpGroupMode::kCrossReplica},
{false, true, std::nullopt},
{true, std::nullopt, CollectiveOpGroupMode::kCrossPartition},
{true, false, CollectiveOpGroupMode::kCrossReplicaAndPartition},
{true, true, CollectiveOpGroupMode::kFlattenedID},
};
return test_cases;
}
class GetCollectOpGroupModeTest : public testing::TestWithParam<TestCase> {};
TEST_P(GetCollectOpGroupModeTest, Test) {
const TestCase &tc = GetParam();
absl::StatusOr<CollectiveOpGroupMode> actual =
GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
if (tc.expected) {
TF_ASSERT_OK(actual.status());
EXPECT_EQ(*actual, *tc.expected);
} else {
EXPECT_FALSE(actual.ok());
}
}
INSTANTIATE_TEST_SUITE_P(GetCollectOpGroupMode, GetCollectOpGroupModeTest,
testing::ValuesIn(GetTestCases()));
}
namespace GetParticipatingDevicesTest {
struct TestCase {
xla::Array2D<int> device_assignment;
std::vector<std::vector<int64_t>> replica_groups;
bool has_channel_id;
std::optional<bool> use_global_device_ids;
struct CurrentIdAndOutput {
int current_id;
std::vector<int> expected_output;
};
std::vector<CurrentIdAndOutput> subtests;
std::vector<std::vector<int>> participating_device_groups;
bool expected_failure;
std::string ToString() const;
};
std::string TestCase::ToString() const {
std::ostringstream s;
absl::StatusOr<CollectiveOpGroupMode> group_mode =
GetCollectiveOpGroupMode(has_channel_id, use_global_device_ids);
if (group_mode.ok()) {
s << CollectiveOpGroupModeToString(*group_mode);
} else {
s << "Invalid";
}
s << "_" << device_assignment.n1() << "x" << device_assignment.n2();
s << "_" << (replica_groups.empty() ? "NoRG" : "RG");
s << "_" << subtests.size() << "SubTests";
return s.str();
}
std::ostream &operator<<(std::ostream &os, const TestCase &tc) {
os << tc.ToString();
return os;
}
std::vector<TestCase> GetTestCases() {
std::vector<TestCase> test_cases;
const std::vector<TestCase> cross_replica_test_cases = {
{
{{33}, {44}, {55}},
{},
false,
false,
{
{33, {33, 44, 55}},
{44, {33, 44, 55}},
},
{{33, 44, 55}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{},
false,
false,
{
{33, {33, 44, 55}},
{34, {34, 45, 56}},
{45, {34, 45, 56}},
},
{{33, 44, 55}, {34, 45, 56}},
false
},
{
{{33}, {44}, {55}},
{{0}, {1, 2}},
false,
false,
{
{33, {33}},
{44, {44, 55}},
},
{{33}, {44, 55}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}},
false,
false,
{
{33, {33}},
{34, {34}},
{45, {45, 56}},
},
{{33}, {34}, {44, 55}, {45, 56}},
false
},
};
const std::vector<TestCase> cross_partition_test_cases = {
{
{
{33, 34, 35, 36}, {44, 45, 46, 47}, {55, 56, 57, 58}
},
{{0, 1}, {2, 3}},
true,
std::nullopt,
{
{33, {33, 34}},
{35, {35, 36}},
{45, {44, 45}},
{47, {46, 47}},
{58, {57, 58}},
},
{{33, 34}, {44, 45}, {55, 56},
{35, 36}, {46, 47}, {57, 58}},
false
}
};
const std::vector<TestCase> cross_replica_and_partition_test_cases = {
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}},
true,
false,
{
{33, {33, 34}},
{34, {33, 34}},
{45, {44, 45, 55, 56}},
},
{{33, 34}, {44, 45, 55, 56}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{},
true,
false,
{
{33, {33, 34, 44, 45, 55, 56}},
{34, {33, 34, 44, 45, 55, 56}},
{56, {33, 34, 44, 45, 55, 56}},
},
{{33, 34, 44, 45, 55, 56}},
false
},
};
const std::vector<TestCase> flattened_id_test_cases = {
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}, {3, 4, 5}},
true,
true,
{
{33, {33}},
{34, {34, 44}},
{44, {34, 44}},
{45, {45, 55, 56}},
{55, {45, 55, 56}},
{56, {45, 55, 56}},
},
{{33}, {34, 44}, {45, 55, 56}},
false
},
{
{{33}},
{},
true,
true,
{
{33, {33}},
},
{{33}},
true
},
};
const std::vector<TestCase> failure_test_cases = {
{
{{33}, {44}, {55}},
{},
false,
true,
{
{33, {}},
},
{{33, 44, 55}},
true
},
};
test_cases.insert(test_cases.end(), cross_replica_test_cases.begin(),
cross_replica_test_cases.end());
for (TestCase tc : cross_replica_test_cases) {
tc.use_global_device_ids = std::nullopt;
test_cases.push_back(tc);
}
test_cases.insert(test_cases.end(), cross_partition_test_cases.begin(),
cross_partition_test_cases.end());
test_cases.insert(test_cases.end(),
cross_replica_and_partition_test_cases.begin(),
cross_replica_and_partition_test_cases.end());
test_cases.insert(test_cases.end(), flattened_id_test_cases.begin(),
flattened_id_test_cases.end());
test_cases.insert(test_cases.end(), failure_test_cases.begin(),
failure_test_cases.end());
return test_cases;
}
class GetParticipatingDevicesTest : public testing::TestWithParam<TestCase> {};
TEST_P(GetParticipatingDevicesTest, Test) {
const TestCase &tc = GetParam();
int64_t num_replicas = tc.device_assignment.n1();
int64_t num_partitions = tc.device_assignment.n2();
DeviceAssignment device_assignment(num_replicas, num_partitions);
for (int64_t replica_id = 0; replica_id < num_replicas; ++replica_id) {
for (int64_t partition_id = 0; partition_id < num_partitions;
++partition_id) {
device_assignment(replica_id, partition_id) =
tc.device_assignment(replica_id, partition_id);
}
}
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups(tc.replica_groups);
absl::StatusOr<CollectiveOpGroupMode> group_mode =
GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
if (!group_mode.ok()) {
EXPECT_TRUE(tc.expected_failure);
return;
}
for (const TestCase::CurrentIdAndOutput &subtest : tc.subtests) {
absl::StatusOr<std::vector<GlobalDeviceId>> actual =
GetParticipatingDevices(GlobalDeviceId(subtest.current_id),
device_assignment, replica_groups, *group_mode);
if (!actual.ok()) {
EXPECT_TRUE(tc.expected_failure);
continue;
}
std::vector<GlobalDeviceId> expected;
expected.reserve(subtest.expected_output.size());
absl::c_transform(subtest.expected_output, std::back_inserter(expected),
[](int id) { return GlobalDeviceId(id); });
EXPECT_EQ(*actual, expected);
}
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
actual_device_groups = GetParticipatingDevicesGroups(
device_assignment, replica_groups, *group_mode);
if (!actual_device_groups.ok()) {
EXPECT_TRUE(tc.expected_failure);
return;
}
std::vector<std::vector<GlobalDeviceId>> expect_device_groups;
expect_device_groups.reserve(tc.participating_device_groups.size());
for (auto subgroup : tc.participating_device_groups) {
std::vector<GlobalDeviceId> subgroup_device_ids;
subgroup_device_ids.reserve(subgroup.size());
absl::c_transform(subgroup, std::back_inserter(subgroup_device_ids),
[](int id) { return GlobalDeviceId(id); });
expect_device_groups.push_back(subgroup_device_ids);
}
EXPECT_THAT(*actual_device_groups,
testing::UnorderedElementsAreArray(expect_device_groups));
}
INSTANTIATE_TEST_SUITE_P(GetParticipatingDevices, GetParticipatingDevicesTest,
testing::ValuesIn(GetTestCases()));
}
namespace GetPariticipantCountsForReplicaGroupsTest {
struct TestCase {
std::string test_name;
std::vector<std::vector<int64_t>> replica_groups;
CollectiveOpGroupMode group_mode;
int64_t num_replicas;
int64_t num_partitions;
std::vector<int64_t> expected;
};
class GetPariticipantCountsForReplicaGroupsTest
: public testing::TestWithParam<TestCase> {};
TEST_P(GetPariticipantCountsForReplicaGroupsTest, Test) {
const TestCase &tc = GetParam();
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups(tc.replica_groups);
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> actual,
GetPariticipantCountsForReplicaGroups(tc.num_replicas, tc.num_partitions,
replica_groups, tc.group_mode));
EXPECT_THAT(actual, testing::ElementsAreArray(tc.expected));
}
std::vector<TestCase> GetTestCases() {
return {
{
"CrossReplicaEmptyGroup",
{},
CollectiveOpGroupMode::kCrossReplica,
8,
1,
{8},
},
{
"CrossReplicaWithPartitions",
{{0, 1}, {2, 3}},
CollectiveOpGroupMode::kCrossReplica,
4,
2,
{2, 2, 2, 2},
},
{
"CrossReplicaAndPartition",
{{0, 1}, {2, 3}},
CollectiveOpGroupMode::kCrossReplicaAndPartition,
4,
2,
{4, 4},
},
{
"FlattenedID",
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
CollectiveOpGroupMode::kFlattenedID,
4,
2,
{1, 1, 1, 1, 1, 1, 1, 1},
},
};
}
INSTANTIATE_TEST_SUITE_P(
GetPariticipantCountsForReplicaGroups,
GetPariticipantCountsForReplicaGroupsTest,
testing::ValuesIn(GetTestCases()),
[](const testing::TestParamInfo<
GetPariticipantCountsForReplicaGroupsTest::ParamType> &info) {
return info.param.test_name;
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
277ee37f-d713-49c2-8335-47aaf3e46bf8 | cpp | tensorflow/tensorflow | topk_rewriter | third_party/xla/xla/service/topk_rewriter.cc | third_party/xla/xla/service/topk_rewriter_test.cc | #include "xla/service/topk_rewriter.h"
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace m = match;
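// Lowers an XlaBuilder-built computation into an HloComputation cloned into
// the module that owns `sibling_computation`.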
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
XlaComputation& comp, HloComputation* sibling_computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comp.proto(), config));
HloModule* dest_module = sibling_computation->parent();
HloCloneContext context(dest_module);
return dest_module->DeepCloneComputation(new_module->entry_computation(),
&context);
}
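// Matches comparator computations that implement a NaN-safe descending
// compare: either a plain greater-than on the parameters, or greater-than on
// the sign-flipped integer encoding of the float inputs (bitcast to S32/U32
// with negative values mirrored so integer order matches total float order).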
static bool IsNanSafeGt(HloComputation* comp) {
namespace m = match;
auto match_bitcast_f32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_f32_with_convert = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
auto max_u32 =
m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
.WithShape(m::Shape().WithElementType(U32));
return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(m::Subtract(max_u32, param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16 = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16_with_convert = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
auto max_u32 =
m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
.WithShape(m::Shape().WithElementType(U32));
return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(m::Subtract(max_u32, param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_generic_iec559 = [](int64_t parameter_number,
PrimitiveType fp_type,
PrimitiveType int_type) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(fp_type));
auto signed_value = m::BitcastConvert(param).WithShape(
m::Shape().WithElementType(int_type));
int64_t bit_width = primitive_util::BitWidth(fp_type);
auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
auto flipped_value = m::XorAnyOrder(max_value, signed_value);
auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
return m::Select(is_negative, flipped_value, signed_value);
};
auto match_generic_iec559_with_convert =
[](int64_t parameter_number, PrimitiveType param_type,
PrimitiveType fp_type, PrimitiveType int_type) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(param_type));
auto convert =
m::Convert(param).WithShape(m::Shape().WithElementType(fp_type));
auto signed_value = m::BitcastConvert(convert).WithShape(
m::Shape().WithElementType(int_type));
int64_t bit_width = primitive_util::BitWidth(fp_type);
auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
auto flipped_value = m::XorAnyOrder(max_value, signed_value);
auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
return m::Select(is_negative, flipped_value, signed_value);
};
auto match_s32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(S32));
return param;
};
auto match_compare = [](PrimitiveType type) {
auto param0 = m::Parameter(0).WithShape(m::Shape().WithElementType(type));
auto param1 = m::Parameter(1).WithShape(m::Shape().WithElementType(type));
return m::Gt(param0, param1);
};
auto match_default_compare = [](PrimitiveType type) {
auto params_with_type = [&](int i, PrimitiveType t) {
return m::Parameter(i).WithShape(m::Shape().WithElementType(t));
};
auto params =
std::vector({
params_with_type(0, type), params_with_type(1, type),
params_with_type(2, S32), params_with_type(3, S32)});
auto const_true = m::Broadcast(m::Constant());
auto values_gt = m::Gt(params[0], params[1]);
return m::Select(const_true, values_gt, const_true);
};
auto match_all_types = [](HloInstruction* root, auto callback) {
bool result = false;
for (auto type : {BF16, F32, S32, U32}) {
result = result || Match(root, callback(type));
}
return result;
};
return Match(comp->root_instruction(),
m::Gt(match_generic_iec559(0, F32, S32),
match_generic_iec559(1, F32, S32))) ||
Match(comp->root_instruction(),
m::Gt(match_generic_iec559(0, BF16, S16),
match_generic_iec559(1, BF16, S16))) ||
Match(comp->root_instruction(),
m::Gt(match_generic_iec559_with_convert(0, BF16, F32, S32),
match_generic_iec559_with_convert(1, BF16, F32, S32))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_f32_with_convert(0),
match_bitcast_f32_with_convert(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16_with_convert(0),
match_bitcast_bf16_with_convert(1))) ||
Match(comp->root_instruction(), m::Gt(match_s32(0), match_s32(1))) ||
match_all_types(comp->root_instruction(), match_compare) ||
match_all_types(comp->root_instruction(), match_default_compare);
}
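// Checks that the sort's second operand is an iota of indices over the sort
// dimension, either directly or broadcast from a 1-D iota.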
static bool HasIota(HloSortInstruction* sort, HloInstruction* data) {
namespace m = match;
const std::array<int64_t, 1> sort_dims = {
data->shape().dimensions(sort->sort_dimension())};
auto match_iota = [](auto dims) {
return m::Iota().WithShape(m::Shape().WithElementType(S32).WithDims(dims));
};
return Match(sort->operand(1), match_iota(data->shape().dimensions())) ||
Match(sort->operand(1), m::Broadcast(match_iota(sort_dims)));
}
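// Returns k if `inst` is a sort (optionally carrying an index iota as its
// second operand) whose comparator is a recognized NaN-safe greater-than and
// whose outputs are consumed only by offset-0, stride-1 slices keeping the
// first k elements of the sort dimension; returns nullopt otherwise.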
std::optional<int64_t> TopkRewriter::SortIsInTopK(HloInstruction* inst) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
if (sort == nullptr) {
return std::nullopt;
}
if (sort->operand_count() != 1 && sort->operand_count() != 2) {
return std::nullopt;
}
HloInstruction* data = sort->mutable_operand(0);
if (sort->operand_count() == 2 && !HasIota(sort, data)) {
return std::nullopt;
}
if (!IsNanSafeGt(sort->to_apply())) {
return std::nullopt;
}
const int64_t sort_dim = sort->sort_dimension();
bool supported = true;
std::optional<int64_t> k;
for (HloInstruction* user : sort->users()) {
const HloInstruction* slice = user;
if (sort->operand_count() == 2) {
if (user->opcode() != HloOpcode::kGetTupleElement ||
user->user_count() != 1) {
supported = false;
break;
}
slice = user->users()[0];
}
if (slice->opcode() != HloOpcode::kSlice) {
supported = false;
break;
}
if (absl::c_any_of(slice->slice_starts(), [](int x) { return x != 0; }) ||
absl::c_any_of(slice->slice_strides(), [](int x) { return x != 1; })) {
supported = false;
break;
}
for (int64_t i = 0; i < slice->slice_limits().size(); ++i) {
if (i != sort_dim &&
slice->slice_limits(i) != slice->operand(0)->shape().dimensions(i)) {
supported = false;
break;
}
}
if (!supported) {
break;
}
if (k == std::nullopt) {
k = slice->slice_limits(sort_dim);
} else if (k != slice->slice_limits(sort_dim)) {
supported = false;
break;
}
}
if (k == std::nullopt || !supported) {
return std::nullopt;
}
return k;
}
struct TopKCustomCall {
HloInstruction* topk;
HloInstruction* value_gte;
HloInstruction* index_gte;
};
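// Emits a "TopK" custom call over `input`, first reshaping/transposing so the
// operand is rank-1 or rank-2 with the sorted dimension last, then undoing
// those transforms on the value and index outputs.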
TopKCustomCall CreateTopKCustomCall(HloInstruction* input,
const int64_t sort_dim, const int64_t k,
HloComputation* comparator,
HloComputation* comp) {
Shape data_shape = input->shape();
PrimitiveType element_type = data_shape.element_type();
bool has_batch = data_shape.rank() >= 2;
int64_t input_size = data_shape.dimensions(sort_dim);
int64_t batch_size = 1;
Shape topk_input_shape;
if (has_batch) {
batch_size =
ShapeUtil::ElementsIn(data_shape) / data_shape.dimensions(sort_dim);
topk_input_shape =
ShapeUtil::MakeShape(element_type, {batch_size, input_size});
if (data_shape.rank() > 2) {
input = comp->AddInstruction(HloInstruction::CreateReshape(
sort_dim == 0
? ShapeUtil::MakeShape(element_type, {input_size, batch_size})
: ShapeUtil::MakeShape(element_type, {batch_size, input_size}),
input));
}
if (sort_dim == 0) {
input = comp->AddInstruction(
HloInstruction::CreateTranspose(topk_input_shape, input, {1, 0}));
}
} else {
topk_input_shape = data_shape;
}
Shape topk_shape =
has_batch
? ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(element_type, {batch_size, k}),
ShapeUtil::MakeShape(S32, {batch_size, k})})
: ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(element_type, {k}),
ShapeUtil::MakeShape(S32, {k})});
HloInstruction* topk = comp->AddInstruction(HloInstruction::CreateCustomCall(
topk_shape, {input}, comparator, "TopK"));
HloInstruction* value_gte =
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(0), topk, 0));
HloInstruction* index_gte =
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(1), topk, 1));
if (has_batch) {
if (sort_dim == 0) {
value_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(element_type, {k, batch_size}), value_gte,
{1, 0}));
index_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {k, batch_size}), index_gte, {1, 0}));
}
if (data_shape.rank() > 2) {
std::vector<int64_t> shape_dim(data_shape.dimensions().begin(),
data_shape.dimensions().end());
shape_dim[sort_dim] = k;
value_gte = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(element_type, shape_dim), value_gte));
index_gte = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, shape_dim), index_gte));
}
}
return {topk, value_gte, index_gte};
}
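// If `inst` matches the sort-based top-k pattern, replaces its slice users
// with the outputs of a "TopK" custom call and returns the new custom call;
// returns nullptr when the pattern does not apply (wrong element type,
// unsupported sort dimension, or conversion deemed unprofitable).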
absl::StatusOr<HloInstruction*> TopkRewriter::TransformPatternToCustomCall(
HloInstruction* inst) {
std::optional<int64_t> k = SortIsInTopK(inst);
if (!k) {
return nullptr;
}
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
HloInstruction* data = sort->mutable_operand(0);
const PrimitiveType element_type = data->shape().element_type();
if (element_type != F32 && element_type != BF16) {
return nullptr;
}
const int64_t sort_dim = sort->sort_dimension();
if (sort_dim != 0 && sort_dim != data->shape().rank() - 1) {
return nullptr;
}
if (!is_profitable_to_convert_(sort, *k)) {
return nullptr;
}
TopKCustomCall topkcc = CreateTopKCustomCall(
data, sort_dim, k.value(), sort->to_apply(), inst->parent());
for (HloInstruction* user : sort->users()) {
if (sort->operand_count() == 2) {
HloInstruction* gte = user;
for (HloInstruction* slice : gte->users()) {
if (gte->tuple_index() == 0) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.value_gte));
} else if (gte->tuple_index() == 1) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.index_gte));
} else {
LOG(FATAL) << "Sort with more than 2 output isn't supported in "
"topk rewriter";
}
}
} else {
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(topkcc.value_gte));
}
}
return topkcc.topk;
}
absl::StatusOr<bool> TopkRewriter::TransformToCustomCall(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(HloInstruction * topkcc,
TransformPatternToCustomCall(inst));
if (topkcc != nullptr) {
VLOG(2) << "Rewritten Topk: " << topkcc->ToString();
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> TopkRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(auto transform_to_customcall_changed,
TransformToCustomCall(module, execution_threads));
changed |= transform_to_customcall_changed;
return changed;
}
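// Rewrites TopK, whether the dedicated HLO op or the "TopK" custom call, back
// into a sort-plus-slice sequence. The index iota and index output are elided
// when only the values are consumed.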
class TopkDecomposerVisitor : public DfsHloRewriteVisitor {
public:
explicit TopkDecomposerVisitor(HloPredicate should_decompose)
: should_decompose_(should_decompose) {}
absl::Status HandleCustomCall(HloInstruction* inst) override {
if (should_decompose_ && !should_decompose_(inst)) {
return absl::OkStatus();
}
HloCustomCallInstruction* call = DynCast<HloCustomCallInstruction>(inst);
if (call == nullptr || call->custom_call_target() != "TopK") {
return absl::OkStatus();
}
HloComputation* comparator = call->to_apply();
return DecomposeTopK(call, comparator);
}
absl::Status HandleTopK(HloInstruction* topk) override {
if (should_decompose_ && !should_decompose_(topk)) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(HloComputation * comparator,
CreateVariadicComparator(topk));
return DecomposeTopK(topk, comparator);
}
private:
bool HasSingleUserReadingOnlyTheValueOutput(HloInstruction* inst) {
return inst->user_count() == 1 && inst->users().front()->tuple_index() == 0;
}
absl::StatusOr<HloComputation*> CreateVariadicComparator(
HloInstruction* inst) {
HloTopKInstruction* topk = DynCast<HloTopKInstruction>(inst);
XlaBuilder b(absl::StrCat("comparator_", topk->name()));
std::vector<PrimitiveType> ptypes = {
topk->operand(0)->shape().element_type()};
if (!HasSingleUserReadingOnlyTheValueOutput(inst)) {
ptypes.emplace_back(PrimitiveType::S32);
}
XlaComputation comparison = topk->largest()
? CreateScalarGtComputation(ptypes, &b)
: CreateScalarLtComputation(ptypes, &b);
TF_ASSIGN_OR_RETURN(HloComputation * comparator,
BuilderToHloComputation(comparison, topk->parent()));
return comparator;
}
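// Replaces `call` with a sort + slice expansion. When the only user reads
// just the values (and the comparator takes two parameters), sorting the
// input alone suffices; otherwise the input is sorted together with an iota
// so the original indices can be sliced out as the second tuple element.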
absl::Status DecomposeTopK(HloInstruction* call,
HloComputation* variadic_comparator) {
HloComputation* comp = call->parent();
HloInstruction* input = call->mutable_operand(0);
Shape iota_shape = input->shape();
iota_shape.set_element_type(S32);
size_t sort_dimension = input->shape().dimensions_size() - 1;
std::vector<int64_t> zeroes(iota_shape.rank(), 0);
std::vector<int64_t> ones(iota_shape.rank(), 1);
auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
return comp->AddInstruction(HloInstruction::CreateSlice(
call->shape().tuple_shapes(index),
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
sort->shape().tuple_shapes(index), sort, index)),
zeroes, call->shape().tuple_shapes(index).dimensions(), ones));
};
CHECK_NE(variadic_comparator, nullptr);
if (HasSingleUserReadingOnlyTheValueOutput(call) &&
variadic_comparator->num_parameters() == 2) {
HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
{input->shape()}, sort_dimension, {input}, variadic_comparator,
          /*is_stable=*/true));
TF_RETURN_IF_ERROR(ReplaceInstruction(
call->users().front(),
comp->AddInstruction(HloInstruction::CreateSlice(
call->shape().tuple_shapes(0), sort, zeroes,
call->shape().tuple_shapes(0).dimensions(), ones))));
sort->set_metadata(call->metadata());
} else {
HloInstruction* iota = comp->AddInstruction(
HloInstruction::CreateIota(iota_shape, iota_shape.rank() - 1));
HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
ShapeUtil::MakeTupleShape({input->shape(), iota_shape}),
sort_dimension, {input, iota}, variadic_comparator,
          /*is_stable=*/true));
TF_RETURN_IF_ERROR(ReplaceInstruction(
call, comp->AddInstruction(HloInstruction::CreateTuple(
{slice_tuple(sort, 0), slice_tuple(sort, 1)}))));
sort->set_metadata(call->metadata());
}
return absl::OkStatus();
}
private:
HloPredicate should_decompose_;
};
absl::StatusOr<bool> TopkDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return TopkDecomposerVisitor(should_decompose_)
.RunOnModule(module, execution_threads);
}
} | #include "xla/service/topk_rewriter.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <optional>
#include <utility>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::tsl::testing::IsOkAndHolds;
using TopkRewriterTest = HloTestBase;
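// The helpers below return HLO comparator computations in the shapes the
// rewriter pattern-matches: bitcast-based total-order float comparisons
// (with and without index parameters), a convert-based variant, a plain
// TOTALORDER compare, and a select-wrapped "stable" form.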
std::string getComparator() {
return R"(
%compare {
%p.1.lhs.8 = s32[] parameter(2)
%p.1.rhs.9 = s32[] parameter(3)
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
std::string getConvertMaxComparator() {
return R"(
%compare {
%p.1.lhs.6 = s32[] parameter(2)
%p.1.rhs.7 = s32[] parameter(3)
%p.0.lhs.4 = f32[] parameter(0)
%bitcast-convert = s32[] bitcast-convert(f32[] %p.0.lhs.4)
%constant = s32[] constant(0)
%compare = pred[] compare(s32[] %bitcast-convert, s32[] %constant), direction=LT
%constant.1 = s32[] constant(2147483647)
%convert = u32[] convert(s32[] %constant.1)
%bitcast-convert.1 = u32[] bitcast-convert(f32[] %p.0.lhs.4)
%subtract = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.1)
%bitcast-convert.2 = s32[] bitcast-convert(u32[] %subtract)
%select = s32[] select(pred[] %compare, s32[] %bitcast-convert.2, s32[] %bitcast-convert)
%p.0.rhs.5 = f32[] parameter(1)
%bitcast-convert.3 = s32[] bitcast-convert(f32[] %p.0.rhs.5)
%compare.1 = pred[] compare(s32[] %bitcast-convert.3, s32[] %constant), direction=LT
%bitcast-convert.4 = u32[] bitcast-convert(f32[] %p.0.rhs.5)
%subtract.1 = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.4)
%bitcast-convert.5 = s32[] bitcast-convert(u32[] %subtract.1)
%select.1 = s32[] select(pred[] %compare.1, s32[] %bitcast-convert.5, s32[] %bitcast-convert.3)
ROOT %compare.2 = pred[] compare(s32[] %select, s32[] %select.1), direction=GT
})";
}
std::string getComparatorNoIota() {
return R"(
%compare {
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
std::string getCompareComparator() {
return R"(
%compare {
%Arg_0.100 = f32[] parameter(0)
%Arg_1.101 = f32[] parameter(1)
%Arg_2.102 = s32[] parameter(2)
%Arg_3.103 = s32[] parameter(3)
ROOT %compare.56364 = pred[] compare(f32[] %Arg_0.100, f32[] %Arg_1.101), direction=GT, type=TOTALORDER
})";
}
std::string getStableComparator() {
return R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
}
bool IsStableSort(const HloInstruction* inst) {
auto* sort = DynCast<HloSortInstruction>(inst);
return sort != nullptr && sort->is_stable();
}
TEST_F(TopkRewriterTest, Rewrite) {
  for (const std::string& comparator :
{getComparator(), getCompareComparator(), getStableComparator()}) {
const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
}
TEST_F(TopkRewriterTest, RewriteWithBroadcast) {
  for (const std::string& comparator :
{getComparator(), getCompareComparator(), getStableComparator()}) {
const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
}
TEST_F(TopkRewriterTest, RewriteWithConvertMaxComparator) {
const std::string hlo_string = R"(
HloModule module
)" + getConvertMaxComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteUnbatched) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567] parameter(0)
%iota.4 = s32[1234567] iota(), iota_dimension=0
%sort.27 = (f32[1234567], s32[1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteTranspose) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567,8] parameter(0)
%iota.4 = s32[1234567,8] iota(), iota_dimension=0
%sort.27 = (f32[1234567,8], s32[1234567,8]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567,8] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5,8] slice(%get-tuple-element.28), slice={[0:5], [0:8]}
%get-tuple-element.30 = s32[1234567,8] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5,8] slice(%get-tuple-element.30), slice={[0:5], [0:8]}
ROOT %tuple.32 = (f32[5,8], s32[5,8]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
LOG(INFO) << module->entry_computation()->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 0)),
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteReshape) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[3,8,1234567] parameter(0)
%iota.4 = s32[3,8,1234567] iota(), iota_dimension=2
%sort.27 = (f32[3,8,1234567], s32[3,8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={2}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[3,8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[3,8,5] slice(%get-tuple-element.28), slice={[0:3], [0:8], [0:5]}
%get-tuple-element.30 = s32[3,8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[3,8,5] slice(%get-tuple-element.30), slice={[0:3], [0:8], [0:5]}
ROOT %tuple.32 = (f32[3,8,5], s32[3,8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 0)),
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RoundTripNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(
m::Sort(m::Parameter(0)).WithPredicate(IsStableSort))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripOnlyIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = s32[8,1234567] get-tuple-element(%sort.27), index=1
ROOT %slice.29 = s32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort),
1))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTrip) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::GetTupleElement(sort_matcher, 0)),
m::Slice(m::GetTupleElement(sort_matcher, 1)))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripValueOnly) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
ROOT %slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(sort_matcher, 0))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, SanityCheckOutput) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto topk_module = source_module->Clone();
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(topk_module.get()),
IsOkAndHolds(true));
auto decomposed_module = topk_module->Clone();
EXPECT_THAT(TopkDecomposer().Run(decomposed_module.get()),
IsOkAndHolds(true));
const size_t source_size = 1234;
std::vector<float> source(source_size);
std::iota(source.begin(), source.end(), 80000);
auto input = LiteralUtil::CreateR1<float>(source);
std::vector<float> top_k({81233, 81232, 81231, 81230, 81229});
auto check_result = [&](std::unique_ptr<HloModule> module) {
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&input}));
LiteralTestUtil::ExpectR1Equal<float>(top_k, result.DecomposeTuple()[0]);
};
check_result(std::move(source_module));
check_result(std::move(decomposed_module));
}
TEST_F(TopkRewriterTest, Equivalent) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(
RunAndCompare(std::move(source_module), std::nullopt, round_trip));
}
TEST_F(TopkRewriterTest, DecomposerStability) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2 = f32[1234] broadcast(f32[] %constant.1), dimensions={}
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%broadcast.2, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(source_module), std::nullopt,
round_trip));
}
TEST_F(TopkRewriterTest, TopKDecomposition) {
const std::string hlo_string = R"(
HloModule topk
ENTRY TopK {
x = bf16[10,10]{0,1} parameter(0)
ROOT topk = (bf16[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher = op::Sort(op::Parameter(0), op::Iota());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Slice(op::GetTupleElement(sort_matcher, 0)),
op::Slice(op::GetTupleElement(sort_matcher, 1))));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/topk_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/topk_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5edad00f-282c-410e-9002-a59cd79e26b9 | cpp | tensorflow/tensorflow | host_offloader | third_party/xla/xla/service/host_offloader.cc | third_party/xla/xla/service/host_offloader_test.cc | #include "xla/service/host_offloader.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/host_offload_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::xla::host_offload_utils::InstructionAndShapeIndex;
void SetMemorySpace(Shape* shape, int64_t memory_space_color) {
CHECK(shape->has_layout());
shape->mutable_layout()->set_memory_space(memory_space_color);
}
bool SetBuffersToMemorySpaceColor(
const std::vector<InstructionAndShapeIndex>& buffers_to_set_to_host_memory,
int64_t memory_space_color) {
bool changed = false;
for (const auto& instr_and_shape : buffers_to_set_to_host_memory) {
VLOG(2) << absl::StreamFormat("Setting %s to memory space %d",
instr_and_shape.ToString(),
memory_space_color);
    Shape* shape = ShapeUtil::GetMutableSubshape(
        instr_and_shape.instruction->mutable_shape(),
        instr_and_shape.shape_index);
    CHECK(shape->has_layout()) << "Shape must have a layout";
    // Reuse the subshape pointer computed above instead of resolving the
    // same subshape a second time.
    SetMemorySpace(shape, memory_space_color);
changed = true;
}
return changed;
}
}
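// Only layout-preserving ops may appear between a MoveToHost annotation and
// the DynamicUpdateSlice that writes into host memory: bitcasts, copies, and
// reshapes that are bitcasts in practice.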
bool HostOffloader::InstructionIsAllowedBetweenMoveToHostAndDus(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return (instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kCopy);
}
bool HostOffloader::InstructionIsAllowedBetweenDsAndMoveToDevice(
const HloInstruction* instruction) const {
if (instruction->opcode() == HloOpcode::kReduce) {
return ShapeUtil::TrueRank(instruction->operand(0)->shape()) ==
ShapeUtil::TrueRank(instruction->shape());
}
if (instruction->opcode() == HloOpcode::kReshape) {
return ShapeUtil::ReshapeIsBitcast(instruction->operand(0)->shape(),
instruction->shape());
}
return instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kCopy;
}
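// Breadth-first walk from `starting_instruction_and_index` over everything
// that consumes the offloaded buffer. Along the way it records buffers whose
// layout must be recolored to host memory, MoveToHost custom calls to strip,
// MoveToDevice custom calls that need a copy inserted before them,
// DynamicUpdateSlices that need a host AllocateBuffer, and static slices to
// convert into dynamic ones. Any other compute on the host-resident tensor
// is an error.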
absl::StatusOr<bool> HostOffloader::WalkDownHostMemoryOffloadPaths(
const InstructionAndShapeIndex& starting_instruction_and_index,
bool insert_copy_before) {
bool changed = false;
absl::flat_hash_set<HloInstruction*> mth_custom_calls_to_remove;
absl::flat_hash_set<HloInstruction*> slices_to_dynamify;
absl::flat_hash_set<HloInstruction*> custom_calls_to_insert_copies_before;
std::vector<InstructionAndShapeIndex> buffers_to_set_to_host_memory;
std::vector<HloInstruction*> dynamic_update_slices;
HloInstruction* starting_instruction =
starting_instruction_and_index.instruction;
std::queue<InstructionAndShapeIndex> queue;
queue.push(starting_instruction_and_index);
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape_index = queue.front();
queue.pop();
HloInstruction* instruction = instruction_and_shape_index.instruction;
VLOG(4) << absl::StreamFormat("Visiting instruction: %s",
instruction_and_shape_index.ToString());
bool already_saved_buffer = false;
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget) {
already_visited_move_to_host_custom_calls_.insert(instruction);
mth_custom_calls_to_remove.insert(instruction);
} else if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() ==
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget) {
custom_calls_to_insert_copies_before.insert(instruction);
continue;
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
if (instruction == starting_instruction) {
dynamic_update_slices.push_back(instruction);
} else {
dynamic_update_slices_already_allocated_.insert(instruction);
}
} else if (host_offload_utils::IsValidDuringPureMemoryOffload(
instruction)) {
if (instruction->opcode() == HloOpcode::kAsyncStart) {
already_saved_buffer = true;
} else if (instruction->opcode() == HloOpcode::kAsyncDone) {
HloInstruction* async_start = instruction->mutable_operand(0);
buffers_to_set_to_host_memory.emplace_back(async_start, ShapeIndex{1});
} else if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
if (caller->opcode() == HloOpcode::kAsyncStart) {
ShapeIndex tmp_index = instruction_and_shape_index.shape_index;
tmp_index.push_front(instruction->parameter_number());
            // Index 0 of the async-start's shape is the tuple of the call's
            // operands; prepend it so the index addresses this parameter.
            tmp_index.push_front(0);
buffers_to_set_to_host_memory.emplace_back(caller, tmp_index);
}
}
}
} else if (instruction->opcode() == HloOpcode::kDynamicSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
continue;
} else if (instruction->opcode() == HloOpcode::kSlice) {
TF_RETURN_IF_ERROR(
ValidateSliceLeadsToMoveToDeviceCustomCall(instruction));
slices_to_dynamify.insert(instruction);
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from "
"\"%s\") is used by an instruction (\"%s\") which is "
"not acceptable during pure memory offload.",
starting_instruction->name(), instruction->name()));
}
if (!already_saved_buffer) {
VLOG(5) << "Saving " << instruction_and_shape_index.ToString()
<< " to be set to host memory.";
buffers_to_set_to_host_memory.push_back(instruction_and_shape_index);
}
if (instruction->IsRoot() && instruction->parent()->IsEntryComputation()) {
const Shape& output_shape = ShapeUtil::GetSubshape(
instruction->GetModule()->entry_computation_layout().result_shape(),
instruction_and_shape_index.shape_index);
CHECK(output_shape.has_layout())
<< "Expecting output shape of entry computation to have a layout.";
if (output_shape.layout().memory_space() == kHostMemorySpaceColor) {
VLOG(2) << absl::StreamFormat(
"Memory offloaded starting from %s is output streamed",
starting_instruction_and_index.ToString());
continue;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("Tensor which is moved to host (starting from %s) "
"is returned from the entry computation but the "
"layout for this output is not set to host memory.",
starting_instruction->name()));
}
}
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape_index));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
const bool set_buffers_changed = SetBuffersToMemorySpaceColor(
buffers_to_set_to_host_memory, kHostMemorySpaceColor);
changed = changed || set_buffers_changed;
for (HloInstruction* dus : dynamic_update_slices) {
TF_RETURN_IF_ERROR(CreateAllocateBufferForDynamicUpdateSlice(dus));
changed = true;
}
if (insert_copy_before) {
const auto predecessors =
host_offload_utils::GetPredecessors(starting_instruction_and_index);
CHECK_EQ(predecessors.size(), 1);
TF_ASSIGN_OR_RETURN(bool inserted_copy,
InsertCopyBetween(predecessors.front(),
starting_instruction_and_index));
changed = changed || inserted_copy;
}
for (HloInstruction* custom_call : custom_calls_to_insert_copies_before) {
HloInstruction* data_to_copy = custom_call->mutable_operand(0);
HloInstruction* copy_to_device =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_device->mutable_shape(),
Layout::kDefaultMemorySpace);
VLOG(1) << absl::StreamFormat(
"Inserted copy \"%s\" before custom call \"%s\"",
copy_to_device->name(), custom_call->name());
TF_RETURN_IF_ERROR(custom_call->ReplaceAllUsesWith(copy_to_device));
changed = true;
}
for (HloInstruction* custom_call : mth_custom_calls_to_remove) {
VLOG(1) << absl::StreamFormat("Removing MoveToHost custom call \"%s\"",
custom_call->name());
TF_RETURN_IF_ERROR(
custom_call->ReplaceAllUsesWith(custom_call->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call->parent()->RemoveInstruction(custom_call));
changed = true;
}
for (HloInstruction* slice : slices_to_dynamify) {
TF_ASSIGN_OR_RETURN(HloInstruction * dynamic_slice, DynamifySlice(slice));
validated_slices_.insert(dynamic_slice);
changed = true;
}
return changed;
}
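// Handles parameters that the entry computation layout already places in
// host memory ("input streaming"): each such subshape seeds a host-memory
// walk starting at the corresponding parameter instruction.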
absl::StatusOr<bool> HostOffloader::HandleInputStreaming(
HloComputation* entry_computation) {
bool changed = false;
const ComputationLayout& entry_computation_layout =
entry_computation->parent()->entry_computation_layout();
for (int i = 0; i < entry_computation_layout.parameter_count(); ++i) {
if (entry_computation_layout.parameter_shape(i).IsToken()) {
LOG(WARNING) << "Token parameters are not supported for streaming.";
continue;
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
entry_computation_layout.parameter_shape(i),
[&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.has_layout() &&
subshape.layout().memory_space() == kHostMemorySpaceColor) {
HloInstruction* parameter_instruction =
entry_computation->parameter_instruction(i);
VLOG(1) << "Host parameter streamed into program with shape: "
<< subshape.ToString(true) << " at index "
<< index.ToString();
TF_ASSIGN_OR_RETURN(
bool result,
WalkDownHostMemoryOffloadPaths(
InstructionAndShapeIndex(parameter_instruction, index),
                      /*insert_copy_before=*/false));
changed = changed || result;
}
return absl::OkStatus();
}));
}
return changed;
}
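// Processes a single MoveToHost custom call: finds the real starting points
// of the offload (see GetStartingInstructions), walks each path, then strips
// the annotation. The special case where the annotation itself is the entry
// root inserts a copy-to-host so the output lands in host memory.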
absl::StatusOr<bool> HostOffloader::HandleMoveToHostCustomCall(
HloInstruction* custom_call_instruction) {
if (already_visited_move_to_host_custom_calls_.contains(
custom_call_instruction)) {
return false;
}
VLOG(1) << "Offloading " << custom_call_instruction->operand(0)->name()
<< " to host.";
TF_ASSIGN_OR_RETURN(
std::vector<InstructionAndShapeIndex> starting_instruction_and_shapes,
GetStartingInstructions(custom_call_instruction));
if (starting_instruction_and_shapes.empty()) {
if (custom_call_instruction == custom_call_instruction->GetModule()
->entry_computation()
->root_instruction()) {
HloInstruction* data_to_copy =
custom_call_instruction->mutable_operand(0);
HloInstruction* copy_to_host =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_host->mutable_shape(), kHostMemorySpaceColor);
TF_RETURN_IF_ERROR(
custom_call_instruction->ReplaceAllUsesWith(copy_to_host));
VLOG(2) << absl::StreamFormat(
"Custom call \"%s\" is entry computation root. Inserted copy \"%s\" "
"and replaced root instruction.",
custom_call_instruction->name(), copy_to_host->name());
}
}
for (const InstructionAndShapeIndex& starting_instruction_and_shape :
starting_instruction_and_shapes) {
const bool should_insert_copy_before_instruction =
starting_instruction_and_shape.instruction->opcode() !=
HloOpcode::kDynamicUpdateSlice;
TF_ASSIGN_OR_RETURN(
bool result,
WalkDownHostMemoryOffloadPaths(starting_instruction_and_shape,
should_insert_copy_before_instruction));
    // Any error was already propagated by TF_ASSIGN_OR_RETURN; the bool is
    // unused because removing the custom call below always counts as a change.
    (void)result;
}
already_visited_move_to_host_custom_calls_.insert(custom_call_instruction);
VLOG(2) << absl::StreamFormat("Removing MoveToHost custom call \"%s\"",
custom_call_instruction->name());
TF_RETURN_IF_ERROR(custom_call_instruction->ReplaceAllUsesWith(
custom_call_instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
return true;
}
absl::StatusOr<bool> HostOffloader::HandleMoveToDeviceCustomCall(
HloInstruction* custom_call_instruction) {
VLOG(2) << absl::StreamFormat("Removing MoveToDevice custom call \"%s\"",
custom_call_instruction->name());
TF_RETURN_IF_ERROR(custom_call_instruction->ReplaceAllUsesWith(
custom_call_instruction->mutable_operand(0)));
TF_RETURN_IF_ERROR(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
move_to_device_custom_calls_to_remove_.insert(custom_call_instruction);
return true;
}
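// Inserts (or reuses) a host-memory copy of `before`'s result and rewires
// `after` to consume it. When `after` is a computation parameter, the copy is
// inserted at every caller that feeds the value in; copies_created_after_ and
// already_inserted_copy_before_ deduplicate the work.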
absl::StatusOr<bool> HostOffloader::InsertCopyBetween(
const InstructionAndShapeIndex& before_instruction_and_index,
const InstructionAndShapeIndex& after_instruction_and_index) {
bool changed = false;
HloInstruction* after_instruction = after_instruction_and_index.instruction;
std::vector<InstructionAndShapeIndex> instructions_to_insert_copies_before;
if (after_instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(after_instruction->GetModule());
auto callers =
call_graph->GetComputationCallers(after_instruction->parent());
for (HloInstruction* caller : callers) {
const auto indices =
caller->OperandIndices(before_instruction_and_index.instruction);
for (int64_t index : indices) {
instructions_to_insert_copies_before.push_back(
InstructionAndShapeIndex{caller, {index}});
}
}
} else {
instructions_to_insert_copies_before.push_back(after_instruction_and_index);
}
for (const InstructionAndShapeIndex& instruction_and_index :
instructions_to_insert_copies_before) {
if (already_inserted_copy_before_.find(instruction_and_index) ==
already_inserted_copy_before_.end()) {
HloInstruction* data_to_copy = before_instruction_and_index.instruction;
HloInstruction* copy_to_host;
auto it = copies_created_after_.find(data_to_copy);
if (it == copies_created_after_.end()) {
copy_to_host =
data_to_copy->parent()->AddInstruction(HloInstruction::CreateUnary(
data_to_copy->shape(), HloOpcode::kCopy, data_to_copy));
SetMemorySpace(copy_to_host->mutable_shape(), kHostMemorySpaceColor);
copies_created_after_[data_to_copy] = copy_to_host;
} else {
copy_to_host = it->second;
}
const int64_t operand_index =
after_instruction_and_index.shape_index.empty()
? 0
: after_instruction_and_index.shape_index.front();
TF_RETURN_IF_ERROR(instruction_and_index.instruction->ReplaceOperandWith(
operand_index, copy_to_host));
VLOG(2) << absl::StreamFormat(
"Inserted copy \"%s\" between \"%s\" and \"%s\"",
copy_to_host->name(), before_instruction_and_index.ToString(),
after_instruction_and_index.ToString());
already_inserted_copy_before_.insert(instruction_and_index);
changed = true;
}
}
return changed;
}
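// Starting from a MoveToHost custom call, returns the instructions where the
// actual offload begins: either a DynamicUpdateSlice or the first successor
// that is not an allowed pass-through op. An empty result means the
// annotation feeds nothing that needs walking (e.g. it is the entry root).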
absl::StatusOr<std::vector<InstructionAndShapeIndex>>
HostOffloader::GetStartingInstructions(
HloInstruction* custom_call_instruction) {
std::vector<InstructionAndShapeIndex> result;
std::queue<InstructionAndShapeIndex> queue;
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors_of_custom_call,
host_offload_utils::GetSuccessors(
InstructionAndShapeIndex(custom_call_instruction)));
for (const InstructionAndShapeIndex& successor : successors_of_custom_call) {
queue.push(successor);
}
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
HloInstruction* current_instruction = instruction_and_shape.instruction;
if (current_instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
result.push_back(instruction_and_shape);
continue;
} else if (!InstructionIsAllowedBetweenMoveToHostAndDus(
current_instruction)) {
result.push_back(instruction_and_shape);
continue;
    }
    // Otherwise this is an allowed pass-through (bitcast, copy, or a
    // bitcast-like reshape); keep walking through its successors.
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
return result;
}
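// Checks that everything downstream of a (dynamic-)slice of host memory is an
// allowed pass-through until a MoveToDevice custom call is reached; results
// are memoized in validated_slices_.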
absl::Status HostOffloader::ValidateSliceLeadsToMoveToDeviceCustomCall(
HloInstruction* slice) {
if (validated_slices_.find(slice) != validated_slices_.end()) {
return absl::OkStatus();
}
CHECK(slice->opcode() == HloOpcode::kDynamicSlice ||
slice->opcode() == HloOpcode::kSlice)
<< "This function must only be called with a slice or dynamic slice.";
std::queue<InstructionAndShapeIndex> queue;
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors_of_slice,
host_offload_utils::GetSuccessors(InstructionAndShapeIndex(slice)));
for (const InstructionAndShapeIndex& successor : successors_of_slice) {
queue.push(successor);
}
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
HloInstruction* current_instruction = instruction_and_shape.instruction;
if (current_instruction->opcode() == HloOpcode::kCustomCall &&
current_instruction->custom_call_target() ==
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget) {
continue;
}
if (!InstructionIsAllowedBetweenDsAndMoveToDevice(current_instruction)) {
return absl::InvalidArgumentError(absl::StrFormat(
"Tensor which is moved to host and back to device (ending at \"%s\") "
"has an invalid instruction (\"%s\") between DynamicSlice/Slice and "
"the MoveToDevice custom call.",
slice->name(), current_instruction->name()));
}
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
queue.push(successor);
}
}
validated_slices_.insert(slice);
return absl::OkStatus();
}
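// Walks backwards from a DynamicUpdateSlice that writes into host memory,
// recoloring the chain to host memory space, and replaces the originating
// broadcast with a host "AllocateBuffer" custom call so the full buffer never
// has to be materialized on device. While-loop parameters are followed into
// the loop's condition computation as well.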
absl::Status HostOffloader::CreateAllocateBufferForDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
if (dynamic_update_slices_already_allocated_.find(dynamic_update_slice) !=
dynamic_update_slices_already_allocated_.end()) {
return absl::OkStatus();
}
VLOG(2) << absl::StreamFormat(
"Creating a AllocateBuffer in host memory space for \"%s\"",
dynamic_update_slice->name());
std::queue<InstructionAndShapeIndex> queue;
queue.push(InstructionAndShapeIndex(dynamic_update_slice));
bool found_broadcast = false;
while (!queue.empty()) {
InstructionAndShapeIndex instruction_and_shape = queue.front();
queue.pop();
VLOG(2) << absl::StreamFormat("Setting %s to have host memory space",
instruction_and_shape.ToString());
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instruction_and_shape.instruction->mutable_shape(),
instruction_and_shape.shape_index),
kHostMemorySpaceColor);
HloInstruction* instruction = instruction_and_shape.instruction;
if (instruction->opcode() == HloOpcode::kParameter) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(instruction->GetModule());
const std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
for (HloInstruction* caller : callers) {
if (caller->opcode() == HloOpcode::kWhile) {
CHECK(caller->while_body() == instruction->parent())
<< "We assume that we're starting from the while body";
HloComputation* while_condition_computation =
caller->while_condition();
CHECK(while_condition_computation->num_parameters() == 1)
<< "Expecting While to have just 1 parameter";
HloInstruction* while_condition_parameter =
while_condition_computation->parameter_instruction(0);
VLOG(2) << absl::StreamFormat("Setting %s to have host memory space",
while_condition_parameter->name());
SetMemorySpace(ShapeUtil::GetMutableSubshape(
while_condition_parameter->mutable_shape(),
instruction_and_shape.shape_index),
kHostMemorySpaceColor);
std::queue<InstructionAndShapeIndex> nested_queue;
nested_queue.push(InstructionAndShapeIndex(
while_condition_parameter, instruction_and_shape.shape_index));
while (!nested_queue.empty()) {
InstructionAndShapeIndex nested_instruction_and_shape =
nested_queue.front();
nested_queue.pop();
if (!host_offload_utils::IsValidDuringPureMemoryOffload(
nested_instruction_and_shape.instruction)) {
return absl::InvalidArgumentError(absl::StrFormat(
"Tensor which is moved to host is used by an invalid "
"instruction (\"%s\") during while condition body.",
nested_instruction_and_shape.instruction->name()));
}
SetMemorySpace(
ShapeUtil::GetMutableSubshape(
nested_instruction_and_shape.instruction->mutable_shape(),
nested_instruction_and_shape.shape_index),
kHostMemorySpaceColor);
TF_ASSIGN_OR_RETURN(
const std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(
nested_instruction_and_shape));
for (const InstructionAndShapeIndex& successor : successors) {
nested_queue.push(successor);
}
}
}
}
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
dynamic_update_slices_already_allocated_.insert(instruction);
} else if (instruction->IsCustomCall("AllocateBuffer")) {
VLOG(2) << absl::StreamFormat(
"DynamicUpdateSlice \"%s\" already writes into an AllocateBuffer "
"\"%s\"",
dynamic_update_slice->name(), instruction->name());
return absl::OkStatus();
}
const std::vector<InstructionAndShapeIndex> predecessors =
host_offload_utils::GetPredecessors(instruction_and_shape);
for (const InstructionAndShapeIndex& predecessor : predecessors) {
HloInstruction* predecessor_instruction = predecessor.instruction;
if (predecessor_instruction->opcode() == HloOpcode::kBroadcast) {
found_broadcast = true;
HloInstruction* broadcast_user = instruction_and_shape.instruction;
const auto operand_indices =
broadcast_user->OperandIndices(predecessor_instruction);
CHECK(!operand_indices.empty())
<< "We could only have the broadcast as a predecessor if it is an "
"operand of this instruction; something is wrong.";
HloInstruction* allocate_buffer =
predecessor_instruction->parent()->AddInstruction(
HloInstruction::CreateCustomCall(
predecessor_instruction->shape(), {}, "AllocateBuffer"));
VLOG(1) << absl::StreamFormat(
"Created new AllocateBuffer instruction \"%s\"",
allocate_buffer->ToString());
SetMemorySpace(allocate_buffer->mutable_shape(), kHostMemorySpaceColor);
for (int64_t index : operand_indices) {
TF_RETURN_IF_ERROR(
broadcast_user->ReplaceOperandWith(index, allocate_buffer));
}
if (predecessor_instruction->user_count() == 0) {
VLOG(3) << absl::StreamFormat(
"Broadcast \"%s\" has no remaining users; removing.",
predecessor_instruction->name());
TF_RETURN_IF_ERROR(
predecessor_instruction->parent()->RemoveInstruction(
predecessor_instruction));
}
} else {
queue.push(predecessor);
}
}
}
if (!found_broadcast) {
return absl::InvalidArgumentError(
absl::StrFormat("DynamicUpdateSlice \"%s\"'s first operand is not the "
"result of a broadcast.",
dynamic_update_slice->name()));
}
return absl::OkStatus();
}
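// Converts a static slice into an equivalent dynamic-slice: the start indices
// become s32 constants and the slice sizes are limits minus starts,
// presumably so downstream handling of host-memory slices is uniform.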
absl::StatusOr<HloInstruction*> HostOffloader::DynamifySlice(
HloInstruction* slice) {
std::vector<HloInstruction*> start_constants;
for (int64_t start : slice->slice_starts()) {
HloInstruction* constant = slice->parent()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(start)));
start_constants.push_back(constant);
}
std::vector<int64_t> slice_sizes;
slice_sizes.reserve(slice->slice_limits().size());
  for (size_t i = 0; i < slice->slice_limits().size(); ++i) {
slice_sizes.push_back(slice->slice_limits()[i] - slice->slice_starts()[i]);
}
HloInstruction* new_ds =
slice->parent()->AddInstruction(HloInstruction::CreateDynamicSlice(
slice->shape(), slice->mutable_operand(0), start_constants,
slice_sizes));
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(new_ds));
VLOG(2) << absl::StreamFormat(
"Changed slice \"%s\" into dynamic slice \"%s\"", slice->name(),
new_ds->name());
TF_RETURN_IF_ERROR(slice->parent()->RemoveInstruction(slice));
return new_ds;
}
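// Post-processing step: when a host DynamicUpdateSlice's update operand
// aliases a parameter buffer of a non-entry computation, a copy is inserted
// so the update cannot create a conflicting in-place dependency (hence,
// presumably, the "scheduling fix" name).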
absl::StatusOr<bool> HostOffloader::ApplySchedulingFix(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
auto uses_parameter_buffer = [&](HloInstruction* hlo) {
for (const HloBuffer* buffer : alias_analysis->ComputeBuffersAt(hlo)) {
for (const HloValue* value : buffer->values()) {
for (const HloPosition& pos : value->positions()) {
if (absl::c_linear_search(hlo->parent()->parameter_instructions(),
pos.instruction)) {
return true;
}
}
}
}
return false;
};
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (computation == computation->parent()->entry_computation()) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kDynamicUpdateSlice) {
continue;
}
if (instruction->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
continue;
}
HloInstruction* operand = instruction->mutable_operand(1);
if (uses_parameter_buffer(operand)) {
HloInstruction* copy =
instruction->parent()->AddInstruction(HloInstruction::CreateUnary(
operand->shape(), HloOpcode::kCopy, operand));
VLOG(5) << "Added copy " << std::quoted(copy->name())
<< " for DynamicUpdateSlice " << instruction->name()
<< "'s 1st operand " << operand->name();
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(1, copy));
changed = true;
}
}
}
return changed;
}
namespace {
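// Async host computations are expected to contain only parameters and the
// root instruction; anything else indicates a malformed offload wrapper.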
absl::Status ValidateAsyncComputationStructure(HloComputation* computation) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kParameter || instr->IsRoot()) {
continue;
}
return absl::InternalError(
absl::StrCat("Unexpected instruction found in async computation: ",
instr->ToString()));
}
return absl::OkStatus();
}
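// Recolors the host-only output paths collected in `host_instrs_tree` to
// host memory space, and queues the MoveToHost annotations found on those
// paths for replacement with their operands.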
absl::StatusOr<bool> UpdateMemorySpaceForHostOffloadedOutputs(
HloInstruction* call_start,
ShapeTree<std::vector<InstructionAndShapeIndex>> host_instrs_tree) {
std::vector<InstructionAndShapeIndex> to_replace;
HloComputation* called_computation = call_start->async_wrapped_computation();
TF_RETURN_IF_ERROR(ValidateAsyncComputationStructure(called_computation));
HloInstruction* root = called_computation->root_instruction();
Shape* root_shape = root->mutable_shape();
  host_instrs_tree.ForEachMutableElement(
      [&](ShapeIndex output_index,
          std::vector<InstructionAndShapeIndex>*
              instruction_and_shape_indexes) {
for (InstructionAndShapeIndex& instr_and_shape :
*instruction_and_shape_indexes) {
if (instr_and_shape.instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
to_replace.emplace_back(instr_and_shape);
continue;
}
SetMemorySpace(ShapeUtil::GetMutableSubshape(
instr_and_shape.instruction->mutable_shape(),
instr_and_shape.shape_index),
Layout::kHostMemorySpace);
}
if (!instruction_and_shape_indexes->empty()) {
SetMemorySpace(ShapeUtil::GetMutableSubshape(root_shape, output_index),
Layout::kHostMemorySpace);
}
});
bool modified = false;
for (InstructionAndShapeIndex& instr_and_shape : to_replace) {
modified = true;
HloInstruction* pred = instr_and_shape.instruction->mutable_operand(0);
TF_RETURN_IF_ERROR(instr_and_shape.instruction->ReplaceAllUsesWith(pred));
}
return modified;
}
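// Returns true if `instruction_and_shape_index` is a valid host-side use of a
// host-offloaded output: if it is the entry root, the corresponding output
// must already be annotated as host memory, and only MoveToHost custom calls
// (no other custom calls or async ops) are permitted along the way.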
bool ExtraCheckForValidUsageOnHostForHostOffloadedOutputs(
const Shape& entry_computation_shape,
InstructionAndShapeIndex& instruction_and_shape_index) {
HloInstruction* instruction = instruction_and_shape_index.instruction;
ShapeIndex& shape_index = instruction_and_shape_index.shape_index;
if (instruction->IsRoot() && instruction->parent()->IsEntryComputation()) {
if (ShapeUtil::GetSubshape(entry_computation_shape, shape_index)
.layout()
.memory_space() != Layout::kHostMemorySpace) {
return false;
}
}
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() !=
host_memory_offload_annotations::kMoveToHostCustomCallTarget) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart ||
instruction->opcode() == HloOpcode::kAsyncDone) {
return false;
}
return true;
}
}  // namespace
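// Looks for outputs of a host async call that are consumed purely on the host
// all the way to the entry computation's result; for such outputs the device
// round trip is redundant and their memory space is moved to host.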
absl::StatusOr<bool> HostOffloader::HandleRedundantCopiesBackToHost(
const HloModule* module, HloInstruction* instruction) {
HloAsyncInstruction* call_start = Cast<HloAsyncInstruction>(instruction);
CHECK_EQ(call_start->users().size(), 1);
HloInstruction* call_done = call_start->users()[0];
const Shape& entry_computation_shape =
module->entry_computation_layout().result_layout().shape();
Shape* done_shape = call_done->mutable_shape();
ShapeTree<std::vector<InstructionAndShapeIndex>> host_instrs_tree(done_shape);
TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableLeafShapeWithStatus(
done_shape, [&](Shape* subshape, const ShapeIndex& output_shape_index) {
std::queue<InstructionAndShapeIndex> queue;
queue.push(InstructionAndShapeIndex(call_done, output_shape_index));
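        // Element 1 of an async-start's result tuple holds the wrapped
        // computation's outputs; prepend that index to address the same leaf
        // on the start instruction.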
constexpr int64_t kShapeTupleOutputIndexInAsyncStart = 1;
std::vector<int32_t> start_shape_index_vec;
start_shape_index_vec.push_back(kShapeTupleOutputIndexInAsyncStart);
start_shape_index_vec.insert(start_shape_index_vec.end(),
output_shape_index.begin(),
output_shape_index.end());
ShapeIndex start_shape_index = {start_shape_index_vec.begin(),
start_shape_index_vec.end()};
host_instrs_tree.mutable_element(output_shape_index)
->push_back(
InstructionAndShapeIndex(call_start, start_shape_index));
host_instrs_tree.mutable_element(output_shape_index)
->push_back(
InstructionAndShapeIndex(call_done, output_shape_index));
bool host_only = true;
bool entry_compute_output = false;
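        // Breadth-first walk over the uses of the async-done. The output only
        // qualifies if every transitive use stays on the host and the value
        // ultimately feeds the entry computation's result.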
while (!queue.empty() && host_only) {
InstructionAndShapeIndex instruction_and_shape_index = queue.front();
queue.pop();
for (HloInstruction* user :
instruction_and_shape_index.instruction->users()) {
if (user->opcode() == HloOpcode::kAsyncStart) {
host_only = false;
break;
}
}
TF_ASSIGN_OR_RETURN(
std::vector<InstructionAndShapeIndex> successors,
host_offload_utils::GetSuccessors(InstructionAndShapeIndex(
instruction_and_shape_index.instruction,
instruction_and_shape_index.shape_index)));
for (InstructionAndShapeIndex& successor : successors) {
if (!host_offload_utils::IsValidDuringPureMemoryOffload(
successor.instruction) ||
!ExtraCheckForValidUsageOnHostForHostOffloadedOutputs(
entry_computation_shape, successor)) {
host_only = false;
break;
}
if (successor.instruction->IsRoot() &&
successor.instruction->parent()->IsEntryComputation()) {
entry_compute_output = true;
}
queue.push(successor);
host_instrs_tree.mutable_element(output_shape_index)
->emplace_back(successor);
}
}
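        // Only elide the copies when all uses are host-side and the value is
        // part of the entry computation's output.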
if (!host_only || !entry_compute_output) {
host_instrs_tree.mutable_element(output_shape_index)->clear();
}
return absl::OkStatus();
}));
return UpdateMemorySpaceForHostOffloadedOutputs(call_start, host_instrs_tree);
}
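// Top-level driver for the pass: handles host-offloaded async outputs and
// streamed inputs first, then repeatedly processes MoveToHost annotations to
// a fixed point, processes MoveToDevice annotations, applies the scheduling
// fix, and finally runs CSE to deduplicate the copies this pass created.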
absl::StatusOr<bool> HostOffloader::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
  bool changed_in_loop = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (host_offload_utils::IsHostAsyncStart(instruction)) {
TF_ASSIGN_OR_RETURN(changed_in_loop, HandleRedundantCopiesBackToHost(
module, instruction));
changed = changed || changed_in_loop;
}
}
}
TF_ASSIGN_OR_RETURN(const bool input_streaming_changed_module,
HandleInputStreaming(module->entry_computation()));
changed = changed || input_streaming_changed_module;
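  // Process MoveToHost custom calls to a fixed point: each rewrite can expose
  // further offloading opportunities and invalidates the post-order, so the
  // walk restarts after every change.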
do {
changed_in_loop = false;
std::vector<HloComputation*> post_order_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto it = post_order_computations.rbegin();
it != post_order_computations.rend(); ++it) {
HloComputation* computation = *it;
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
TF_ASSIGN_OR_RETURN(changed_in_loop,
HandleMoveToHostCustomCall(instruction));
if (changed_in_loop) {
changed = true;
break;
}
}
}
if (changed_in_loop) {
break;
}
}
} while (changed_in_loop);
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
TF_ASSIGN_OR_RETURN(bool result,
HandleMoveToDeviceCustomCall(instruction));
changed = changed || result;
}
}
}
TF_ASSIGN_OR_RETURN(bool applied_scheduling_fix,
ApplySchedulingFix(module, execution_threads));
changed = changed || applied_scheduling_fix;
HloCSE cse(true);
TF_ASSIGN_OR_RETURN(bool cse_changed, cse.Run(module, execution_threads));
changed = changed || cse_changed;
return changed;
}
} | #include "xla/service/host_offloader.h"
#include <cstdint>
#include <memory>
#include <stack>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/host_offload_legalize.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace {
class HostOffloaderTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunHostOffloader(HloModule* module,
bool after_layout = false) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
bool changed = false;
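    // HostOffloadLegalize is expected to run before HostOffloader so that the
    // annotated values are in a form the offloader can handle.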
HostOffloadLegalize host_offload_legalize(Layout::kHostMemorySpace,
after_layout);
TF_ASSIGN_OR_RETURN(bool legal_changed, host_offload_legalize.Run(module));
changed |= legal_changed;
HostOffloader host_offloader(Layout::kHostMemorySpace);
TF_ASSIGN_OR_RETURN(bool offload_changed, host_offloader.Run(module));
changed |= offload_changed;
return changed;
}
void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
ASSERT_TRUE(shape.has_layout());
EXPECT_EQ(shape.layout().memory_space(), memory_space);
}
bool HaveRemainingOffloadAnnotations(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget,
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget})) {
return true;
}
}
}
return false;
}
};
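// Returns the set of instructions in `computation` whose opcode equals
// `target_opcode`.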
absl::flat_hash_set<const HloInstruction*>
getInstructionsWithOpcodeFromComputation(const HloComputation* computation,
HloOpcode target_opcode) {
absl::flat_hash_set<const HloInstruction*> instructions;
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == target_opcode) {
instructions.emplace(instruction);
}
}
return instructions;
}
TEST_F(HostOffloaderTest, BasicDusDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusFirstOperandIsNotFromABroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
param_2 = f32[2,2048,2048] parameter(2)
constant_s32_0 = s32[] constant(0)
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(param_2, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const absl::StatusOr<bool> result = RunHostOffloader(module.get());
EXPECT_FALSE(result.ok());
}
TEST_F(HostOffloaderTest, DusDsWithTupleAfterBroadcast) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
tuple = (f32[2,2048,2048]) tuple(broadcast)
gte = f32[2,2048,2048] get-tuple-element(tuple), index=0
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(gte, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::GetTupleElement(
>e,
m::Tuple(&tuple, m::CustomCall(&allocate_buffer,
{"AllocateBuffer"})),
0),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusWithoutDs) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
ROOT load_custom_call = f32[2,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* copy;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©, m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, DusAndNoCopyFromSameCustomCall) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
tuple = (f32[1,2048,2048]) tuple(offload_custom_call)
gte = f32[1,2048,2048] get-tuple-element(tuple), index=0
load_custom_call_0 = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
load_custom_call_1 = f32[1,2048,2048] custom-call(gte), custom_call_target="MoveToDevice"
ROOT tuple_1 = (f32[1,2048,2048], f32[1,2048,2048]) tuple(load_custom_call_0, load_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_match_1;
HloInstruction* param_match_2;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
HloInstruction* copy_to_host;
HloInstruction* tuple_0;
HloInstruction* gte;
HloInstruction* copy_to_device;
HloInstruction* tuple_1;
const auto dynamic_slice_pattern = m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m_match_1, 0), m::Op(), m::Op(),
m::Op()),
m::Op(), m::Op(), m::Op());
const auto copy_pattern = m::Copy(
©_to_device,
m::GetTupleElement(
>e,
m::Tuple(&tuple_0,
m::Copy(©_to_host, m::Parameter(¶m_match_2, 0))),
0));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(&tuple_1, dynamic_slice_pattern, copy_pattern)));
EXPECT_EQ(param_match_1, param_match_2);
TestShapeHasMemorySpace(param_match_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, BasicAsyncCustomCallWithAliasing) {
const std::string& hlo_string = R"(
HloModule m, input_output_alias={{}: (0, {}, must-alias)},
entry_computation_layout={(f32[4096]{0:T(128)S(5)})->f32[4096]{0:T(128)S(5)}}
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%async-start = ((f32[4096]{0}), f32[4096]{0}, u32[]) custom-call-start(%a),
custom_call_target="Foo",
output_to_operand_aliasing={{}: (0, {})}
ROOT %async-done = f32[4096]{0} custom-call-done(%async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TestShapeHasMemorySpace(async_done->shape(), Layout::kHostMemorySpace);
}
TEST_F(HostOffloaderTest, ParameterStreamingWithXposeCopyFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{1,2,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
copy = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} copy(param.0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) tuple(constant_0, copy, broadcast)
while = (s32[], bf16[2,16,16]{1,2,0:T(8,128)(2,1)}, bf16[2,16,16]{1,2,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{1,2,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true,
                       /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
TEST_F(HostOffloaderTest, ParameterStreamingFeedingIntoWhile) {
const std::string& hlo_string = R"(
HloModule jit__prefill_impl, entry_computation_layout={(bf16[2,16,16]{2,1,0:T(8,128)(2,1)S(5)})->bf16[2,16,16]{2,1,0:T(8,128)(2,1)}}
while_condition {
condition_param = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=0
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
input_tuple.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
orig_data = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=1
custom-call.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} custom-call(orig_data), custom_call_target="MoveToDevice"
sum = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(input_tuple.0), index=2
sum.1 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} add(custom-call.0, sum)
constant_1 = s32[] constant(1)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1)
ROOT tuple_result.0 = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(incremented_index.0, custom-call.0, sum.1)
}
ENTRY main {
param.0 = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} parameter(0)
constant_0 = s32[] constant(0)
constant_0.0 = bf16[] constant(0.0)
broadcast = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} broadcast(constant_0.0), dimensions={}
tuple_for_while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) tuple(constant_0, param.0, broadcast)
while = (s32[], bf16[2,16,16]{2,1,0:T(8,128)(2,1)}, bf16[2,16,16]{2,1,0:T(8,128)(2,1)}) while(tuple_for_while), condition=while_condition, body=while_body
ROOT gte = bf16[2,16,16]{2,1,0:T(8,128)(2,1)} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true,
                       /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
VLOG(1) << "module after: " << module->ToString();
}
TEST_F(HostOffloaderTest, ParameterStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[8,2]{0,1:T(2,128)S(5)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
arg_tuple.8 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.9 = s32[]{:T(256)} get-tuple-element(arg_tuple.8), index=0
constant.12 = s32[]{:T(256)} constant(1)
add.29 = s32[]{:T(256)} add(get-tuple-element.9, constant.12)
get-tuple-element.10 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=1
get-tuple-element.11 = f32[8,2]{0,1:T(2,128)} get-tuple-element(arg_tuple.8), index=2
constant.16 = s32[]{:T(256)} constant(0)
dynamic-slice.20 = f32[1,2]{0,1:T(2,128)} dynamic-slice(get-tuple-element.11, get-tuple-element.9, constant.16), dynamic_slice_sizes={1,2}
constant.1 = f32[] constant(-0)
reduce = f32[2]{0:T(256)} reduce(dynamic-slice.20, constant.1), dimensions={0}, to_apply=add
custom-call = f32[2]{0:T(256)} custom-call(reduce), custom_call_target="MoveToDevice"
constant.13 = f32[]{:T(256)} constant(1)
broadcast.14 = f32[2]{0:T(256)} broadcast(constant.13), dimensions={}
add.23 = f32[2]{0:T(256)} add(custom-call, broadcast.14)
reshape.24 = f32[1,2]{0,1:T(2,128)} reshape(add.23)
dynamic-update-slice.28 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.10, reshape.24, get-tuple-element.9, constant.16)
ROOT tuple.30 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(add.29, dynamic-update-slice.28, get-tuple-element.11)
}
condition {
arg_tuple.32 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) parameter(0)
get-tuple-element.33 = s32[]{:T(256)} get-tuple-element(arg_tuple.32), index=0
constant.36 = s32[]{:T(256)} constant(8)
ROOT compare.37 = pred[]{:T(1024)} compare(get-tuple-element.33, constant.36), direction=LT
}
ENTRY e {
constant.3 = f32[]{:T(256)} constant(1)
constant.2 = s32[]{:T(256)} constant(0)
constant.4 = f32[]{:T(256)} constant(0)
broadcast.5 = f32[8,2]{0,1:T(2,128)} broadcast(constant.4), dimensions={}
Arg_0.1 = f32[8,2]{0,1:T(2,128)} parameter(0), sharding={replicated}
tuple.6 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.2, broadcast.5, Arg_0.1)
while.38 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[8,2]{0,1:T(2,128)}) while(tuple.6), condition=condition, body=while_body
get-tuple-element.40 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while.38), index=1
ROOT tuple.42 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.3, get-tuple-element.40)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true,
                       /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
TEST_F(HostOffloaderTest, OutputStreamingInScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(f32[4,1]{0,1:T(2,128)})->(f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)S(5)})},
allow_spmd_sharding_propagation_to_output={true,true}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
while_body {
param.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element.1 = s32[]{:T(256)} get-tuple-element(param.1), index=0
constant.9 = s32[]{:T(256)} constant(1)
add.1 = s32[]{:T(256)} add(get-tuple-element.1, constant.9)
get-tuple-element.2 = f32[8,2]{0,1:T(2,128)} get-tuple-element(param.1), index=1
get-tuple-element.3 = f32[4,1]{0,1:T(2,128)} get-tuple-element(param.1), index=2
bitcast = f32[1,4,1]{1,2,0:T(2,128)} bitcast(get-tuple-element.3)
all-gather.2 = f32[4,4,1]{1,2,0:T(2,128)} all-gather(bitcast), channel_id=2, replica_groups={{0,1,2,3}}, dimensions={0}, use_global_device_ids=true
constant.20 = f32[] constant(-0)
reduce = f32[4,4]{1,0:T(4,128)} reduce(all-gather.2, constant.20), dimensions={2}, to_apply=add
bitcast.1 = f32[2,4,2,1]{1,2,0,3:T(2,128)} bitcast(reduce)
copy.1 = f32[2,4,2,1]{1,0,2,3:T(2,128)} copy(bitcast.1)
reshape.6 = f32[8,2]{0,1:T(2,128)} reshape(copy.1)
constant.10 = s32[]{:T(256)} constant(0)
dynamic-slice.0 = f32[1,2]{0,1:T(2,128)} dynamic-slice(reshape.6, get-tuple-element.1, constant.10), dynamic_slice_sizes={1,2}
constant.11 = f32[]{:T(256)} constant(1)
broadcast.4 = f32[1,2]{0,1:T(2,128)} broadcast(constant.11), dimensions={}
add.2 = f32[1,2]{0,1:T(2,128)} add(dynamic-slice.0, broadcast.4)
reduce.1 = f32[2]{0:T(256)} reduce(add.2, constant.20), dimensions={0}, to_apply=add
custom-call.1 = f32[2]{0:T(256)} custom-call(reduce.1), custom_call_target="MoveToHost"
reshape.8 = f32[1,2]{0,1:T(2,128)} reshape(custom-call.1)
dynamic-update-slice.0 = f32[8,2]{0,1:T(2,128)} dynamic-update-slice(get-tuple-element.2, reshape.8, get-tuple-element.1, constant.10)
ROOT tuple = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(add.1, dynamic-update-slice.0, get-tuple-element.3)
}
condition {
param = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) parameter(0)
get-tuple-element = s32[]{:T(256)} get-tuple-element(param), index=0
constant.8 = s32[]{:T(256)} constant(8)
ROOT compare.0 = pred[]{:T(1024)} compare(get-tuple-element, constant.8), direction=LT
}
ENTRY e {
constant.17 = f32[]{:T(256)} constant(1)
constant.18 = s32[]{:T(256)} constant(0)
constant.19 = f32[]{:T(256)} constant(0)
broadcast.6 = f32[8,2]{0,1:T(2,128)} broadcast(constant.19), dimensions={}
param.2 = f32[4,1]{0,1:T(2,128)} parameter(0), sharding={devices=[2,2]<=[4]}
tuple.1 = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) tuple(constant.18, broadcast.6, param.2)
while = (s32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}, f32[4,1]{0,1:T(2,128)}) while(tuple.1), condition=condition, body=while_body
get-tuple-element.4 = f32[8,2]{0,1:T(2,128)} get-tuple-element(while), index=1
ROOT tuple.2 = (f32[]{:T(256)}, f32[8,2]{0,1:T(2,128)}) tuple(constant.17, get-tuple-element.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, RunHostOffloader(module.get(), /*after_layout=*/true));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
  HloVerifier verifier(/*layout_sensitive=*/true,
                       /*allow_mixed_precision=*/true);
TF_EXPECT_OK(verifier.Run(module.get()).status());
}
TEST_F(HostOffloaderTest, BasicNoCopy) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
ROOT load_custom_call = f32[2048] custom-call(offload_custom_call), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(©_to_device,
m::Copy(©_to_host, m::Parameter(¶m, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param)
gte_0 = f32[2048] get-tuple-element(tuple), index=0
gte_1 = f32[2048] get-tuple-element(tuple), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::GetTupleElement(
>e,
m::Tuple(&tuple, m::Copy(©_to_host, m::Parameter(¶m, 0)),
m::Op()),
0))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughNestedTuple) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
other_param_0 = f32[2048] parameter(1)
other_param_1 = f32[2048] parameter(2)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple_0 = (f32[2048], f32[2048]) tuple(offload_custom_call, other_param_0)
tuple_1 = ((f32[2048], f32[2048]), f32[2048]) tuple(tuple_0, other_param_1)
gte_0 = (f32[2048], f32[2048]) get-tuple-element(tuple_1), index=0
gte_1 = f32[2048] get-tuple-element(tuple_1), index=1
gte_2 = f32[2048] get-tuple-element(gte_0), index=0
gte_3 = f32[2048] get-tuple-element(gte_0), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_2), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple_0;
HloInstruction* gte_0;
HloInstruction* tuple_1;
HloInstruction* gte_1;
HloInstruction* copy_to_device;
const auto copy_param_pattern =
m::Copy(©_to_host, m::Parameter(¶m, 0));
const auto tuple_of_tuple_pattern = m::Tuple(
&tuple_1, m::Tuple(&tuple_0, copy_param_pattern, m::Op()), m::Op());
const auto gte_of_gte_pattern = m::GetTupleElement(
>e_1, m::GetTupleElement(>e_0, tuple_of_tuple_pattern, 0), 0);
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(©_to_device, gte_of_gte_pattern)));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_0->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(gte_0->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(gte_0->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0, 0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0, 1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughComputation) {
const std::string& hlo_string = R"(
HloModule my_module
other_computation {
ROOT param = f32[2048] parameter(0)
}
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
call = f32[2048] call(offload_custom_call), to_apply=other_computation
ROOT load_custom_call = f32[2048] custom-call(call), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* call;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::Call(&call, m::Copy(©_to_host, m::Parameter(¶m, 0))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(call->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
ASSERT_THAT(call->called_computations(), ::testing::SizeIs(1));
HloComputation* called_computation = call->called_computations()[0];
HloInstruction* called_computation_param;
ASSERT_THAT(called_computation->root_instruction(),
GmockMatch(m::Parameter(&called_computation_param, 0)));
TestShapeHasMemorySpace(called_computation_param->shape(),
Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughComputationAndTuple) {
const std::string& hlo_string = R"(
HloModule my_module
other_computation {
param_0 = f32[2048] parameter(0)
param_1 = f32[2048] parameter(1)
ROOT tuple = (f32[2048], f32[2048]) tuple(param_0, param_1)
}
ENTRY main {
data_param = f32[2048] parameter(0)
other_param = f32[2048] parameter(1)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
call = (f32[2048], f32[2048]) call(offload_custom_call, other_param), to_apply=other_computation
gte_0 = f32[2048] get-tuple-element(call), index=0
gte_1 = f32[2048] get-tuple-element(call), index=1
ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* call;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::GetTupleElement(
>e,
m::Call(&call, m::Copy(©_to_host, m::Parameter(¶m, 0)),
m::Op())))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(call->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(call->shape(), {1}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_THAT(call->called_computations(), ::testing::SizeIs(1));
HloComputation* called_computation = call->called_computations()[0];
HloInstruction* called_computation_param_0;
HloInstruction* called_computation_param_1;
HloInstruction* tuple;
ASSERT_THAT(
called_computation->root_instruction(),
GmockMatch(m::Tuple(&tuple, m::Parameter(&called_computation_param_0, 0),
m::Parameter(&called_computation_param_1, 1))));
TestShapeHasMemorySpace(called_computation_param_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(called_computation_param_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyThroughWhile) {
const std::string& hlo_string = R"(
HloModule my_module
while_body {
ROOT param = f32[2048] parameter(0)
}
while_condition {
param = f32[2048] parameter(0)
constant_0 = s32[] constant(0)
constant_1 = s32[] constant(1)
ROOT pred_result = pred[] compare(constant_1, constant_0), direction=LT
}
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
while = f32[2048] while(offload_custom_call), condition=while_condition, body=while_body
ROOT load_custom_call = f32[2048] custom-call(while), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* while_instr;
HloInstruction* copy_to_device;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::While(&while_instr,
m::Copy(©_to_host, m::Parameter(¶m, 0))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(while_instr->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
HloComputation* while_condition = while_instr->while_condition();
ASSERT_THAT(while_condition->parameter_instructions(), ::testing::SizeIs(1));
TestShapeHasMemorySpace(while_condition->parameter_instruction(0)->shape(),
Layout::kHostMemorySpace);
HloInstruction* while_body_param;
HloComputation* while_body = while_instr->while_body();
ASSERT_THAT(while_body->root_instruction(),
GmockMatch(m::Parameter(&while_body_param, 0)));
TestShapeHasMemorySpace(while_body_param->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyWithOptBarrier) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
tuple = (f32[2048]) tuple(offload_custom_call)
opt_barrier = (f32[2048]) opt-barrier(tuple)
get_tuple_element = f32[2048] get-tuple-element(opt_barrier), index=0
ROOT load_custom_call = f32[2048] custom-call(get_tuple_element), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* tuple;
HloInstruction* opt_barrier;
HloInstruction* gte;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
©_to_device,
m::GetTupleElement(
>e, m::OptimizationBarrier(
&opt_barrier,
m::Tuple(&tuple, m::Copy(©_to_host,
m::Parameter(¶m, 0))))))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyMultipleToDevice) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
constant = f32[] constant(0)
custom_call_0 = f32[] custom-call(constant), custom_call_target="MoveToHost"
tuple_0 = (f32[], f32[]) tuple(custom_call_0, custom_call_0)
opt_barrier = (f32[], f32[]) opt-barrier(tuple_0)
gte_0 = f32[] get-tuple-element(opt_barrier), index=0
custom_call_1 = f32[] custom-call(gte_0), custom_call_target="MoveToDevice"
gte_1 = f32[] get-tuple-element(opt_barrier), index=1
custom_call_2 = f32[] custom-call(gte_1), custom_call_target="MoveToDevice"
ROOT tuple_1 = (f32[], f32[]) tuple(custom_call_1, custom_call_2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* constant;
HloInstruction* copy_to_host_1;
HloInstruction* copy_to_host_2;
HloInstruction* tuple_1;
HloInstruction* opt_barrier;
HloInstruction* gte_1;
HloInstruction* copy_to_device_1;
HloInstruction* gte_2;
HloInstruction* copy_to_device_2;
HloInstruction* tuple_2;
const auto constant_pattern = m::ConstantScalar(&constant, 0);
const auto opt_barrier_pattern = m::OptimizationBarrier(
&opt_barrier,
m::Tuple(&tuple_1, m::Copy(©_to_host_1, constant_pattern),
m::Copy(©_to_host_2, constant_pattern)));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple_2,
m::Copy(©_to_device_1,
m::GetTupleElement(>e_1, opt_barrier_pattern)),
m::Copy(©_to_device_2,
m::GetTupleElement(>e_2, opt_barrier_pattern)))));
TestShapeHasMemorySpace(constant->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_1->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device_2->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_2->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple_2->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyWithOptBarrierMoreElaborate) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
ENTRY main.24 {
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
cosine.4 = f32[16]{0} cosine(Arg_0.1)
custom-call.5 = f32[16]{0} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16]{0} sine(Arg_0.1)
cosine.7 = f32[16]{0} cosine(sine.3)
custom-call.8 = f32[16]{0} custom-call(cosine.7), custom_call_target="MoveToHost"
sine.6 = f32[16]{0} sine(sine.3)
cosine.9 = f32[16]{0} cosine(sine.6)
custom-call.10 = f32[16]{0} custom-call(cosine.9), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
tuple.11 = (f32[16]{0}, f32[16]{0}, f32[16]{0}, f32[]) tuple(custom-call.5, custom-call.8, custom-call.10, constant.2)
opt-barrier.12 = (f32[16]{0}, f32[16]{0}, f32[16]{0}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16]{0} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16]{0} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16]{0} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16]{0} multiply(broadcast.20, custom-call.19)
get-tuple-element.14 = f32[16]{0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16]{0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16]{0} multiply(multiply.21, custom-call.18)
get-tuple-element.13 = f32[16]{0} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16]{0} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
ROOT multiply.23 = f32[16]{0} multiply(multiply.22, custom-call.17)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* constant;
HloInstruction* sine_0;
HloInstruction* sine_1;
HloInstruction* cosine_0;
HloInstruction* cosine_1;
HloInstruction* cosine_2;
HloInstruction* copy_to_host_0;
HloInstruction* copy_to_host_1;
HloInstruction* copy_to_host_2;
HloInstruction* tuple;
HloInstruction* opt_barrier;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
HloInstruction* gte_3;
HloInstruction* broadcast;
HloInstruction* copy_to_device_0;
HloInstruction* copy_to_device_1;
HloInstruction* copy_to_device_2;
HloInstruction* multiply_0;
HloInstruction* multiply_1;
HloInstruction* multiply_2;
auto parameter_matcher = m::Parameter(¶m, 0);
auto first_sine_matcher = m::Op(&sine_0)
.WithOpcode(xla::HloOpcode::kSin)
.WithOperand(0, parameter_matcher);
auto opt_barrier_matcher = m::OptimizationBarrier(
&opt_barrier,
m::Tuple(
&tuple,
m::Copy(©_to_host_0, m::Op(&cosine_0)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, parameter_matcher)),
m::Copy(©_to_host_1, m::Op(&cosine_1)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, first_sine_matcher)),
m::Copy(©_to_host_2,
m::Op(&cosine_2)
.WithOpcode(xla::HloOpcode::kCos)
.WithOperand(0, m::Op(&sine_1)
.WithOpcode(xla::HloOpcode::kSin)
.WithOperand(0, first_sine_matcher))),
m::Constant(&constant)));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(
&multiply_0,
m::Multiply(
&multiply_1,
m::Multiply(
&multiply_2,
m::Broadcast(&broadcast, m::GetTupleElement(
>e_3, opt_barrier_matcher, 3)),
m::Copy(©_to_device_2,
m::GetTupleElement(>e_2, opt_barrier_matcher, 2))),
m::Copy(©_to_device_1,
m::GetTupleElement(>e_1, opt_barrier_matcher, 1))),
m::Copy(©_to_device_0,
m::GetTupleElement(>e_0, opt_barrier_matcher, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(constant->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(cosine_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_host_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {3}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(opt_barrier->shape(), {3}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_2->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_3->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_device_2->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, NoCopyMultipleUsers) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048] parameter(0)
offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
sine = f32[2048] sine(data_param)
load_custom_call = f32[2048] custom-call(offload_custom_call), custom_call_target="MoveToDevice"
ROOT add = f32[2048] add(sine, load_custom_call)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* sine;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
HloInstruction* add;
const auto param_pattern = m::Parameter(¶m, 0);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Add(
&add, m::Sin(&sine, param_pattern),
m::Copy(©_to_device, m::Copy(©_to_host, param_pattern)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(sine->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
TEST_F(HostOffloaderTest, BasicDusDsWithMultipleBroadcastUsers) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
tanh = f32[2,2048,2048] tanh(broadcast)
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(¶m, 0), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
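  // The broadcast also feeds tanh, which runs on device, so that use must
  // keep the default memory space even though the dynamic-update-slice use
  // was offloaded to host.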
HloInstruction* tanh = nullptr;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kTanh) {
tanh = instruction;
break;
}
}
ASSERT_NE(tanh, nullptr);
HloInstruction* broadcast;
EXPECT_THAT(tanh, GmockMatch(m::Tanh(m::Broadcast(&broadcast))));
TestShapeHasMemorySpace(broadcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(tanh->shape(), Layout::kDefaultMemorySpace);
}
TEST_F(HostOffloaderTest, BasicDusDsBitcastBeforeDus) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
bitcast = f32[1,2048,2048] bitcast(offload_custom_call)
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, bitcast, index_param, constant_s32_0, constant_s32_0)
dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* bitcast;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_slice;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Bitcast(&bitcast, m::Parameter(&param, 0)), m::Op(),
m::Op(), m::Op()),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(bitcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
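// The MoveToHost annotation is placed on the result of the
// dynamic-update-slice rather than on its update operand; the offloader is
// expected to reject this placement with an error.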
TEST_F(HostOffloaderTest, BasicDusDsDusAnnotationOnWrongSide) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, data_param, index_param, constant_s32_0, constant_s32_0)
offload_custom_call = f32[1,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToHost"
dynamic_slice = f32[1,2048,2048] dynamic-slice(offload_custom_call, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::StatusOr<bool> status_or_changed = RunHostOffloader(module.get());
ASSERT_FALSE(status_or_changed.ok());
}
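// Here the MoveToDevice annotation sits on the dynamic-update-slice result
// instead of on the dynamic-slice; the offloader compensates by inserting a
// copy from host back to device memory before the dynamic-slice.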
TEST_F(HostOffloaderTest, BasicDusDsDsAnnotationOnWrongSide) {
const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
data_param = f32[1,2048,2048] parameter(0)
index_param = s32[] parameter(1)
constant_f32_0 = f32[] constant(0)
constant_s32_0 = s32[] constant(0)
broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
load_custom_call = f32[2,2048,2048] custom-call(dynamic_update_slice), custom_call_target="MoveToDevice"
ROOT dynamic_slice = f32[1,2048,2048] dynamic-slice(load_custom_call, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* allocate_buffer;
HloInstruction* dynamic_update_slice;
HloInstruction* copy;
HloInstruction* dynamic_slice;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice(
&dynamic_slice,
m::Copy(&copy,
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::CustomCall(&allocate_buffer, {"AllocateBuffer"}),
m::Parameter(&param, 0), m::Op(), m::Op(), m::Op())),
m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(allocate_buffer->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
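// Models an LLM activation-offloading pattern: a producing while loop writes
// slices into large buffers via dynamic-update-slice, and a consuming while
// loop reads them back via dynamic-slice. The offloader should replace the
// initial broadcasts with host AllocateBuffer custom calls and propagate the
// host memory space through both loop bodies.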
TEST_F(HostOffloaderTest, LlmActivation) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
&gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
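// Identical to LlmActivation, except the loop buffers are already produced by
// AllocateBuffer custom calls in the entry computation rather than by
// broadcasts; the offloader should place those allocations in host memory.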
TEST_F(HostOffloaderTest, LlmActivationSourceIsAllocateBuffer) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
allocate_buffer_0 = f32[96,8,6,2048,2048] custom-call(), custom_call_target="AllocateBuffer"
allocate_buffer_1 = f32[96,8,6,2048,1] custom-call(), custom_call_target="AllocateBuffer"
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, allocate_buffer_0, allocate_buffer_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
&gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
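// Variant of LlmActivation with a reshape between each dynamic-slice and its
// MoveToDevice annotation in the consuming loop; the offload analysis should
// look through the reshape.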
TEST_F(HostOffloaderTest, LlmActivationDsWithReshape) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(slice_data_1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
rs = f32[1,8,6,2048,2048] reshape(dynamic_slice_0)
rs2 = f32[1,8,6,2048,1] reshape(dynamic_slice_1)
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(rs), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(rs2), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while_0;
HloInstruction* producing_while_1;
{
HloInstruction* tuple;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* gte_2;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
&gte_2,
m::While(
&consuming_while,
m::Tuple(
&tuple, m::Constant(),
m::GetTupleElement(&gte_0, m::While(&producing_while_0)),
m::GetTupleElement(&gte_1, m::While(&producing_while_1)))),
0)));
ASSERT_EQ(producing_while_0, producing_while_1);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
}
{
HloInstruction* allocate_buffer_0;
HloInstruction* allocate_buffer_1;
ASSERT_THAT(producing_while_0,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer_0, {"AllocateBuffer"}),
m::CustomCall(&allocate_buffer_1, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer_0->shape().has_layout());
EXPECT_EQ(allocate_buffer_0->shape().layout().memory_space(),
Layout::kHostMemorySpace);
ASSERT_TRUE(allocate_buffer_1->shape().has_layout());
EXPECT_EQ(allocate_buffer_1->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{2}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice_0;
HloInstruction* dynamic_update_slice_1;
HloInstruction* dynamic_update_slice_second_param_0;
HloInstruction* dynamic_update_slice_second_param_1;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* param_0;
HloInstruction* param_1;
ASSERT_THAT(producing_while_0->while_body()->root_instruction(),
GmockMatch(m::Tuple(
&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice_0,
m::GetTupleElement(&gte_0, m::Parameter(&param_0)),
m::Op(&dynamic_update_slice_second_param_0), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicUpdateSlice(
&dynamic_update_slice_1,
m::GetTupleElement(&gte_1, m::Parameter(&param_1)),
m::Op(&dynamic_update_slice_second_param_1), m::Op(),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(param_0, param_1);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_0->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_1->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param_0->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param_1->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 2);
for (const HloInstruction* dynamic_slice : dynamic_slices) {
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {2}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
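// The host buffer produced by the consuming while loop feeds two consumers in
// the entry computation: a dynamic-slice and a static slice (which the pass
// rewrites into a second dynamic-slice with a constant start index), each
// followed by its own MoveToDevice annotation.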
TEST_F(HostOffloaderTest, LlmActivationHostMemoryMultipleConsumers) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]) tuple(constant_s32_0, while_output_1)
consuming_while = (s32[], f32[96,8,6,2048,2048]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048] get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* consuming_while;
HloInstruction* producing_while;
{
HloInstruction* tuple;
HloInstruction* gte_between_whiles;
HloInstruction* final_gte;
HloInstruction* dynamic_slice_0;
HloInstruction* dynamic_slice_1;
HloInstruction* add;
auto pattern_ending_in_gte = m::GetTupleElement(
&final_gte,
m::While(&consuming_while,
m::Tuple(&tuple, m::Constant(),
m::GetTupleElement(&gte_between_whiles,
m::While(&producing_while)))));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Add(&add,
m::DynamicSlice(&dynamic_slice_0, pattern_ending_in_gte,
m::Op(), m::Op(), m::Op(), m::Op(), m::Op()),
m::DynamicSlice(&dynamic_slice_1, pattern_ending_in_gte,
m::ConstantScalar(41), m::Op(), m::Op(),
m::Op(), m::Op()))));
TestShapeHasMemorySpace(gte_between_whiles->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(consuming_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(producing_while->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(final_gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice_0->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dynamic_slice_1->shape(),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
}
{
HloInstruction* allocate_buffer;
ASSERT_THAT(producing_while,
GmockMatch(m::While(m::Tuple(
m::Constant(),
m::CustomCall(&allocate_buffer, {"AllocateBuffer"})))));
ASSERT_TRUE(allocate_buffer->shape().has_layout());
EXPECT_EQ(allocate_buffer->shape().layout().memory_space(),
Layout::kHostMemorySpace);
}
TestShapeHasMemorySpace(
ShapeUtil::GetSubshape(
consuming_while->while_condition()->parameter_instruction(0)->shape(),
{1}),
Layout::kHostMemorySpace);
{
HloInstruction* tuple;
HloInstruction* dynamic_update_slice;
HloInstruction* dynamic_update_slice_second_param;
HloInstruction* gte;
HloInstruction* param;
ASSERT_THAT(
producing_while->while_body()->root_instruction(),
GmockMatch(m::Tuple(&tuple, m::Op(),
m::DynamicUpdateSlice(
&dynamic_update_slice,
m::GetTupleElement(&gte, m::Parameter(&param)),
m::Op(&dynamic_update_slice_second_param),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op()))));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(param->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_update_slice_second_param->shape(),
Layout::kDefaultMemorySpace);
}
{
const absl::flat_hash_set<const HloInstruction*> dynamic_slices =
getInstructionsWithOpcodeFromComputation(consuming_while->while_body(),
HloOpcode::kDynamicSlice);
ASSERT_EQ(dynamic_slices.size(), 1);
const HloInstruction* dynamic_slice = *dynamic_slices.begin();
const HloInstruction* get_tuple_element;
const HloInstruction* parameter;
ASSERT_THAT(
dynamic_slice,
GmockMatch(m::DynamicSlice(
m::GetTupleElement(&get_tuple_element, m::Parameter(&parameter)),
m::Op(), m::Op(), m::Op(), m::Op(), m::Op())));
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(parameter->shape(), {1}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(get_tuple_element->shape(),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(dynamic_slice->shape(),
Layout::kDefaultMemorySpace);
}
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
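// One dynamic-update-slice consumes a value fed straight through the loop
// parameter (data_2.1). The offloader must insert an extra copy for that
// operand so the host transfer can be scheduled, while the constant-fed
// update needs no copy.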
TEST_F(HostOffloaderTest, InsertExtraCopyForScheduling) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.0), index=1
data_1.0 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.0), index=2
data_2.1 = f32[1,8,6,2048,1] get-tuple-element(input_tuple.0), index=3
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
slice_data_1 = f32[1,8,6,2048,1] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
custom_call_1.0 = f32[1,8,6,2048,1] custom-call(data_2.1), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048] dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
dynamic_update_slice_1 = f32[96,8,6,2048,1] dynamic-update-slice(data_1.0, custom_call_1.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) tuple(incremented_index.0, dynamic_update_slice_0, dynamic_update_slice_1, data_2.1)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048] get-tuple-element(input_tuple.1), index=1
data_1.1 = f32[96,8,6,2048,1] get-tuple-element(input_tuple.1), index=2
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
dynamic_slice_1 = f32[1,8,6,2048,1] dynamic-slice(data_1.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,1}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
custom_call_1.1 = f32[1,8,6,2048,1] custom-call(dynamic_slice_1), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
tanh_1 = f32[1,8,6,2048,1] tanh(custom_call_1.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(incremented_index.1, data_0.1, data_1.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
broadcast_0 = f32[96,8,6,2048,2048] broadcast(entry_param_0), dimensions={}
broadcast_1 = f32[96,8,6,2048,1] broadcast(entry_param_0), dimensions={}
broadcast_2 = f32[1,8,6,2048,1] broadcast(entry_param_0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) tuple(constant_s32_0, broadcast_0, broadcast_1, broadcast_2)
producing_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1], f32[1,8,6,2048,1]) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048] get-tuple-element(producing_while), index=1
while_output_2 = f32[96,8,6,2048,1] get-tuple-element(producing_while), index=2
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) tuple(constant_s32_0, while_output_1, while_output_2)
consuming_while = (s32[], f32[96,8,6,2048,2048], f32[96,8,6,2048,1]) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
ROOT result = s32[] get-tuple-element(consuming_while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
const HloInstruction* dus0 =
FindInstruction(module.get(), "dynamic_update_slice_0");
const HloInstruction* dus1 =
FindInstruction(module.get(), "dynamic_update_slice_1");
EXPECT_THAT(dus0, GmockMatch(m::DynamicUpdateSlice(m::Op(), m::Constant(),
m::Op(), m::Op(), m::Op(),
m::Op(), m::Op())));
EXPECT_THAT(dus1, GmockMatch(m::DynamicUpdateSlice(m::Op(), m::Copy(),
m::Op(), m::Op(), m::Op(),
m::Op(), m::Op())));
}
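// Parameter streaming: parameter 0 is annotated with host memory space S(5)
// in the entry layout, so its MoveToDevice custom call should be replaced by
// a copy from host to device memory.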
TEST_F(HostOffloaderTest, ParameterStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
custom_call = s32[2,1]{1,0} custom-call(param_0), custom_call_target="MoveToDevice"
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, custom_call)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(multiply_2, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* copy;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* tuple;
auto multiplyPattern =
m::Multiply(&multiply_1,
m::Multiply(&multiply_0, m::Parameter(&param_1),
m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
m::Copy(&copy, m::Parameter(&param_0)));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple,
m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1, m::ConstantScalar(4))),
multiplyPattern)));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
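// Streams one leaf of a tuple parameter from host memory: element 1 carries
// S(5) in the entry layout, so its MoveToDevice annotation becomes a copy
// while element 0 stays on device.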
TEST_F(HostOffloaderTest, TupleParameterStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={((s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)S(5)}))->s32[2,1]{1,0:T(2,128)}}
ENTRY main {
param_tuple = (s32[2,1], s32[2,1]) parameter(0)
x = get-tuple-element(param_tuple), index=0
y_host = get-tuple-element(param_tuple), index=1
y = s32[2,1] custom-call(y_host), custom_call_target="MoveToDevice"
ROOT crs = s32[2,1] add(x, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* gte_x;
HloInstruction* gte_y;
HloInstruction* copy;
HloInstruction* add;
auto parameter_pattern = m::Parameter(&param, 0);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Add(
&add, m::GetTupleElement(&gte_x, parameter_pattern),
m::Copy(&copy, m::GetTupleElement(&gte_y, parameter_pattern)))));
TestShapeHasMemorySpace(param->shape().tuple_shapes(0),
Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_x->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(add->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_y->shape(), Layout::kHostMemorySpace);
}
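// A MoveToHost on a parameter that is already in host memory is a no-op; the
// only real work is the MoveToDevice, which becomes a single copy.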
TEST_F(HostOffloaderTest, ParameterStreamingNoOpToHost) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
to_host = s32[2,1]{1,0} custom-call(param), custom_call_target="MoveToHost"
ROOT to_device = s32[2,1]{1,0} custom-call(to_host), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
HloInstruction* param;
HloInstruction* copy;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy, m::Parameter(&param, 0))));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
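// Output streaming: tuple element 0 of the entry result is annotated with
// host memory space S(5), so the MoveToHost custom call becomes a copy into
// host memory.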
TEST_F(HostOffloaderTest, OutputStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)S(5)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(custom_call, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
HloInstruction* tuple;
auto multiplyPattern =
m::Multiply(&multiply_1,
m::Multiply(&multiply_0, m::Parameter(&param_1),
m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
m::Parameter(&param_0));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
&tuple,
m::Copy(&copy, m::Multiply(
&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1, m::ConstantScalar(4)))),
multiplyPattern)));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {1}),
Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
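// Same HLO as OutputStreaming, but the entry result layout does not request
// host memory for element 0, so there is nowhere to stream the MoveToHost
// value and the pass must return an error.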
TEST_F(HostOffloaderTest, InvalidOutputStreaming) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
ROOT tuple = (s32[2,1]{1,0}, s32[2,1]{1,0}) tuple(custom_call, multiply_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::StatusOr<bool> result = RunHostOffloader(module.get());
EXPECT_FALSE(result.ok());
}
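// Output streaming where the MoveToHost custom call is itself the entry root
// (no wrapping tuple); it should be rewritten into a copy to host memory.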
TEST_F(HostOffloaderTest, OutputStreamingWithoutTuple) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
auto multiplyPattern =
m::Multiply(&multiply_1,
m::Multiply(&multiply_0, m::Parameter(&param_1),
m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
m::Parameter(&param_0));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy, m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1,
m::ConstantScalar(4))))));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
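// Same scenario with the MoveToHost custom call as the entry root; it is
// likewise rewritten into a host-memory copy.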
TEST_F(HostOffloaderTest, OutputStreamingCustomCallRoot) {
const std::string& hlo_string = R"(
HloModule ParameterStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)}, s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param_0 = s32[2,1]{1,0} parameter(0)
param_1 = s32[2,1]{1,0} parameter(1)
constant_2 = s32[] constant(2)
constant_4 = s32[] constant(4)
broadcast_0 = s32[2,1]{1,0} broadcast(constant_2), dimensions={}
multiply_0 = s32[2,1]{1,0} multiply(param_1, broadcast_0)
multiply_1 = s32[2,1]{1,0} multiply(multiply_0, param_0)
broadcast_1 = s32[2,1]{1,0} broadcast(constant_4), dimensions={}
multiply_2 = s32[2,1]{1,0} multiply(multiply_1, broadcast_1)
ROOT custom_call = s32[2,1]{1,0} custom-call(multiply_2), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param_1;
HloInstruction* broadcast_0;
HloInstruction* multiply_0;
HloInstruction* param_0;
HloInstruction* multiply_1;
HloInstruction* broadcast_1;
HloInstruction* multiply_2;
HloInstruction* copy;
auto multiplyPattern =
m::Multiply(&multiply_1,
m::Multiply(&multiply_0, m::Parameter(&param_1),
m::Broadcast(&broadcast_0, m::ConstantScalar(2))),
m::Parameter(&param_0));
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
&copy, m::Multiply(&multiply_2, multiplyPattern,
m::Broadcast(&broadcast_1,
m::ConstantScalar(4))))));
TestShapeHasMemorySpace(param_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(param_0->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(broadcast_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(multiply_2->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
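// Streams the accumulator of a peeled scan loop: the final dynamic-update-slice
// and the buffer it updates land in host memory, while the update bitcast and
// the slice offset stay on device.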
TEST_F(HostOffloaderTest, OutputStreamingInUnrolledScanLoop) {
const std::string& hlo_string = R"(
HloModule m,
entry_computation_layout={(s32[16,16,8]{1,2,0:T(8,128)})->s32[16,16,8]{1,2,0:T(8,128)S(5)}},
allow_spmd_sharding_propagation_to_output={true}, num_partitions=2
body {
loop_peel_param = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) parameter(0)
get-tuple-element.12 = s32[]{:T(256)} get-tuple-element(loop_peel_param), index=0
constant.29 = s32[]{:T(256)} constant(1)
add.5 = s32[]{:T(256)} add(get-tuple-element.12, constant.29)
get-tuple-element.13 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(loop_peel_param), index=1
get-tuple-element.18 = s32[16,8]{0,1:T(8,128)} get-tuple-element(loop_peel_param), index=4
custom-call.3 = s32[16,8]{0,1:T(8,128)} custom-call(get-tuple-element.18), custom_call_target="MoveToHost"
bitcast = s32[1,16,8]{1,2,0:T(8,128)} bitcast(custom-call.3)
get-tuple-element.15 = s32[]{:T(256)} get-tuple-element(loop_peel_param), index=3
constant.30 = s32[]{:T(256)} constant(0)
dynamic-update-slice.2 = s32[16,16,8]{1,2,0:T(8,128)} dynamic-update-slice(get-tuple-element.13, bitcast, get-tuple-element.15, constant.30, constant.30), backend_config={"flag_configs":[],"scoped_memory_configs":[],"indices_config":{"index_known_bits":[{"zeroes":"0","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"}]},"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
get-tuple-element.14 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(loop_peel_param), index=2
dynamic-slice.2 = s32[1,16,8]{1,2,0:T(8,128)} dynamic-slice(get-tuple-element.14, get-tuple-element.12, constant.30, constant.30), dynamic_slice_sizes={1,16,8}
broadcast.8 = s32[1,16,8]{1,2,0:T(8,128)} broadcast(constant.29), dimensions={}
add.6 = s32[1,16,8]{1,2,0:T(8,128)} add(dynamic-slice.2, broadcast.8)
bitcast.1 = s32[16,8]{0,1:T(8,128)} bitcast(add.6)
ROOT tuple.3 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) tuple(add.5, dynamic-update-slice.2, get-tuple-element.14, get-tuple-element.12, bitcast.1)
}
condition {
loop_peel_cond_param = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) parameter(0)
get-tuple-element.11 = s32[]{:T(256)} get-tuple-element(loop_peel_cond_param), index=0
constant.28 = s32[]{:T(256)} constant(16)
ROOT compare.1 = pred[]{:T(1024)} compare(get-tuple-element.11, constant.28), direction=LT
}
ENTRY entry {
constant.26 = s32[]{:T(256)} constant(1)
constant.24 = s32[]{:T(256)} constant(0)
broadcast.6 = s32[16,16,8]{1,2,0:T(8,128)} broadcast(constant.24), dimensions={}
param.2 = s32[16,16,8]{1,2,0:T(8,128)} parameter(0), sharding={devices=[1,1,2]<=[2]}
slice = s32[1,16,8]{1,2,0:T(8,128)} slice(param.2), slice={[0:1], [0:16], [0:8]}
broadcast.7 = s32[1,16,8]{1,2,0:T(8,128)} broadcast(constant.26), dimensions={}
add.4 = s32[1,16,8]{1,2,0:T(8,128)} add(slice, broadcast.7)
bitcast.2 = s32[16,8]{0,1:T(8,128)} bitcast(add.4)
tuple.4 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) tuple(constant.26, broadcast.6, param.2, constant.24, bitcast.2)
while.1 = (s32[]{:T(256)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[16,16,8]{1,2,0:T(8,128)}, s32[]{:T(256)}, s32[16,8]{0,1:T(8,128)}) while(tuple.4), condition=condition, body=body
get-tuple-element.17 = s32[16,16,8]{1,2,0:T(8,128)} get-tuple-element(while.1), index=1
get-tuple-element.19 = s32[16,8]{0,1:T(8,128)} get-tuple-element(while.1), index=4
custom-call.4 = s32[16,8]{0,1:T(8,128)} custom-call(get-tuple-element.19), custom_call_target="MoveToHost"
bitcast.3 = s32[1,16,8]{1,2,0:T(8,128)} bitcast(custom-call.4)
get-tuple-element.16 = s32[]{:T(256)} get-tuple-element(while.1), index=3
ROOT dynamic-update-slice.3 = s32[16,16,8]{1,2,0:T(8,128)} dynamic-update-slice(get-tuple-element.17, bitcast.3, get-tuple-element.16, constant.24, constant.24), backend_config={"flag_configs":[],"scoped_memory_configs":[],"indices_config":{"index_known_bits":[{"zeroes":"0","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"},{"zeroes":"4294967295","ones":"0","bitwidth":"32"}]},"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* bitcast;
HloInstruction* gte_0;
HloInstruction* gte_1;
HloInstruction* dus;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice(
&dus, m::GetTupleElement(&gte_0), m::Bitcast(&bitcast),
m::GetTupleElement(&gte_1), m::ConstantScalar(0),
m::ConstantScalar(0))));
TestShapeHasMemorySpace(bitcast->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(dus->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
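// A MoveToDevice immediately re-annotated with MoveToHost collapses to a single
// copy of the parameter into host memory.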
TEST_F(HostOffloaderTest, OutputStreamingNoOpToDevice) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
to_device = s32[2,1]{1,0} custom-call(param), custom_call_target="MoveToDevice"
ROOT to_host = s32[2,1]{1,0} custom-call(to_device), custom_call_target="MoveToHost"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
HloInstruction* param;
HloInstruction* copy;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy, m::Parameter(&param, 0))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy->shape(), Layout::kHostMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
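// Parameter already in host memory returned directly: no copy is inserted; the
// parameter simply keeps its host memory space.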
TEST_F(HostOffloaderTest, ParameterAndOutputStreamingPassThrough) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
ROOT param = s32[2,1]{1,0} parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(&param, 0)));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
}
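// Same pass-through routed via tuple/get-tuple-element: every shape on the path
// is marked as host memory.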
TEST_F(HostOffloaderTest, ParameterAndOutputStreamingPassThroughTuple) {
const std::string& hlo_string = R"(
HloModule OutputStreaming, entry_computation_layout={(s32[2,1]{1,0:T(2,128)S(5)})->s32[2,1]{1,0:T(2,128)S(5)}}
ENTRY main {
param = s32[2,1]{1,0} parameter(0)
tuple = (s32[2,1]{1,0}) tuple(param)
ROOT gte = s32[2,1]{1,0} get-tuple-element(tuple), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* tuple;
HloInstruction* gte;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
&gte, m::Tuple(&tuple, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(tuple->shape(), {0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(gte->shape(), Layout::kHostMemorySpace);
}
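// A MoveToDevice with no matching MoveToHost producer is a no-op and is removed,
// leaving the parameter as the root.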
TEST_F(HostOffloaderTest, LoneMoveToDevice) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(&param, 0)));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
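// Chained MoveToHost annotations deduplicate: the result is one copy to host
// followed by one copy back to device.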
TEST_F(HostOffloaderTest, RepeatedMoveToHost) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToHost"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_1), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
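// Chained MoveToDevice annotations collapse the same way into a single
// host/device copy pair.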
TEST_F(HostOffloaderTest, RepeatedMoveToDevice) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_1), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
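// Two independent MoveToHost annotations on the same parameter still yield a
// single copy to host.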
TEST_F(HostOffloaderTest, RepeatedMoveToHostNonSequential) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
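// Two MoveToDevice users of the same host value still yield a single copy back
// to device.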
TEST_F(HostOffloaderTest, RepeatedMoveToDeviceNonSequential) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main {
param_0 = f32[16,256]{0,1} parameter(0)
custom_call_0 = f32[16,256]{0,1} custom-call(param_0), custom_call_target="MoveToHost"
custom_call_1 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
ROOT custom_call_2 = f32[16,256]{0,1} custom-call(custom_call_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* param;
HloInstruction* copy_to_host;
HloInstruction* copy_to_device;
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(&copy_to_device,
m::Copy(&copy_to_host, m::Parameter(&param, 0)))));
TestShapeHasMemorySpace(param->shape(), Layout::kDefaultMemorySpace);
TestShapeHasMemorySpace(copy_to_host->shape(), Layout::kHostMemorySpace);
TestShapeHasMemorySpace(copy_to_device->shape(), Layout::kDefaultMemorySpace);
EXPECT_FALSE(HaveRemainingOffloadAnnotations(module.get()));
}
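// Results of a host-executed async call already live on the host, so the
// MoveToHost annotations on them are redundant and removed outright.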
TEST_F(HostOffloaderTest, BasicAsyncHostOffloadedCall_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
std::vector<HloInstruction*> expected = {gte_0, gte_1};
EXPECT_THAT(tuple->operands(),
::testing::UnorderedElementsAreArray(expected));
}
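// The entry layout wants the async results in HBM (S(0)), so the
// get-tuple-elements must keep the default memory space.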
TEST_F(HostOffloaderTest,
BasicAsyncHostOffloadedCall_NoChangesWhenEntryLayoutExpectsHBM) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(0)}, f32[4096]{0:S(0)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
ROOT %tuple = (f32[4096], f32[4096]) tuple(%gte_0, %gte_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunHostOffloader(module.get()));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
}
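// gte_0 also has a device-side user (the add), so it stays in device memory and
// gets an explicit copy to host; only the copy for gte_1 is redundant.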
TEST_F(HostOffloaderTest,
BasicAsyncHostOffloadedCall_RemoveOnlyRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add_res = f32[] add(%lhs, %rhs)
}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%sum = f32[4096] add(%gte_0, %gte_0)
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096]{0:S(5)}, f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_EQ(copy->operands()[0], gte_0);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
}
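// Redundant-copy removal also applies when the host async call lives inside a
// called (non-entry) computation.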
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_nonEntryPoint_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%non_async_computation {
%param_0 = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%param_0), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple_non_async = (f32[4096]{0:S(5)}, f32[4096]) tuple(%gte_0_host, %gte_1_host)
}
ENTRY %main {
%a = f32[4096] parameter(0)
%call = (f32[4096], f32[4096]) call(%a), to_apply=%non_async_computation
%call_0 = f32[4096] get-tuple-element(%call), index=0
%call_1 = f32[4096] get-tuple-element(%call), index=1
ROOT %tuple = (f32[4096], f32[4096]) tuple(%call_0, %call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_0_host = FindInstruction(module.get(), "gte_0_host");
ASSERT_EQ(gte_0_host, nullptr);
HloInstruction* gte_1_host = FindInstruction(module.get(), "gte_1_host");
ASSERT_EQ(gte_1_host, nullptr);
HloInstruction* tuple_non_async =
FindInstruction(module.get(), "tuple_non_async");
ASSERT_NE(tuple_non_async, nullptr);
std::vector<HloInstruction*> expected = {gte_0, gte_1};
EXPECT_THAT(tuple_non_async->operands(),
::testing::UnorderedElementsAreArray(expected));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(0),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
}
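// Host-resident async results flowing through a call: the callee's parameters
// and tuple are rewritten to host memory and both MoveToHost annotations vanish.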
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_passedToCall_RemoveRedundantCopies) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%non_async_computation {
%param_0_non_async = f32[4096] parameter(0)
%param_1_non_async = f32[4096] parameter(1)
ROOT %tuple_non_async = (f32[4096], f32[4096]) tuple(%param_0_non_async, %param_1_non_async)
}
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%call = (f32[4096], f32[4096]) call(%gte_0, %gte_1), to_apply=%non_async_computation
%call_0 = f32[4096] get-tuple-element(%call), index=0
%call_1 = f32[4096] get-tuple-element(%call), index=1
%call_0_host = f32[4096] custom-call(%call_0), custom_call_target="MoveToHost"
%call_1_host = f32[4096] custom-call(%call_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096]) tuple(%call_0_host, %call_1_host)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kHostMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kHostMemorySpace);
HloInstruction* call_0 = FindInstruction(module.get(), "call_0");
ASSERT_NE(call_0, nullptr);
HloInstruction* call_1 = FindInstruction(module.get(), "call_1");
ASSERT_NE(call_1, nullptr);
HloInstruction* call_0_host = FindInstruction(module.get(), "call_0_host");
ASSERT_EQ(call_0_host, nullptr);
HloInstruction* call_1_host = FindInstruction(module.get(), "call_1_host");
ASSERT_EQ(call_1_host, nullptr);
HloInstruction* param_0_non_async =
FindInstruction(module.get(), "param_0_non_async");
ASSERT_NE(param_0_non_async, nullptr);
TestShapeHasMemorySpace(param_0_non_async->shape(), Layout::kHostMemorySpace);
HloInstruction* param_1_non_async =
FindInstruction(module.get(), "param_1_non_async");
ASSERT_NE(param_1_non_async, nullptr);
TestShapeHasMemorySpace(param_1_non_async->shape(), Layout::kHostMemorySpace);
HloInstruction* tuple_non_async =
FindInstruction(module.get(), "tuple_non_async");
ASSERT_NE(tuple_non_async, nullptr);
std::vector<HloInstruction*> expected_operands = {param_0_non_async,
param_1_non_async};
EXPECT_THAT(tuple_non_async->operands(),
::testing::UnorderedElementsAreArray(expected_operands));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(0),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(tuple->shape().tuple_shapes(1),
Layout::kHostMemorySpace);
std::vector<HloInstruction*> expected = {call_0, call_1};
EXPECT_THAT(tuple->operands(),
::testing::UnorderedElementsAreArray(expected));
}
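// The async results are also consumed by a second host-offloaded async call, so
// the pass leaves them in device memory and removes no copies.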
TEST_F(HostOffloaderTest,
AsyncHostOffloadedCall_passedToAsyncHostOffloadedCall_NoCopiesRemoved) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={(f32[4096]{0:S(5)})->(f32[4096]{0:S(5)}, f32[4096]{0:S(5)}, f32[4096]{0:S(0)}, f32[4096]{0:S(0)})}
%async_computation {
%param_0 = f32[4096] parameter(0)
ROOT %offloaded-custom-call = (f32[4096], f32[4096]) custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
%extra_async_computation {
%param_0_extra_async = f32[4096] parameter(0)
%param_1_extra_async = f32[4096] parameter(1)
ROOT %offloaded-extra-custom-call = (f32[4096], f32[4096]) custom-call(%param_0_extra_async, %param_1_extra_async), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY %main {
%a = f32[4096] parameter(0)
%async-start = ((f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%a), async_execution_thread="host", calls=%async_computation
%async-done = (f32[4096], f32[4096]) custom-call-done(%async-start)
%gte_0 = f32[4096] get-tuple-element(%async-done), index=0
%gte_1 = f32[4096] get-tuple-element(%async-done), index=1
%extra-async-start = ((f32[4096], f32[4096]), (f32[4096], f32[4096]), u32[]) async-start(%gte_0, %gte_1), async_execution_thread="host", calls=%extra_async_computation
%extra-async-done = (f32[4096], f32[4096]) custom-call-done(%extra-async-start)
%call_0 = f32[4096] get-tuple-element(%extra-async-done), index=0
%call_1 = f32[4096] get-tuple-element(%extra-async-done), index=1
%gte_0_host = f32[4096] custom-call(%gte_0), custom_call_target="MoveToHost"
%gte_1_host = f32[4096] custom-call(%gte_1), custom_call_target="MoveToHost"
ROOT %tuple = (f32[4096], f32[4096], f32[4096], f32[4096]) tuple(%gte_0_host, %gte_1_host, %call_0, %call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
ASSERT_NE(gte_0, nullptr);
TestShapeHasMemorySpace(gte_0->shape(), Layout::kDefaultMemorySpace);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte_1");
ASSERT_NE(gte_1, nullptr);
TestShapeHasMemorySpace(gte_1->shape(), Layout::kDefaultMemorySpace);
}
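// A constant round-tripped through MoveToHost/MoveToDevice into the root tuple
// must still rewrite without error.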
TEST_F(HostOffloaderTest, OffloadPassedToEntryComputationRoot) {
const std::string& hlo_string = R"(
HloModule m, entry_computation_layout={()->(s32[]{:T(128)})}
ENTRY %main {
c = s32[] constant(1)
custom-call.331 = s32[]{:T(128)} custom-call(c), custom_call_target="MoveToHost"
custom-call.332 = s32[]{:T(128)} custom-call(custom-call.331), custom_call_target="MoveToDevice"
ROOT tuple = (s32[]{:T(128)}) tuple(custom-call.332)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << "module after: " << module->ToString();
}
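// Two host buffers in a while body aliasing the same broadcast-initialized
// value: the shared MoveToHost source must not trip up the pass.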
TEST_F(HostOffloaderTest, MoveToHostInsideWhileLoopBodyShareSameBroadcast) {
const absl::string_view hlo_string = R"(
HloModule MoveToHostFoundOutsideAndInsideOfWhileLoop, entry_computation_layout={(s32[],f32[1,1,128,128],f32[1,1,128,128])->(f32[8,1,128,128]{3,2,1,0:T(8,128)S(5)}, f32[8,1,128,128]{3,2,1,0:T(8,128)S(5)}, f32[1,1,128,128], f32[1,1,128,128], s32[], s32[])}
while_condition {
condition_param = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) parameter(0)
condition_current_iteration_index = s32[] get-tuple-element(condition_param), index=5
condition_iteration_count = s32[] constant(16)
ROOT condition_result = pred[] compare(condition_current_iteration_index, condition_iteration_count), direction=LT
}
while_body {
while_body_input_tuple = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) parameter(0)
host_tensor_1 = f32[8,1,128,128] get-tuple-element(while_body_input_tuple), index=0
host_tensor_2 = f32[8,1,128,128] get-tuple-element(while_body_input_tuple), index=1
update_1 = f32[1,1,128,128] get-tuple-element(while_body_input_tuple), index=2
update_2 = f32[1,1,128,128] get-tuple-element(while_body_input_tuple), index=3
offset_dus = s32[] get-tuple-element(while_body_input_tuple), index=4
while_body_num_iter = s32[] get-tuple-element(while_body_input_tuple), index=5
mth_tensor_1 = f32[8,1,128,128] custom-call(host_tensor_1), custom_call_target="MoveToHost"
mth_tensor_2 = f32[8,1,128,128] custom-call(host_tensor_2), custom_call_target="MoveToHost"
constant_zero = s32[] constant(0)
host_dus_1 = f32[8,1,128,128]{3,2,1,0:T(8,128)} dynamic-update-slice(mth_tensor_1, update_1, offset_dus, constant_zero, constant_zero, constant_zero)
host_dus_2 = f32[8,1,128,128]{3,2,1,0:T(8,128)} dynamic-update-slice(mth_tensor_2, update_2, offset_dus, constant_zero, constant_zero, constant_zero)
ROOT while_output_tuple = tuple(host_dus_1,host_dus_2, update_1, update_2, offset_dus, while_body_num_iter)
}
ENTRY main {
offset = s32[] parameter(0)
update = f32[1,1,128,128] parameter(1)
update2 = f32[1,1,128,128] parameter(2)
constant = f32[] constant(1.0)
broadcast = f32[8,1,128,128] broadcast(constant)
shared_host_memory = f32[8,1,128,128] custom-call(broadcast), custom_call_target="MoveToHost"
tuple_for_while = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) tuple(shared_host_memory, shared_host_memory, update, update2, offset, offset)
ROOT while = (f32[8,1,128,128], f32[8,1,128,128], f32[1,1,128,128], f32[1,1,128,128], s32[], s32[]) while(tuple_for_while), condition=while_condition, body=while_body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
}
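// Redundant MoveToHost on the non-tuple result of a host async call: the
// annotation is removed and the async-start/done shapes are marked host memory.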
TEST_F(HostOffloaderTest, RemoveRedundantCopiesBackToHostOutputIsNonTuple) {
const absl::string_view hlo_string = R"(
HloModule jit_main, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }, entry_computation_layout={(f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)S(5)})->(f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)S(5)})}, allow_spmd_sharding_propagation_to_parameters={false,false}, allow_spmd_sharding_propagation_to_output={false,false}
%host_fn.6 (Arg_0.7: f32[25769803776]) -> f32[25769803776] {
%Arg_0.7 = f32[25769803776]{0} parameter(0), metadata={op_name="jit(main)/jit(main)/pjit"}
%constant.8 = f32[] constant(1)
%broadcast.9 = f32[25769803776]{0} broadcast(f32[] %constant.8), dimensions={}, metadata={op_name="jit(main)/jit(main)/jit(host_fn)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1448}
ROOT %add.10 = f32[25769803776]{0} add(f32[25769803776]{0} %Arg_0.7, f32[25769803776]{0} %broadcast.9), frontend_attributes={_xla_compute_type="host"}, metadata={op_name="jit(main)/jit(main)/jit(host_fn)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1448}
}, execution_thread="host"
ENTRY %main.17 (Arg_0.1: f32[1048576], Arg_1.2: f32[25769803776]) -> (f32[1048576], f32[25769803776]) {
%Arg_0.1 = f32[1048576]{0:T(1024)} parameter(0), sharding={replicated}, metadata={op_name="a"}
%constant.3 = f32[]{:T(128)} constant(1)
%broadcast.4 = f32[1048576]{0:T(1024)} broadcast(f32[]{:T(128)} %constant.3), dimensions={}, metadata={op_name="jit(main)/jit(main)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1454}
%add.5 = f32[1048576]{0:T(1024)} add(f32[1048576]{0:T(1024)} %Arg_0.1, f32[1048576]{0:T(1024)} %broadcast.4), metadata={op_name="jit(main)/jit(main)/add" source_file="third_party/py/jax/tests/memories_test.py" source_line=1454}
%custom-call = f32[1048576]{0:T(1024)} custom-call(f32[1048576]{0:T(1024)} %add.5), custom_call_target="MoveToDevice"
%Arg_1.2 = f32[25769803776]{0:T(1024)} parameter(1), sharding={replicated}, metadata={op_name="b"}
%host-async-start = ((f32[25769803776]{0:T(1024)}), f32[25769803776]{0:T(1024)}, u32[]{:T(128)}) custom-call-start(f32[25769803776]{0:T(1024)} %Arg_1.2), async_execution_thread="host", custom_call_target="HostExecute", called_computations={%host_fn.6}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
%host-async-done = f32[25769803776]{0:T(1024)} custom-call-done(((f32[25769803776]{0:T(1024)}), f32[25769803776]{0:T(1024)}, u32[]{:T(128)}) %host-async-start), backend_config={"flag_configs":[],"scoped_memory_configs":[],"device_type":"DEVICE_TYPE_HOST","used_scoped_memory_configs":[]}
%redundant-move-to-host = f32[25769803776]{0:T(1024)} custom-call(f32[25769803776]{0:T(1024)} %host-async-done), custom_call_target="MoveToHost"
ROOT %output_tuple = (f32[1048576]{0:T(1024)}, f32[25769803776]{0:T(1024)}) tuple(f32[1048576]{0:T(1024)} %custom-call, f32[25769803776]{0:T(1024)} %redundant-move-to-host), sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloader(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
ASSERT_NE(async_start, nullptr);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(async_start->shape(), {0, 0}),
Layout::kHostMemorySpace);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(async_start->shape(), {1}),
Layout::kHostMemorySpace);
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
ASSERT_NE(async_done, nullptr);
TestShapeHasMemorySpace(async_done->shape(), Layout::kHostMemorySpace);
HloInstruction* output_tuple = FindInstruction(module.get(), "output_tuple");
ASSERT_NE(output_tuple, nullptr);
TestShapeHasMemorySpace(ShapeUtil::GetSubshape(output_tuple->shape(), {1}),
Layout::kHostMemorySpace);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5affd7b-6c08-4985-9501-fbeefc796907 | cpp | tensorflow/tensorflow | real_imag_expander | third_party/xla/xla/service/real_imag_expander.cc | third_party/xla/xla/service/real_imag_expander_test.cc | #include "xla/service/real_imag_expander.h"
#include "xla/literal_util.h"
namespace xla {
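// real/imag on a non-complex operand is trivially expandable.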
bool RealImagExpander::InstructionMatchesPattern(HloInstruction* inst) {
return (inst->opcode() == HloOpcode::kReal ||
inst->opcode() == HloOpcode::kImag) &&
!ShapeUtil::ElementIsComplex(inst->operand(0)->shape());
}
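// real(x) folds to x; imag(x) becomes a broadcast of zero in x's element type.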
absl::StatusOr<HloInstruction*> RealImagExpander::ExpandInstruction(
HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kReal) {
return inst->mutable_operand(0);
} else {
HloComputation* comp = inst->parent();
auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(inst->operand(0)->shape().element_type())));
zero = comp->AddInstruction(
HloInstruction::CreateBroadcast(inst->shape(), zero, {}));
return zero;
}
}
} | #include "xla/service/real_imag_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
namespace m = match;
class RealImagExpanderTest : public HloTestBase {};
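// real(f32 input) should fold to the parameter itself.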
TEST_F(RealImagExpanderTest, RealWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = f32[4] parameter(0)
ROOT real = real(input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Parameter(0)));
}
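// imag(f32 input) should lower to a broadcast of zero.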
TEST_F(RealImagExpanderTest, ImagWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
ROOT imag = imag(input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Broadcast()));
XLA_VLOG_LINES(1, module->ToString());
}
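// Complex operands are out of scope: the expander must not change the module.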
TEST_F(RealImagExpanderTest, RealImagWithComplexInput) {
const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = c64[4] parameter(0)
real = real(input)
imag = imag(input)
ROOT t = tuple(real, imag)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_FALSE(result);
}
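// An imag rebuilt after parsing via MakeUnaryHlo (replacing imag1) must still
// expand, leaving a broadcast at the root.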
TEST_F(RealImagExpanderTest, MultipleImagWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
imag1 = imag(input)
ROOT imag2 = imag(imag1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
auto param = module->entry_computation()->parameter_instruction(0);
HloInstruction* imag1 =
module->entry_computation()->root_instruction()->mutable_operand(0);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_imag,
MakeUnaryHlo(HloOpcode::kImag, param));
TF_ASSERT_OK(
module->entry_computation()->ReplaceInstruction(imag1, new_imag));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Broadcast()));
XLA_VLOG_LINES(1, module->ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/real_imag_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/real_imag_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7ff3d883-8d11-4d35-95d6-3c1c628ac7ce | cpp | tensorflow/tensorflow | while_util | third_party/xla/xla/service/while_util.cc | third_party/xla/xla/service/while_util_test.cc | #include "xla/service/while_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
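// Builds a condition over wide_shape that extracts the original narrow prefix,
// calls narrow_condition on it, and inlines that call.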
static absl::StatusOr<
std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileCondition(HloComputation* narrow_condition, const Shape& wide_shape) {
const Shape& narrow_shape =
narrow_condition->parameter_instruction(0)->shape();
HloComputation* wide_while_cond = [&]() {
HloComputation::Builder builder(StrCat("wide.", narrow_condition->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.",
narrow_condition->parameter_instruction(0)->name())));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return narrow_condition->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_while_cond->parameter_instruction(0),
narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
wide_while_cond->parameter_instruction(0)->name()));
HloInstruction* call_narrow_cond = wide_while_cond->AddInstruction(
HloInstruction::CreateCall(ShapeUtil::MakeShape(PRED, {}),
{truncated_parameter}, narrow_condition));
wide_while_cond->set_root_instruction(call_narrow_cond);
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_narrow_cond));
return {{wide_while_cond, std::move(inlined_instructions_map)}};
}
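// Widens the body the same way; tuple elements beyond the narrow prefix pass
// through the loop unchanged.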
static absl::StatusOr<
std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileBody(HloComputation* narrow_body, const Shape& wide_shape) {
const Shape& narrow_shape = narrow_body->parameter_instruction(0)->shape();
HloComputation* wide_while_body = [&]() {
HloComputation::Builder builder(StrCat("wide.", narrow_body->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.", narrow_body->parameter_instruction(0)->name())));
return narrow_body->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* wide_parameter = wide_while_body->parameter_instruction(0);
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_parameter, narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
wide_while_body->parameter_instruction(0)->name()));
HloInstruction* call_narrow_body =
wide_while_body->AddInstruction(HloInstruction::CreateCall(
narrow_shape, {truncated_parameter}, narrow_body));
std::vector<HloInstruction*> live_through_values;
for (int i = narrow_shape.tuple_shapes_size();
i < wide_shape.tuple_shapes_size(); i++) {
live_through_values.push_back(wide_while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(wide_shape.tuple_shapes(i),
wide_parameter, i),
absl::StrCat(wide_while_body->name(), ".through.",
i - narrow_shape.tuple_shapes_size())));
}
wide_while_body->set_root_instruction(
TupleUtil::AppendSuffix(call_narrow_body, live_through_values));
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_narrow_body));
return {{wide_while_body, std::move(inlined_instructions_map)}};
}
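// Rebuilds while_instr with the given instructions appended to the carried
// tuple so the body can read them via get-tuple-element.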
absl::StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
HloInstruction* while_instr,
absl::Span<HloInstruction* const> instructions) {
CHECK(while_instr->shape().IsTuple());
int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
Shape new_while_shape = while_instr->shape();
for (auto* instruction : instructions) {
*new_while_shape.add_tuple_shapes() = instruction->shape();
}
HloComputation* new_while_condition;
CallInliner::InlinedInstructionMap inlined_condition_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_condition, inlined_condition_instructions_map),
WidenWhileCondition(while_instr->while_condition(), new_while_shape));
HloComputation* new_while_body;
CallInliner::InlinedInstructionMap inlined_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_body, inlined_instructions_map),
WidenWhileBody(while_instr->while_body(), new_while_shape));
HloInstruction* new_while_init =
TupleUtil::AppendSuffix(while_instr->mutable_operand(0), instructions);
HloComputation* containing_computation = while_instr->parent();
HloInstruction* new_while = containing_computation->AddInstruction(
HloInstruction::CreateWhile(new_while_shape, new_while_condition,
new_while_body, new_while_init));
HloInstruction* replacement_instr = TupleUtil::ExtractPrefix(
new_while, while_instr->shape().tuple_shapes_size());
TF_RETURN_IF_ERROR(while_instr->ReplaceAllUsesWith(replacement_instr));
TF_RETURN_IF_ERROR(containing_computation->RemoveInstruction(while_instr));
HloInstruction* while_body_param = new_while_body->parameter_instruction(0);
std::vector<HloInstruction*> live_in_instructions;
for (int64_t i = elements_in_old_while_shape;
i < new_while_shape.tuple_shapes_size(); i++) {
live_in_instructions.push_back(new_while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(
instructions[i - elements_in_old_while_shape]->shape(),
while_body_param, i),
absl::StrCat(new_while_body->name(), ".in.",
i - elements_in_old_while_shape)));
}
WhileUtil::MakeInstructionsLiveInResult result;
result.new_while_instr = new_while;
result.replacement_instr = replacement_instr;
result.while_body_live_in_values = std::move(live_in_instructions);
result.while_body_instruction_map = std::move(inlined_instructions_map);
result.while_condition_instruction_map =
std::move(inlined_condition_instructions_map);
return std::move(result);
}
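// Counted-loop condition: induction variable (tuple element 0) < trip_count.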
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopConditionComputation(const Shape& loop_state_shape,
int32_t trip_count) {
Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> cond_computation,
CreateComputationWithSignature(
{&loop_state_shape}, scalar_pred, "while_cond"));
HloInstruction* trip_count_constant =
cond_computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(trip_count)));
HloInstruction* param = cond_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
MakeGetTupleElementHlo(param, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * compare,
MakeCompareHlo(ComparisonDirection::kLt, indvar, trip_count_constant));
cond_computation->set_root_instruction(compare);
return std::move(cond_computation);
}
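// Counted-loop body: bump the induction variable, hand the rest of the state to
// loop_body_generator, and re-tuple its result.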
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopBodyComputation(
const Shape& loop_state_shape,
absl::FunctionRef<absl::StatusOr<WhileUtil::LoopStateTy>(
HloInstruction*, const WhileUtil::LoopStateTy&)>
loop_body_generator) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> body_computation,
CreateComputationWithSignature(
{&loop_state_shape}, loop_state_shape, "while_body"));
HloInstruction* one = body_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* param = body_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
MakeGetTupleElementHlo(param, 0));
TF_ASSIGN_OR_RETURN(HloInstruction * next_indvar,
MakeBinaryHlo(HloOpcode::kAdd, indvar, one));
std::vector<HloInstruction*> loop_body_generator_args;
for (int i = 1, e = loop_state_shape.tuple_shapes_size(); i < e; i++) {
TF_ASSIGN_OR_RETURN(HloInstruction * tuple_element,
MakeGetTupleElementHlo(param, i));
loop_body_generator_args.push_back(tuple_element);
}
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> next_state,
loop_body_generator(indvar, loop_body_generator_args));
next_state.insert(next_state.begin(), next_indvar);
HloInstruction* next_state_tuple =
body_computation->AddInstruction(HloInstruction::CreateTuple(next_state));
body_computation->set_root_instruction(next_state_tuple);
return std::move(body_computation);
}
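// Initial loop state is (0, init_values...); the caller receives ownership of
// the zero constant alongside the tuple.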
static std::pair<std::unique_ptr<HloInstruction>,
std::unique_ptr<HloInstruction>>
MakeInitTupleFromInitValues(const WhileUtil::LoopStateTy& init_values) {
std::vector<HloInstruction*> init_values_with_indvar;
init_values_with_indvar.reserve(init_values.size() + 1);
std::unique_ptr<HloInstruction> zero =
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0));
init_values_with_indvar.push_back(zero.get());
absl::c_copy(init_values, std::back_inserter(init_values_with_indvar));
return std::make_pair(std::move(zero),
HloInstruction::CreateTuple(init_values_with_indvar));
}
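// Loop state shape is (s32, shapes of init_values...); shapes without a layout
// get the default layout.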
static Shape MakeLoopStateShapeWithLayout(
const WhileUtil::LoopStateTy& init_values) {
std::vector<Shape> loop_state_shape_components;
loop_state_shape_components.reserve(init_values.size() + 1);
loop_state_shape_components.push_back(ShapeUtil::MakeShape(S32, {}));
absl::c_transform(init_values,
std::back_inserter(loop_state_shape_components),
[](HloInstruction* instr) {
Shape shape = instr->shape();
if (!shape.has_layout()) {
LayoutUtil::SetToDefaultLayout(&shape);
}
return shape;
});
return ShapeUtil::MakeTupleShape(loop_state_shape_components);
}
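// Assembles condition, body, and init into a while instruction; everything is
// returned as owned instructions not yet attached to a computation.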
absl::StatusOr<WhileUtil::OwningLoopStateTy>
WhileUtil::MakeCountedLoop(HloModule* module, int32_t trip_count,
const WhileUtil::LoopStateTy& init_values,
WhileUtil::LoopBodyGeneratorTy loop_body_generator,
const OpMetadata& metadata) {
CHECK_GE(trip_count, 0);
Shape loop_state_shape = MakeLoopStateShapeWithLayout(init_values);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloComputation> cond,
MakeCountedLoopConditionComputation(loop_state_shape, trip_count));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloComputation> body,
MakeCountedLoopBodyComputation(loop_state_shape, loop_body_generator));
std::unique_ptr<HloInstruction> owned_indvar;
std::unique_ptr<HloInstruction> owned_init_tuple;
std::tie(owned_indvar, owned_init_tuple) =
MakeInitTupleFromInitValues(init_values);
std::unique_ptr<HloInstruction> owned_while = HloInstruction::CreateWhile(
loop_state_shape, module->AddEmbeddedComputation(std::move(cond)),
module->AddEmbeddedComputation(std::move(body)), owned_init_tuple.get());
owned_while->set_metadata(metadata);
HloInstruction* while_instr = owned_while.get();
std::vector<std::unique_ptr<HloInstruction>> owned;
owned.push_back(std::move(owned_indvar));
owned.push_back(std::move(owned_init_tuple));
owned.push_back(std::move(owned_while));
std::vector<HloInstruction*> while_results;
for (int64_t i = 0, e = init_values.size(); i < e; i++) {
std::unique_ptr<HloInstruction> user_state =
HloInstruction::CreateGetTupleElement(init_values[i]->shape(),
while_instr, i + 1);
while_results.push_back(user_state.get());
owned.push_back(std::move(user_state));
}
return WhileUtil::OwningLoopStateTy{std::move(owned), while_results};
}
absl::StatusOr<WhileUtil::LoopStateTy> WhileUtil::MakeCountedLoop(
HloComputation* computation, int32_t trip_count,
const WhileUtil::LoopStateTy& init_values,
WhileUtil::LoopBodyGeneratorTy loop_body_generator,
const OpMetadata& metadata) {
TF_ASSIGN_OR_RETURN(
auto owning_loop_state,
MakeCountedLoop(computation->parent(), trip_count, init_values,
loop_body_generator, metadata));
for (auto& instruction_to_add : owning_loop_state.instructions_to_add) {
computation->AddInstruction(std::move(instruction_to_add));
}
return owning_loop_state.while_results;
}
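// A body GTE is loop-invariant iff root tuple element i is
// get-tuple-element(param, i).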
std::vector<HloInstruction*> WhileUtil::GetInvariantGTEsForWhileBody(
const HloComputation& while_body) {
std::vector<HloInstruction*> result;
const HloInstruction::InstructionVector root_operands =
while_body.root_instruction()->operands();
for (int i = 0; i < root_operands.size(); i++) {
HloInstruction* instr = root_operands[i];
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->tuple_index() == i &&
instr->operand(0) == while_body.parameter_instruction(0)) {
result.push_back(instr);
}
}
return result;
}
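// Maps each tuple index of the while condition's parameter to the
// get-tuple-element instructions that read it.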
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
WhileUtil::GetGTEsMapForWhileConditional(
const HloComputation& while_conditional) {
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>> result;
for (HloInstruction* user :
while_conditional.parameter_instruction(0)->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
result[user->tuple_index()].push_back(user);
}
}
return result;
}
} | #include "xla/service/while_util.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class WhileUtilTest : public HloTestBase {
protected:
absl::StatusOr<std::unique_ptr<VerifiedHloModule>> GetParsedModule(
HloComputation** entry_computation, HloInstruction** param0,
HloInstruction** param1, HloInstruction** param2) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
while_body {
ROOT p_body = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
}
while_condition {
p_cond = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
p_entry_0 = f32[32,32]{1,0} parameter(0)
p_entry_1 = s32[32,32]{1,0} parameter(1)
p_entry_2 = s64[32,32]{1,0} parameter(2)
while_init = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p_entry_0, p_entry_0)
ROOT while = (f32[32,32]{1,0}, f32[32,32]{1,0}) while(while_init), condition=while_condition, body=while_body
}
)";
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
*entry_computation = module->entry_computation();
*param0 = (*entry_computation)->parameter_instruction(0);
*param1 = (*entry_computation)->parameter_instruction(1);
*param2 = (*entry_computation)->parameter_instruction(2);
return std::move(module);
}
};
TEST_F(WhileUtilTest, MakeZeroInstructionsLiveOp) {
HloInstruction *param0, *param1, *param2;
HloComputation* entry_computation;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
GetParsedModule(&entry_computation, ¶m0, ¶m1, ¶m2));
HloInstruction* while_instr = entry_computation->root_instruction();
ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
WhileUtil::MakeInstructionsLiveIn(while_instr, {}));
HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
EXPECT_THAT(
entry_computation->root_instruction(),
op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
auto param_reconstructed =
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1));
EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(param_reconstructed, 0),
op::GetTupleElement(param_reconstructed, 1)));
}
TEST_F(WhileUtilTest, MakeTwoInstructionsLive) {
HloInstruction *param0, *param1, *param2;
HloComputation* entry_computation;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
GetParsedModule(&entry_computation, ¶m0, ¶m1, ¶m2));
HloInstruction* while_instr = entry_computation->root_instruction();
ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
TF_ASSERT_OK_AND_ASSIGN(
WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
WhileUtil::MakeInstructionsLiveIn(while_instr,
{param0, param1}));
HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
XLA_VLOG_LINES(3, module->ToString());
EXPECT_THAT(
entry_computation->root_instruction(),
op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
auto first_half_param_reconstructed =
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1));
EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
op::Tuple(op::GetTupleElement(first_half_param_reconstructed, 0),
op::GetTupleElement(first_half_param_reconstructed, 1),
op::GetTupleElement(op::Parameter(0), 2),
op::GetTupleElement(op::Parameter(0), 3)));
}
TEST_F(WhileUtilTest, GetInvariantGTEsForWhileBody) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* while_body = module->GetComputationWithName("body");
ASSERT_NE(while_body, nullptr)
<< "Expected exactly one while_body computation";
std::vector<HloInstruction*> gte_list =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
ASSERT_EQ(gte_list.size(), 1);
EXPECT_EQ((*gte_list.begin())->name(), "gte.0");
}
TEST_F(WhileUtilTest, AlwaysRemovePreviousWhileBody) {
const char* const hlo_string = R"(
HloModule WhileWithSideEffects
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
token0 = token[] after-all()
infeed = (pred[], token[]) infeed(token0)
ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
to_make_live_in = f32[100] parameter(1)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* main = module->GetComputationWithName("main");
HloInstruction* while_instr = main->root_instruction();
HloInstruction* to_make_live_in = main->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(
WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
WhileUtil::MakeInstructionsLiveIn(while_instr,
{to_make_live_in}));
auto is_while = [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kWhile;
};
EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d87c4941-ceea-46ea-a6dd-ad59cb70dad0 | cpp | tensorflow/tensorflow | loop_schedule_linearizer | third_party/xla/xla/service/loop_schedule_linearizer.cc | third_party/xla/xla/service/loop_schedule_linearizer_test.cc | #include "xla/service/loop_schedule_linearizer.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
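// Maintains a cycle-detecting graph over a computation's instructions,
// seeded with its existing data and control edges, so that a candidate new
// control edge can be rejected if it would introduce a cycle.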
class ComputationInstructionOrdering {
public:
explicit ComputationInstructionOrdering(const HloComputation& computation) {
for (const HloInstruction* instr : computation.instructions()) {
for (const HloInstruction* control_pred : instr->control_predecessors()) {
CHECK(this->InsertEdge(*control_pred, *instr))
<< "Graph already contained a cycle";
}
for (int op_id = 0; op_id < instr->operand_count(); op_id++) {
const HloInstruction* op = instr->operand(op_id);
CHECK(this->InsertEdge(*op, *instr))
<< "Graph already contained a cycle";
}
}
}
int32_t NodeIdForInstruction(const HloInstruction& instr) {
int32_t instruction_id = instr.unique_id();
auto it = node_id_to_graph_id_.find(instruction_id);
if (it != node_id_to_graph_id_.end()) {
return it->second;
}
int32_t node_id = graph_cycles_.NewNode();
node_id_to_graph_id_[instruction_id] = node_id;
return node_id;
}
bool InsertEdge(const HloInstruction& source, const HloInstruction& dest) {
int32_t source_id = NodeIdForInstruction(source);
int32_t dest_id = NodeIdForInstruction(dest);
return graph_cycles_.InsertEdge(source_id, dest_id);
}
private:
absl::flat_hash_map<int32_t, int32_t> node_id_to_graph_id_;
GraphCycles graph_cycles_;
};
}
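// For every leaf of the while loop's state that has a unique HloValue at
// both the body parameter and the body root, adds a control edge from each
// instruction that reads the incoming value to the instruction that defines
// the outgoing value. Scheduling all reads before the write lets copy
// insertion avoid copying that state element. Returns true if any edge was
// added.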
static absl::StatusOr<bool> AddControlEdgesForLoopWrites(
HloInstruction* xla_while, HloAliasAnalysis& alias_analysis) {
HloDataflowAnalysis& dataflow = alias_analysis.dataflow_analysis();
HloComputation* body = xla_while->while_body();
HloInstruction* root = body->root_instruction();
HloInstruction* input = body->parameter_instruction(0);
bool changed = false;
ComputationInstructionOrdering ordering(*body);
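// indices_to_copy is used only to enumerate the ShapeIndexes of the loop
// state; its boolean payload is never written.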
ShapeTree<bool> indices_to_copy(&xla_while->shape());
for (auto& p : indices_to_copy) {
const ShapeIndex& index = p.first;
if (index.empty()) {
continue;
}
if (dataflow.GetValueSet(root, index).values().size() > 1 ||
dataflow.GetValueSet(input, index).values().size() > 1) {
VLOG(2) << "Index " << index.ToString() << " is associated with multiple "
<< "values, not attempting to introduce stricter dependencies";
} else {
HloValue& value_at_root = dataflow.GetUniqueValueAt(root, index);
HloValue& value_at_input = dataflow.GetUniqueValueAt(input, index);
if (value_at_root.shape().IsTuple()) {
continue;
}
HloInstruction* write = value_at_root.defining_instruction();
for (const HloUse& use : value_at_input.GetUses()) {
HloInstruction* read = use.instruction;
if (read != write && value_at_root != value_at_input &&
read->parent() == write->parent()) {
VLOG(2) << "Inside " << body->name() << ", index "
<< index.ToString();
if (!ordering.InsertEdge(*read, *write)) {
VLOG(2) << "Not adding a control dependency from "
<< read->ToShortString() << " to " << write->ToShortString()
<< " as it would introduce a cycle";
continue;
}
if (!absl::c_linear_search(read->control_successors(), write)) {
TF_RETURN_IF_ERROR(read->AddControlDependencyTo(write));
VLOG(2) << "Adding dependency: " << read->ToShortString()
<< " before " << write->ToShortString();
changed = true;
}
}
}
}
}
return changed;
}
absl::StatusOr<bool> LoopScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<HloAliasAnalysis> alias_analysis;
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const HloComputation* body = instruction->while_body();
bool has_async_collectives =
absl::c_any_of(body->instructions(), [](const HloInstruction* instr) {
return hlo_query::IsAsyncCollectiveStartOp(
instr, /*include_send_recv=*/true) ||
hlo_query::IsAsyncCollectiveDoneOp(
instr, /*include_send_recv=*/true);
});
if (has_async_collectives) {
VLOG(2) << "Skipping " << instruction->name()
<< " since body has async collectives";
continue;
}
if (alias_analysis == nullptr) {
TF_ASSIGN_OR_RETURN(alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
}
TF_ASSIGN_OR_RETURN(bool updated_loop, AddControlEdgesForLoopWrites(
instruction, *alias_analysis));
changed |= updated_loop;
}
}
return changed;
}
} | #include "xla/service/loop_schedule_linearizer.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
int64_t CountControlEdges(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountControlEdges(*computation);
}
return count;
}
class LoopScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCopies(HloModule* module, bool expect_change) {
LoopScheduleLinearizer loop_schedule_linearizer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, loop_schedule_linearizer.Run(module));
ASSERT_EQ(changed, expect_change);
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module).status());
}
};
TEST_F(LoopScheduleLinearizerTest, NoExtraCopiesRequired) {
absl::string_view hlo_string = R"(
HloModule module
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ROOT out = (s32[], s32[]) tuple(updated_counter, updated_buffer)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), /*expect_change=*/true);
EXPECT_EQ(CountCopies(
*module->entry_computation()->root_instruction()->while_body()),
0);
EXPECT_EQ(CountControlEdges(
*module->entry_computation()->root_instruction()->while_body()),
1);
}
TEST_F(LoopScheduleLinearizerTest, SkipAsyncCollectives) {
absl::string_view hlo_string = R"(
HloModule module
add {
x = s32[] parameter(0)
y = s32[] parameter(1)
ROOT add = s32[] add(x, y)
}
while_body {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
buffer = s32[] get-tuple-element(input), index=1
one = s32[] constant(1)
updated_counter = s32[] add(counter, one)
updated_buffer = s32[] add(buffer, counter)
ar_start = s32[] all-reduce-start(updated_buffer), replica_groups={}, to_apply=add
ar_done = s32[] all-reduce-done(ar_start)
ROOT out = (s32[], s32[]) tuple(updated_counter, ar_done)
}
while_cond {
input = (s32[], s32[]) parameter(0)
counter = s32[] get-tuple-element(input), index=0
bound = s32[] constant(100)
ROOT cmp = pred[] compare(counter, bound), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
buffer = s32[] parameter(0)
while_input = (s32[], s32[]) tuple(zero, buffer)
ROOT out = (s32[], s32[]) while(while_input), condition=while_cond, body=while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get(), /*expect_change=*/false);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/loop_schedule_linearizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/loop_schedule_linearizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95f724dc-8304-472f-bfdf-85d4e1c4bdcf | cpp | tensorflow/tensorflow | hlo_cse | third_party/xla/xla/service/hlo_cse.cc | third_party/xla/xla/service/hlo_cse_test.cc | #include "xla/service/hlo_cse.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
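// Hash/equality key for constant deduplication: two constants match when
// they belong to the same domain and their literals (and, if
// kIsLayoutSensitive, their layouts) are equal.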
template <bool kIsLayoutSensitive>
struct ConstantKey {
template <typename H>
friend H AbslHashValue(H h, const ConstantKey& key) {
h = H::combine(std::move(h), key.domain);
return Literal::Hash<H, kIsLayoutSensitive, /*kByteLimit=*/64>(
std::move(h), key.hlo->literal());
}
friend bool operator==(const ConstantKey& lhs, const ConstantKey& rhs) {
return lhs.domain == rhs.domain &&
(kIsLayoutSensitive ? Shape::Equal()
: Shape::Equal().IgnoreLayout())(
lhs.hlo->shape(), rhs.hlo->shape()) &&
lhs.hlo->literal().Equal(rhs.hlo->literal(), kIsLayoutSensitive);
}
HloConstantInstruction* hlo;
int64_t domain;
};
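// Replaces duplicate constants within `computation` with a single
// representative, respecting domain boundaries. If `only_scalars` is set,
// non-scalar constants are left alone. Returns true if anything changed.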
template <bool kIsLayoutSensitive>
absl::StatusOr<bool> CombineConstants(HloComputation* computation,
bool only_scalars) {
std::unique_ptr<HloDomainMap> domain_map;
if (absl::c_any_of(computation->instructions(),
[&](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDomain;
})) {
TF_ASSIGN_OR_RETURN(domain_map, HloDomainMap::Create(computation, ""));
}
absl::flat_hash_set<ConstantKey<kIsLayoutSensitive>> constants;
int64_t combined = 0;
auto inst_it = computation->instructions().begin();
while (inst_it != computation->instructions().end()) {
HloInstruction* instruction = *inst_it;
++inst_it;
if (only_scalars && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
HloInstruction* match = nullptr;
if (auto* constant_inst = DynCast<HloConstantInstruction>(instruction)) {
auto insert_result = constants.insert(ConstantKey<kIsLayoutSensitive>{
constant_inst,
(domain_map != nullptr ? domain_map->GetDomainId(instruction) : 0)});
if (!insert_result.second) {
match = insert_result.first->hlo;
}
}
if (match != nullptr) {
TF_CHECK_OK(instruction->ReplaceAllUsesWith(match));
TF_CHECK_OK(computation->RemoveInstruction(instruction));
++combined;
}
}
VLOG(4) << "Combined " << combined << " constants and iotas in "
<< computation->name() << " computation";
return combined > 0;
}
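// Hash key for general CSE. Combines the opcode, shape dimensions, operand
// unique ids (sorted for commutative binaries, with iota operands collapsed
// to a single id so equivalent iotas hash alike), the root opcodes of called
// computations, and opcode-specific payloads such as slice bounds, padding
// and window configs, and dot/convolution dimension numbers.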
struct CseKey {
template <typename H>
friend H AbslHashValue(H h, const CseKey& key) {
auto instruction = key.hlo;
h = H::combine(std::move(h), instruction->opcode(),
instruction->shape().dimensions());
auto window_hash = [](H h, const Window& window) {
const auto& window_dims = window.dimensions();
for (const auto& window_dim : window_dims) {
h = H::combine(std::move(h), window_dim.size(), window_dim.stride(),
window_dim.padding_low(), window_dim.padding_high(),
window_dim.window_dilation(), window_dim.base_dilation(),
window_dim.window_reversal());
}
return H::combine(std::move(h), window_dims.size());
};
if (HloOpcodeIsBinaryCommutative(instruction->opcode())) {
CHECK_EQ(instruction->operand_count(), 2);
auto id0 = instruction->operand(0)->unique_id();
if (instruction->operand(0)->opcode() == HloOpcode::kIota) {
id0 = 0;
}
auto id1 = instruction->operand(1)->unique_id();
if (instruction->operand(1)->opcode() == HloOpcode::kIota) {
id1 = 0;
}
if (id0 > id1) {
std::swap(id0, id1);
}
h = H::combine(std::move(h), id0, id1);
} else {
for (auto operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kIota) {
continue;
}
h = H::combine(std::move(h), operand->unique_id());
}
}
for (auto c : instruction->called_computations()) {
h = H::combine(std::move(h), c->root_instruction()->opcode());
}
switch (instruction->opcode()) {
case HloOpcode::kSlice:
return H::combine(std::move(h), instruction->slice_starts(),
instruction->slice_strides());
case HloOpcode::kPad: {
const auto& padding_dims = instruction->padding_config().dimensions();
for (const auto& padding_dim : padding_dims) {
h = H::combine(std::move(h), padding_dim.edge_padding_low(),
padding_dim.edge_padding_high(),
padding_dim.interior_padding());
}
h = H::combine(std::move(h), padding_dims.size());
return std::move(h);
}
case HloOpcode::kDot: {
const auto& dot_dimension_numbers =
instruction->dot_dimension_numbers();
h = H::combine(
std::move(h),
absl::MakeSpan(dot_dimension_numbers.lhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.lhs_batch_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_batch_dimensions()));
return std::move(h);
}
case HloOpcode::kConvolution: {
const auto& conv_dimension_numbers =
instruction->convolution_dimension_numbers();
h = H::combine(
std::move(h), conv_dimension_numbers.input_batch_dimension(),
conv_dimension_numbers.input_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.input_spatial_dimensions()),
conv_dimension_numbers.kernel_input_feature_dimension(),
conv_dimension_numbers.kernel_output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.kernel_spatial_dimensions()),
conv_dimension_numbers.output_batch_dimension(),
conv_dimension_numbers.output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.output_spatial_dimensions()));
return window_hash(std::move(h), instruction->window());
}
case HloOpcode::kReduceWindow:
return window_hash(std::move(h), instruction->window());
case HloOpcode::kConcatenate:
case HloOpcode::kBroadcast:
case HloOpcode::kTranspose:
case HloOpcode::kReduce:
return H::combine(std::move(h), instruction->dimensions());
case HloOpcode::kGetTupleElement:
return H::combine(std::move(h), instruction->tuple_index());
case HloOpcode::kCompare:
return H::combine(
std::move(h),
Cast<HloCompareInstruction>(instruction)->direction());
default:
return std::move(h);
}
}
HloInstruction* hlo;
};
}
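// Runs CSE over every eligible computation: first deduplicates constants,
// then folds instructions with identical CseKeys into a single
// representative, deduplicates equivalent iota operands in place, and
// finally redirects get-tuple-element users of multi-output fusions whose
// roots contain duplicate operands.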
absl::StatusOr<bool> HloCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
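// Two instructions are interchangeable as operands if they are the same
// instruction, or both are iotas with the same iota dimension and
// compatible (or, when layout-sensitive, equal) shapes.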
const auto eq_instructions = [&](const HloInstruction* a,
const HloInstruction* b) {
if (a == b) {
return true;
}
if (a->opcode() != b->opcode() || a->opcode() != HloOpcode::kIota) {
return false;
}
return a->dimensions(0) == b->dimensions(0) &&
(is_layout_sensitive_
? ShapeUtil::Equal(a->shape(), b->shape())
: ShapeUtil::Compatible(a->shape(), b->shape()));
};
const auto eq_computations = [](const HloComputation* lhs,
const HloComputation* rhs) {
return *lhs == *rhs;
};
auto cse_equal = [&](const CseKey& lhs, const CseKey& rhs) {
return lhs.hlo->IdenticalIgnoringCommutativeOperandOrder(
*rhs.hlo, eq_instructions, eq_computations, is_layout_sensitive_,
/*sharding_sensitive=*/true);
};
for (auto* computation : module->computations(execution_threads)) {
if (only_fusion_computations_ && !computation->IsFusionComputation()) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool combined,
is_layout_sensitive_
? CombineConstants<true>(computation, only_scalars_)
: CombineConstants<false>(computation, only_scalars_));
changed |= combined;
absl::flat_hash_set<CseKey, absl::Hash<CseKey>, decltype(cse_equal)>
representatives(computation->instruction_count() + 1,
absl::Hash<CseKey>{}, cse_equal);
for (auto instruction : computation->MakeInstructionPostOrder()) {
if (instruction->operand_count() == 0 &&
instruction->opcode() != HloOpcode::kPartitionId &&
instruction->opcode() != HloOpcode::kReplicaId) {
continue;
}
if (instruction->HasSideEffect()) {
continue;
}
if (only_scalars_ && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
auto pair = representatives.insert(CseKey{instruction});
if (!pair.second) {
HloInstruction* equivalent_instruction = pair.first->hlo;
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(equivalent_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(
instruction, /*cleanup=*/std::nullopt,
ignore_control_dependencies_));
VLOG(4) << "Replaced " << instruction->name() << " with "
<< equivalent_instruction->name();
changed = true;
continue;
}
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
HloInstruction* a = instruction->mutable_operand(i);
if (a->opcode() != HloOpcode::kIota) {
continue;
}
for (int64_t j = i + 1; j < instruction->operand_count(); ++j) {
HloInstruction* b = instruction->mutable_operand(j);
if (a == b || !eq_instructions(a, b)) {
continue;
}
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(j, a));
changed = true;
if (b->IsDead()) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(b));
}
}
}
}
if (auto fusion = computation->FusionInstruction()) {
if (fusion->IsMultiOutputFusion()) {
absl::flat_hash_map<const HloInstruction*, int64_t>
root_to_unique_index;
int64_t root_index = 0;
HloInstruction* root = computation->root_instruction();
for (const HloInstruction* hlo : root->operands()) {
if (root_to_unique_index.find(hlo) == root_to_unique_index.end()) {
root_to_unique_index[hlo] = root_index;
}
++root_index;
}
if (root_to_unique_index.size() < root->operand_count()) {
for (HloInstruction* user : fusion->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
const HloInstruction* fusion_root =
root->operand(user->tuple_index());
user->set_tuple_index(root_to_unique_index[fusion_root]);
}
}
}
}
}
}
return changed;
}
} | #include "xla/service/hlo_cse.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "absl/algorithm/container.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;
class HloCseTest : public HloTestBase {
protected:
HloCseTest() {}
};
TEST_F(HloCseTest, CombineTwoConstants) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
HloInstruction* constant = *computation->instructions().begin();
EXPECT_EQ(42.0f, constant->literal().Get<float>({}));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR0<float>(84.0);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, CombineTwoConstantsDifferentLayouts) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, ConstantsSameValueDifferentType) {
auto builder = HloComputation::Builder(TestName());
std::vector<HloInstruction*> constants;
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint64_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
for (int64_t i = 0; i < constants.size(); ++i) {
constants[i] = builder.AddInstruction(
HloInstruction::CreateConvert(shape_r0, constants[i]));
}
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, constants[0], constants[1]));
for (int64_t i = 2; i < constants.size(); ++i) {
root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, root, constants[i]));
}
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(20, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(18, computation->instruction_count());
}
TEST_F(HloCseTest, NonscalarConstants) {
auto builder = HloComputation::Builder(TestName());
auto common_constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto common_constant2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto uncommon_constant =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}})));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
{common_constant1, common_constant2, uncommon_constant}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple,
op::Tuple(common_constant1, common_constant2, uncommon_constant));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand,
::testing::AnyOf(common_constant1, common_constant2));
EXPECT_THAT(tuple,
op::Tuple(first_operand, first_operand, uncommon_constant));
}
TEST_F(HloCseTest, IdenticalInstructions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp3 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2, exp3}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2, exp3));
HloCSE cse(true);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2, exp3));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand, first_operand));
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsAndBodiesSameInput) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsAndBodiesSameInput
%body (param: (f32[], f32[])) -> (f32[], f32[]) {
%param = (f32[], f32[]) parameter(0)
%gte0 = get-tuple-element(%param), index=0
%gte1 = get-tuple-element(%param), index=1
%add = add(%gte0, %gte1)
ROOT %tuple = tuple(%gte0, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsAndBodiesSameInput {
%c0 = f32[] constant(1)
%c1 = f32[] constant(2)
%t = tuple(c0, c1)
%while = while(%t), condition=%condition, body=%body
%while.1 = while(%t), condition=%condition.1, body=%body
ROOT r = tuple(while, while.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(6, computation->instruction_count());
HloCSE cse(true);
EXPECT_TRUE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsSameInputAndDifferentBodies) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsSameInputAndDifferentBodies
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element(%param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%body2 {
%param.1 = (f32[], f32[]) parameter(0)
%get-tuple-element.2 = get-tuple-element(%param.1), index=0
%get-tuple-element.3 = get-tuple-element(%param.1), index=1
%sub = subtract(%get-tuple-element.2, %get-tuple-element.3)
ROOT %tuple.2 = tuple(%get-tuple-element.2, %sub)
}
%condition (param.2: (f32[], f32[])) -> pred[] {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 (param.3: (f32[], f32[])) -> pred[] {
%param.3 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsSameInputAndDifferentBodies {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(f32[] %constant.2, f32[] %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body2
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalBodiesAndInputDifferentConditions) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalBodiesAndInputDifferentConditions
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element((f32[], f32[]) %param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(true)
}
ENTRY %WhileLoopsIdenticalBodiesAndInputDifferentConditions {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(%constant.2, %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand));
}
TEST_F(HloCseTest, FusionInternalCSE) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape_r0, "p0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_r0, "p1"));
auto add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kMultiply, add1, add2));
auto computation = module->AddEntryComputation(builder.Build());
auto fused_computation =
computation
->CreateFusionInstruction({mul, add1, add2},
HloInstruction::FusionKind::kLoop)
->fused_instructions_computation();
EXPECT_EQ(5, fused_computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(4, fused_computation->instruction_count());
auto root = fused_computation->root_instruction();
EXPECT_THAT(root, op::Multiply(root->operand(0), root->operand(0)));
}
TEST_F(HloCseTest, IdenticalExpressions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate1, exp1));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate2, exp2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add1, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(8, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(op::Add(negate1, exp1), op::Add(negate2, exp2)));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(5, computation->instruction_count());
auto operand = tuple->operand(0);
EXPECT_THAT(tuple, op::Tuple(operand, operand));
EXPECT_THAT(operand, op::Add(op::Negate(), op::Exp()));
}
TEST_F(HloCseTest, DoNotCombineRng) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng1 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
auto rng2 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, rng1, rng2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
uint32_t count_before = computation->instruction_count();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
uint32_t count_after = computation->instruction_count();
EXPECT_EQ(count_before, count_after);
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
}
TEST_F(HloCseTest, DoNotCombineOpsWithDifferentShardings) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
constant.68 = s32[1]{0} constant({0})
custom-call.82 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={replicated}
custom-call.1343 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={manual}
custom-call.1344 = s32[8]{0} custom-call(custom-call.1343), custom_call_target="SPMDShardToFullShape", sharding={devices=[8]0,1,2,3,4,5,6,7}
ROOT tuple = (s32[1]{0}, s32[8]{0}) tuple(custom-call.82, custom-call.1344)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_FALSE(cse.Run(m.get()).value());
}
TEST_F(HloCseTest, DoNotCombineCallsToImpureFunctions) {
auto module = CreateNewVerifiedModule();
HloComputation* rng_function = nullptr;
{
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName() + "_rng_fun");
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
scalar_shape, RandomDistribution::RNG_UNIFORM, {constant1, constant2}));
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param"));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, rng, param));
rng_function = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* computation = nullptr;
{
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({5.0f})));
auto rng1 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
auto rng2 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, rng1, rng2));
computation = module->AddEntryComputation(builder.Build());
}
EXPECT_EQ(4, computation->instruction_count());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(), op::Map()));
VLOG(3) << "before: " << module->ToString();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
VLOG(3) << "after: " << module->ToString();
EXPECT_EQ(4, computation->instruction_count());
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(op::Constant()), op::Map(op::Constant())));
}
TEST_F(HloCseTest, CompareComputations) {
const char* const hlo_string = R"(
HloModule m
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = add(add_lhs, add_rhs)
}
add_computation2 {
add_lhs2 = f32[] parameter(0)
add_rhs2 = f32[] parameter(1)
ROOT add_root2 = add(add_lhs2, add_rhs2)
}
ENTRY entry {
p = f32[10]{0} parameter(0)
c = f32[] constant(0)
r1 = reduce(p, c), dimensions={0}, to_apply=add_computation
r2 = reduce(p, c), dimensions={0}, to_apply=add_computation2
ROOT f2 = tuple(r1, r2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
}
TEST_F(HloCseTest, Domain) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = f32[] parameter(0), sharding={maximal device=0}
%domain.0 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.1 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.2 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=2}}
%negate.0 = f32[] negate(%domain.0)
%negate.1 = f32[] negate(%domain.1)
%negate.2 = f32[] negate(%domain.2)
%domain.3 = f32[] domain(%negate.0),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.4 = f32[] domain(%negate.1),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.5 = f32[] domain(%negate.2),
domain={kind="sharding", entry={maximal device=2}, exit={maximal device=0}}
%add = f32[] add(%domain.3, %domain.4)
ROOT %sub = f32[] subtract(%add, %domain.5)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
const HloInstruction* sub = m->entry_computation()->root_instruction();
const HloInstruction* add = sub->operand(0);
EXPECT_EQ(add->operand(0), add->operand(1));
EXPECT_NE(add->operand(0), sub->operand(1));
EXPECT_NE(add->operand(1), sub->operand(1));
}
TEST_F(HloCseTest, Iota) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
i1 = s64[16,16] iota(), iota_dimension=0
i2 = s64[16,16] iota(), iota_dimension=0
i3 = s64[17,16] iota(), iota_dimension=0
i4 = s64[16,16] iota(), iota_dimension=1
ROOT root = (s64[16,16], s64[16,16], s64[17,16], s64[16,16]) tuple(i1, i2, i3, i4)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
EXPECT_NE(root->operand(0), root->operand(2));
EXPECT_NE(root->operand(0), root->operand(3));
}
TEST_F(HloCseTest, OptimizationBarrier) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%add.0 = f32[] add(%param.0, %param.1)
%cse_tmp.0 = (f32[], f32[], f32[]) tuple(%param.0, %param.1, %add.0)
%cse_tmp.1 = (f32[], f32[], f32[]) opt-barrier(%cse_tmp.0)
%param.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=0
%param.1.1 = f32[] get-tuple-element(%cse_tmp.1), index=1
%add.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=2
%add.1 = f32[] add(%param.0.1, %param.1.1)
ROOT %add.2 = f32[] add(%add.1, %add.0.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_FALSE(changed);
}
TEST_F(HloCseTest, OnlyScalar) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%const1 = f32[] constant(1)
%const2 = f32[] constant(1)
%const3 = f32[2] constant({1,2})
%const4 = f32[2] constant({1,2})
%add.0 = f32[] add(%const1, %const2)
%add.1 = f32[2] add(%const3, %const4)
ROOT out = (f32[], f32[2]) tuple(%add.0, %add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(/*is_layout_sensitive=*/false, /*only_fusion_computations=*/false,
/*ignore_control_dependencies=*/false, /*only_scalars=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(absl::c_count_if(m->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return instruction->IsConstant();
}),
3);
}
class HloCseCustomCallTest
: public HloCseTest,
public ::testing::WithParamInterface<std::tuple<
std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>> {};
TEST_P(HloCseCustomCallTest, DoIt) {
std::string op1 = std::get<0>(GetParam());
std::string op2 = std::get<1>(GetParam());
bool should_cse = std::get<2>(GetParam());
const char* const hlo_string_tmpl = R"(
HloModule m
ENTRY entry {
p0 = f32[1,1,1] parameter(0)
op0 = $0
op1 = $0
op2 = $1
ROOT root = tuple(op0, op1, op2)
}
)";
std::string hlo_string = absl::Substitute(hlo_string_tmpl, op1, op2);
SCOPED_TRACE(absl::StrCat("Module before CSE:\n", hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1))
<< "Identical ops should be CSE'ed";
if (should_cse) {
EXPECT_EQ(root->operand(0), root->operand(2)) << "Ops should be CSE'ed";
} else {
EXPECT_NE(root->operand(0), root->operand(2)) << "Ops should not be CSE'ed";
}
}
static std::vector<
std::tuple<std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>>
CustomCallTests() {
auto build = [](absl::string_view args1, absl::string_view args2) {
absl::string_view prefix =
"f32[] custom-call(p0), custom_call_target=\"foo\", ";
return std::make_tuple(absl::StrCat(prefix, args1),
absl::StrCat(prefix, args2), false);
};
return {
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"foo\", "
"metadata={op_name=\"bar\"}",
true,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0, p0), custom_call_target=\"foo\"",
false,
},
{
"f32[1] custom-call(p0), custom_call_target=\"foo\"",
"f32[2] custom-call(p0), custom_call_target=\"foo\"",
false,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"bar\"",
false,
},
build("window={size=1}", "window={size=2}"),
build("dim_labels=b0f_0oi->b0f", "dim_labels=b0f_0oi->bf0"),
build("backend_config=\"foo\"", "backend_config=\"bar\""),
build("literal=s32[] 0", "literal=s32[] 1"),
build("literal=s32[] 0", "literal=f32[] 0"),
build("operand_precision={high,default}",
"operand_precision={high, high}"),
build("api_version=API_VERSION_STATUS_RETURNING",
"api_version=API_VERSION_ORIGINAL"),
build("feature_group_count=0", "feature_group_count=1"),
};
}
INSTANTIATE_TEST_SUITE_P(HloCseCustomCallTestSuite, HloCseCustomCallTest,
::testing::ValuesIn(CustomCallTests()));
TEST_F(HloCseTest, CustomCallCalledComputations) {
const char* const hlo_string = R"(
HloModule m
comp {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY entry {
p0 = f32[] parameter(0)
op0 = f32[] custom-call(p0), custom_call_target="foo", called_computations={comp}
op1 = f32[] custom-call(p0), custom_call_target="foo", called_computations={comp, comp}
ROOT root = tuple(op0, op1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, false);
}
TEST_F(HloCseTest, CustomCallSideEffects) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[] parameter(0)
op0 = f32[] custom-call(p0), custom_call_target="foo", custom_call_has_side_effect=true
op1 = f32[] custom-call(p0), custom_call_target="foo", custom_call_has_side_effect=true
ROOT root = tuple(op0, op1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, false);
}
TEST_F(HloCseTest, IgnoreControlDependencies) {
const char* const hlo_string = R"(
HloModule m
%add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT x = f32[] add(p0, p1)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ar0 = f32[] all-reduce(p0), replica_groups={}, to_apply=%add
ar1 = f32[] all-reduce(p1), replica_groups={}, to_apply=%add, control-predecessors={ar0}
ar2 = f32[] all-reduce(p0), replica_groups={}, to_apply=%add, control-predecessors={ar1}
ROOT root = tuple(ar0, ar1, ar2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(/*is_layout_sensitive=*/false, /*only_fusion_computations=*/false,
/*ignore_control_dependencies=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
}
TEST_F(HloCseTest, MultiOutputFusion) {
const char* const hlo_string = R"(
HloModule m
f {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add.0 = f32[] add(p0, p1)
add.1 = f32[] add(p0, p1)
ROOT res = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
fusion = (f32[], f32[]) fusion(p0, p1), kind=kLoop, calls=f
gte0 = f32[] get-tuple-element(fusion), index=0
gte1 = f32[] get-tuple-element(fusion), index=1
ROOT res = (f32[], f32[]) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
HloInstruction* root = m->entry_computation()->root_instruction();
HloInstruction* add0;
HloInstruction* add1;
HloInstruction* gte0;
HloInstruction* gte1;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(>e0),
m::GetTupleElement(>e1))));
EXPECT_EQ(gte0, gte1);
EXPECT_EQ(gte0->tuple_index(), 0);
const HloInstruction* fusion = gte0->operand(0);
ASSERT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Add(&add0, m::Parameter(0), m::Parameter(1)),
m::Add(&add1, m::Parameter(0), m::Parameter(1)))));
EXPECT_EQ(add0, add1);
}
class HloCseCommutativeOpTest
: public HloCseTest,
public ::testing::WithParamInterface<std::string /*op*/> {};
TEST_P(HloCseCommutativeOpTest, DoIt) {
std::string op = GetParam();
const char* kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0)
p1 = s32[10] parameter(1)
op1 = s32[10] $0(p0, p1)
op2 = s32[10] $0(p1, p0)
ROOT t = tuple(op1, op2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
absl::Substitute(kModuleStr, op)));
ASSERT_TRUE(HloCSE(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* op0;
const HloInstruction* op1;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Op(&op0), m::Op(&op1))));
EXPECT_EQ(op0, op1);
}
INSTANTIATE_TEST_SUITE_P(AlgebraicSimplifierCanonicalizeCommutativeTestSuite,
HloCseCommutativeOpTest,
::testing::Values("add", "multiply", "and", "or",
"xor", "minimum", "maximum"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a162ff7a-8c7a-4e67-a748-3a3d8af56cdc | cpp | tensorflow/tensorflow | hlo_value_semantics_analysis | third_party/xla/xla/service/hlo_value_semantics_analysis.cc | third_party/xla/xla/service/hlo_value_semantics_analysis_test.cc | #include "xla/service/hlo_value_semantics_analysis.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
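// Groups host-transfer send/recv instructions by their rendezvous name so
// that either side of a host round-trip can look up its partner.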
SendRecvGroupMap::SendRecvGroupMap(const HloModule& hlo_module) {
for (HloComputation* computation : hlo_module.computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kSend &&
instruction->opcode() != HloOpcode::kRecv) {
continue;
}
std::string rendezvous = instruction->frontend_attributes().map().at(
kXlaHostTransferRendezvousNameAttr);
auto send_recv_iter = host_transfer_rendezvous_map_.find(rendezvous);
if (send_recv_iter == host_transfer_rendezvous_map_.end()) {
auto insert_success = host_transfer_rendezvous_map_.insert(
{rendezvous, SendRecvGroup{nullptr, nullptr}});
send_recv_iter = insert_success.first;
}
if (instruction->opcode() == HloOpcode::kSend) {
send_recv_iter->second.send = instruction;
} else {
send_recv_iter->second.recv = instruction;
}
}
}
}
absl::StatusOr<HloInstruction*> SendRecvGroupMap::GetMatchingSendOrRecv(
HloInstruction* send_or_recv) const {
if (send_or_recv->opcode() != HloOpcode::kSend &&
send_or_recv->opcode() != HloOpcode::kRecv) {
return InvalidArgument("Expecting only send or recv");
}
std::string rendezvous = send_or_recv->frontend_attributes().map().at(
kXlaHostTransferRendezvousNameAttr);
auto send_recv_iter = host_transfer_rendezvous_map_.find(rendezvous);
if (send_recv_iter == host_transfer_rendezvous_map_.end()) {
return Internal("Missing send or recv from send recv group.");
}
if (send_or_recv->opcode() == HloOpcode::kSend) {
return send_recv_iter->second.recv;
}
return send_recv_iter->second.send;
}
bool HloPreOrderDFS::IsReady(const HloInstruction* instruction) const {
for (HloInstruction* user : instruction->users()) {
if (!visited_.contains(user)) {
return false;
}
}
return true;
}
namespace {
std::vector<HloInstruction*> GetAllInstructionsWithZeroUsers(
const HloComputation& computation) {
std::vector<HloInstruction*> results;
for (HloInstruction* instruction : computation.instructions()) {
if (instruction->users().empty()) {
results.push_back(instruction);
}
}
return results;
}
}
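// Visits the computation from its zero-user roots toward the parameters. An
// operand is only pushed once all of its users have been visited, so every
// user is processed before its operands.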
absl::Status HloPreOrderDFS::Run(const HloComputation& computation,
DfsHloVisitorBase<HloInstruction*>* visitor) {
stack_.clear();
visited_.clear();
std::vector<HloInstruction*> roots =
GetAllInstructionsWithZeroUsers(computation);
for (HloInstruction* root : roots) {
stack_.push_back(root);
}
while (!stack_.empty()) {
HloInstruction* to_visit = stack_.back();
stack_.pop_back();
if (visited_.contains(to_visit)) {
continue;
}
visited_.insert(to_visit);
for (HloInstruction* operand : to_visit->mutable_operands()) {
if (IsReady(operand)) {
stack_.push_back(operand);
}
}
TF_RETURN_IF_ERROR(visitor->Preprocess(to_visit));
TF_RETURN_IF_ERROR(to_visit->Visit(visitor));
TF_RETURN_IF_ERROR(visitor->Postprocess(to_visit));
}
return absl::OkStatus();
}
namespace {
template <typename T>
std::string ToString(T element) {
return absl::StrCat(element);
}
template <>
std::string ToString(const HloValueSemantics* element) {
return element->ToString();
}
template <typename T>
std::string ToString(const ShapeTree<T>& tree) {
std::string str;
tree.ForEachElement([&str, &tree](const ShapeIndex& shape_index, T element) {
auto subshape = ShapeUtil::GetSubshape(tree.shape(), (shape_index));
absl::StrAppend(&str, shape_index.ToString(), ", ", subshape.ToString(),
": ", ToString(element), "\n");
});
return str;
}
}
absl::Status EinsumDepthAnalysis::RunInternal(
const HloComputation& computation,
const std::optional<ShapeTree<int>>& root_depth) {
std::vector<HloInstruction*> roots =
GetAllInstructionsWithZeroUsers(computation);
for (HloInstruction* root : roots) {
if (root == computation.root_instruction()) {
if (root_depth.has_value()) {
TF_RETURN_IF_ERROR(SetInstructionDepth(root, *root_depth));
} else {
TF_RETURN_IF_ERROR(SetInstructionDepth(root, 0));
}
} else {
GetOrCreateDepthTree(root);
}
}
HloPreOrderDFS dfs;
return dfs.Run(computation, this);
}
absl::StatusOr<std::unique_ptr<EinsumDepthAnalysis>> EinsumDepthAnalysis::Run(
const HloComputation& computation,
const SendRecvGroupMap& send_recv_group_map) {
EinsumDepthAnalysis* analysis_ptr =
new EinsumDepthAnalysis(send_recv_group_map);
std::unique_ptr<EinsumDepthAnalysis> analysis(analysis_ptr);
TF_RETURN_IF_ERROR(analysis->RunInternal(computation, std::nullopt));
return analysis;
}
namespace {
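// Depth bookkeeping, as used by MergeDepth/SetDepth below: a non-negative
// depth means the value reaches the computation's root, with the magnitude
// counting the einsums (dots and convolutions) between the value and the
// root; a negative depth tracks the same count for values that never reach
// the root.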
int MergeDepth(int original_depth, int new_depth) {
if (new_depth >= 0) {
return std::max(original_depth, new_depth);
}
if (new_depth < 0 && original_depth < 0) {
return std::min(original_depth, new_depth);
}
return original_depth;
}
void SetDepth(ShapeTree<int>& depth_tree, int depth) {
depth_tree.ForEachMutableElement(
[depth, &depth_tree](const ShapeIndex& shape_index, int* depth_ptr) {
if (depth_tree.IsLeaf(shape_index)) {
*depth_ptr = MergeDepth(*depth_ptr, depth);
}
});
}
void SetDepth(ShapeTree<int>& depth_tree, const ShapeTree<int>& source) {
depth_tree.ForEachMutableElement(
[&depth_tree, &source](const ShapeIndex& shape_index, int* depth_ptr) {
if (depth_tree.IsLeaf(shape_index)) {
*depth_ptr = MergeDepth(*depth_ptr, source.element(shape_index));
}
});
}
int GetMaxDepth(const ShapeTree<int>& depth_tree) {
int max_depth = -1;
depth_tree.ForEachElement(
[&max_depth](const ShapeIndex& shape_index, int depth) {
max_depth = std::max(max_depth, depth);
return absl::OkStatus();
});
if (max_depth >= 0) {
return max_depth;
}
depth_tree.ForEachElement(
[&max_depth](const ShapeIndex& shape_index, int depth) {
max_depth = std::min(max_depth, depth);
return absl::OkStatus();
});
return max_depth;
}
void SetDepthFromTupleDepth(ShapeTree<int>& depth_tree,
const ShapeTree<int>& tuple_depth_tree,
int tuple_index) {
depth_tree.ForEachMutableElement(
[&depth_tree, &tuple_depth_tree, tuple_index](
const ShapeIndex& shape_index, int* depth_ptr) {
if (depth_tree.IsLeaf(shape_index)) {
ShapeIndex output_index = shape_index;
output_index.push_front(tuple_index);
*depth_ptr =
MergeDepth(*depth_ptr, tuple_depth_tree.element(output_index));
}
});
}
}
ShapeTree<int>& EinsumDepthAnalysis::GetOrCreateDepthTree(
const HloInstruction* instruction) {
auto depth_iter = einsum_depth_map_.find(instruction);
if (depth_iter == einsum_depth_map_.end()) {
ShapeTree<int> depth_tree(instruction->shape(), -1);
auto inserted = einsum_depth_map_.insert(
std::make_pair(instruction, std::move(depth_tree)));
depth_iter = inserted.first;
}
return depth_iter->second;
}
ShapeTree<int>& EinsumDepthAnalysis::GetDepthTreeOrDie(
const HloInstruction* instruction) {
auto depth_iter = einsum_depth_map_.find(instruction);
CHECK(depth_iter != einsum_depth_map_.end())
<< "No depth tree found for instruction: " << instruction->ToString();
return depth_iter->second;
}
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
const HloInstruction* instruction, int depth) {
ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction);
SetDepth(depth_tree, depth);
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
const HloInstruction* instruction, const ShapeTree<int>& depth) {
ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction);
SetDepth(depth_tree, depth);
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::SetInstructionDepthFromTupleDepth(
const HloInstruction* instruction, const ShapeTree<int>& tuple_depth_tree,
int tuple_index) {
ShapeTree<int>& depth_tree = GetOrCreateDepthTree(instruction);
SetDepthFromTupleDepth(depth_tree, tuple_depth_tree, tuple_index);
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::DefaultAction(HloInstruction* instruction) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction);
int max_depth = GetMaxDepth(depth_tree);
for (int operand_index = 0; operand_index < instruction->operand_count();
++operand_index) {
const HloInstruction* operand = instruction->operand(operand_index);
TF_RETURN_IF_ERROR(SetInstructionDepth(operand, max_depth));
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleTuple(HloInstruction* tuple) {
return HandleTupleLike(tuple);
}
absl::Status EinsumDepthAnalysis::HandleAllReduce(HloInstruction* all_reduce) {
if (all_reduce->shape().IsArray()) {
return DefaultAction(all_reduce);
}
return HandleTupleLike(all_reduce);
}
absl::Status EinsumDepthAnalysis::HandleTupleLike(HloInstruction* tuple_like) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(tuple_like);
for (int operand_index = 0; operand_index < tuple_like->operand_count();
++operand_index) {
HloInstruction* operand = tuple_like->mutable_operand(operand_index);
ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
SetDepthFromTupleDepth(operand_depth, depth_tree, operand_index);
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(get_tuple_element);
HloInstruction* operand = get_tuple_element->mutable_operand(0);
int tuple_index = get_tuple_element->tuple_index();
ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
operand_depth.ForEachMutableElement(
[&operand_depth, &depth_tree, tuple_index](const ShapeIndex& shape_index,
int* depth_ptr) {
if (shape_index.empty() || shape_index.front() != tuple_index) {
return;
}
if (operand_depth.IsLeaf(shape_index)) {
ShapeIndex output_index = shape_index;
output_index.pop_front();
*depth_ptr = MergeDepth(*depth_ptr, depth_tree.element(output_index));
}
});
return absl::OkStatus();
}
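// Dots and convolutions are the "einsums" being counted: each one moves its
// operands one step further from the root, so the operand depth is the
// instruction's depth incremented away from zero.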
absl::Status EinsumDepthAnalysis::HandleDepthIncrementInstruction(
HloInstruction* instruction) {
ShapeTree<int>& depth_tree = GetDepthTreeOrDie(instruction);
int instruction_depth = depth_tree.element({});
for (HloInstruction* operand : instruction->mutable_operands()) {
TF_RETURN_IF_ERROR(SetInstructionDepth(
operand, instruction_depth >= 0 ? instruction_depth + 1
: instruction_depth - 1));
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleDot(HloInstruction* dot) {
return HandleDepthIncrementInstruction(dot);
}
absl::Status EinsumDepthAnalysis::HandleConvolution(
HloInstruction* convolution) {
return HandleDepthIncrementInstruction(convolution);
}
absl::Status EinsumDepthAnalysis::HandleCall(HloInstruction* call) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(call);
return HandleCalledComputation(*call->called_computations()[0], depth_tree,
call->operands());
}
absl::Status EinsumDepthAnalysis::HandleFusion(HloInstruction* fusion) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(fusion);
return HandleCalledComputation(*fusion->called_computations()[0], depth_tree,
fusion->operands());
}
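// A while body may feed values back to itself through its parameter, so body
// depths are propagated repeatedly: whenever the body parameter gains a
// non-negative depth for a leaf whose root depth is still unknown, the body
// is re-run with the updated root depths until a fixed point is reached.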
absl::Status EinsumDepthAnalysis::HandleWhile(HloInstruction* xla_while) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(xla_while);
int max_depth = GetMaxDepth(depth_tree);
HloComputation* condition_computation = xla_while->while_condition();
HloInstruction* condition_root = condition_computation->root_instruction();
ShapeTree<int> condition_depth(condition_root->shape(), max_depth);
TF_RETURN_IF_ERROR(HandleCalledComputation(
*condition_computation, condition_depth, xla_while->operands()));
const ShapeTree<int>* root_depth_ptr = &depth_tree;
HloComputation* body_computation = xla_while->while_body();
bool run_depth_propagation_on_body = true;
ShapeTree<int>& root_depth =
GetOrCreateDepthTree(body_computation->root_instruction());
while (run_depth_propagation_on_body) {
run_depth_propagation_on_body = false;
TF_RETURN_IF_ERROR(HandleCalledComputation(
*body_computation, *root_depth_ptr, xla_while->operands()));
HloInstruction* operand = body_computation->parameter_instruction(0);
const ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
root_depth.ForEachMutableElement(
[&run_depth_propagation_on_body, &root_depth, &operand_depth](
const ShapeIndex& shape_index, int* depth_ptr) {
if (!root_depth.IsLeaf(shape_index)) {
return;
}
if (root_depth.element(shape_index) < 0 &&
operand_depth.element(shape_index) >= 0) {
*depth_ptr = operand_depth.element(shape_index);
run_depth_propagation_on_body = true;
}
});
root_depth_ptr = &root_depth;
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleConditional(
HloInstruction* conditional) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(conditional);
TF_RETURN_IF_ERROR(
SetInstructionDepth(conditional->operands()[0], depth_tree));
for (int i = 0; i < conditional->branch_count(); ++i) {
TF_RETURN_IF_ERROR(
HandleCalledComputation(*conditional->called_computations()[i],
depth_tree, {conditional->operands()[i + 1]}));
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleCalledComputation(
const HloComputation& called_computation, const ShapeTree<int>& root_depth,
absl::Span<HloInstruction* const> operands) {
TF_RETURN_IF_ERROR(RunInternal(called_computation,
std::optional<ShapeTree<int>>(root_depth)));
for (int i = 0; i < operands.size(); ++i) {
HloInstruction* operand = operands[i];
HloInstruction* parameter = called_computation.parameter_instruction(i);
const ShapeTree<int>& parameter_depth = GetOrCreateDepthTree(parameter);
TF_RETURN_IF_ERROR(SetInstructionDepth(operand, parameter_depth));
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleAfterAll(HloInstruction* after_all) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(after_all);
int max_depth = GetMaxDepth(depth_tree);
for (HloInstruction* operand_token : after_all->mutable_operands()) {
CHECK(operand_token->shape().IsToken());
TF_RETURN_IF_ERROR(SetInstructionDepth(operand_token, max_depth));
}
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleSend(HloInstruction* send) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send);
HloInstruction* send_buffer = send->mutable_operand(0);
ShapeTree<int>& send_buffer_depth = GetOrCreateDepthTree(send_buffer);
SetDepthFromTupleDepth(send_buffer_depth, depth_tree, 0);
int max_depth = GetMaxDepth(depth_tree);
HloInstruction* token = send->mutable_operand(1);
return SetInstructionDepth(token, max_depth);
}
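// Depth flows backwards across a host transfer: the depth recorded for this
// recv's data is copied onto the matching send (found via SendRecvGroupMap),
// while token-shaped elements just receive the maximum depth.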
absl::Status EinsumDepthAnalysis::HandleRecv(HloInstruction* recv) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv);
TF_ASSIGN_OR_RETURN(HloInstruction * send,
send_recv_group_map_->GetMatchingSendOrRecv(recv));
  CHECK(send) << "No matching send found for recv: " << recv->name()
              << " in send_recv_group_map: " << recv->ToString();
ShapeTree<int>& send_depth = GetOrCreateDepthTree(send);
int max_depth = GetMaxDepth(depth_tree);
send_depth.ForEachMutableElement([&depth_tree, &send_depth, max_depth](
const ShapeIndex& index, int* depth) {
if (!send_depth.IsLeaf(index)) {
return;
}
if (index.front() == 0) {
*depth = MergeDepth(*depth, depth_tree.element(index));
return;
}
*depth = MergeDepth(*depth, max_depth);
});
HloInstruction* after_all = recv->mutable_operand(0);
return SetInstructionDepth(after_all, max_depth);
}
absl::Status EinsumDepthAnalysis::HandleSendDone(HloInstruction* send_done) {
HloInstruction* send = send_done->mutable_operand(0);
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(send_done);
int max_depth = GetMaxDepth(depth_tree);
return SetInstructionDepth(send, max_depth);
}
absl::Status EinsumDepthAnalysis::HandleRecvDone(HloInstruction* recv_done) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(recv_done);
int max_depth = GetMaxDepth(depth_tree);
HloInstruction* recv = recv_done->mutable_operand(0);
ShapeTree<int>& recv_depth = GetOrCreateDepthTree(recv);
recv_depth.ForEachMutableElement([&depth_tree, &recv_depth, max_depth](
const ShapeIndex& index, int* depth) {
if (!recv_depth.IsLeaf(index)) {
return;
}
if (index.front() == 0) {
*depth = MergeDepth(*depth, depth_tree.element(index));
return;
}
*depth = MergeDepth(*depth, max_depth);
});
return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleAsyncStart(
HloInstruction* async_start) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_start);
TF_ASSIGN_OR_RETURN(ShapeTree<int> output_depth_tree,
depth_tree.SubShapeTree({1}));
return HandleCalledComputation(*(async_start->async_wrapped_computation()),
output_depth_tree, async_start->operands());
}
absl::Status EinsumDepthAnalysis::HandleAsyncDone(HloInstruction* async_done) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(async_done);
HloInstruction* async_start = async_done->mutable_operand(0);
ShapeTree<int>& async_start_depth = GetOrCreateDepthTree(async_start);
async_start_depth.ForEachMutableElement(
[&depth_tree, &async_start_depth](const ShapeIndex& index, int* depth) {
if (!async_start_depth.IsLeaf(index)) {
return;
}
if (index.front() == 1) {
ShapeIndex output_index = index;
output_index.pop_front();
*depth = MergeDepth(*depth, depth_tree.element(output_index));
}
});
return absl::OkStatus();
}
namespace {
int MergeHeight(int original_height, int new_height) {
return std::max(original_height, new_height);
}
void SetHeight(ShapeTree<int>& height_tree, int height) {
height_tree.ForEachMutableElement(
[height, &height_tree](const ShapeIndex& shape_index, int* height_ptr) {
if (height_tree.IsLeaf(shape_index)) {
*height_ptr = MergeHeight(*height_ptr, height);
}
});
}
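// Copies heights from the subtree of `source` rooted at `source_index` into
// the subtree of `height_tree` rooted at `target_index`, merging (via max)
// with any heights already present.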
void SetHeight(ShapeTree<int>& height_tree, const ShapeTree<int>& source,
const ShapeIndex& source_index = {},
const ShapeIndex& target_index = {}) {
height_tree.ForEachMutableElement(
[&source, &source_index, &target_index](const ShapeIndex& shape_index,
int* height_ptr) {
if (shape_index.size() < target_index.size()) {
return;
}
for (int i = 0; i < target_index.size(); ++i) {
if (shape_index[i] != target_index[i]) {
return;
}
}
ShapeIndex complete_source_index = source_index;
for (int i = target_index.size(); i < shape_index.size(); ++i) {
complete_source_index.push_back(shape_index[i]);
}
*height_ptr =
MergeHeight(*height_ptr, source.element(complete_source_index));
});
}
int GetMaxHeight(const ShapeTree<int>& height_tree) {
int max_height = 0;
height_tree.ForEachElement(
[&max_height](const ShapeIndex& shape_index, int height) {
max_height = std::max(max_height, height);
return absl::OkStatus();
});
return max_height;
}
int GetMaxOperandHeight(HloInstruction* instruction,
const EinsumHeightMap& einsum_height_map) {
int max_height = 0;
for (HloInstruction* operand : instruction->mutable_operands()) {
auto operand_height_iter = einsum_height_map.find(operand);
CHECK(operand_height_iter != einsum_height_map.end())
<< "operand: " << operand->name();
const ShapeTree<int>& operand_height_tree = operand_height_iter->second;
int max_operand_height = GetMaxHeight(operand_height_tree);
max_height = std::max(max_height, max_operand_height);
}
return max_height;
}
}
absl::StatusOr<std::unique_ptr<EinsumHeightAnalysis>> EinsumHeightAnalysis::Run(
const HloComputation& computation,
const SendRecvGroupMap& send_recv_group_map) {
EinsumHeightAnalysis* analysis_ptr =
new EinsumHeightAnalysis(send_recv_group_map);
std::unique_ptr<EinsumHeightAnalysis> analysis(analysis_ptr);
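  // RunInternal is deliberately invoked twice. The handlers below skip
  // instructions that already have a height, so the second pass is cheap; it
  // appears to exist to fill in heights that only become available after a
  // matching send/recv pair was resolved during the first pass.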
TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {}));
TF_RETURN_IF_ERROR(analysis->RunInternal(computation, {}));
return analysis;
}
absl::Status EinsumHeightAnalysis::RunInternal(
const HloComputation& computation,
absl::Span<HloInstruction* const> operands) {
return HandleCalledComputation(computation, operands);
}
ShapeTree<int>& EinsumHeightAnalysis::GetOrCreateHeightTree(
const HloInstruction* instruction) {
auto height_iter = einsum_height_map_.find(instruction);
if (height_iter == einsum_height_map_.end()) {
ShapeTree<int> height_tree(instruction->shape(), 0);
auto inserted = einsum_height_map_.insert(
std::make_pair(instruction, std::move(height_tree)));
height_iter = inserted.first;
}
return height_iter->second;
}
ShapeTree<int>& EinsumHeightAnalysis::GetHeightTreeOrDie(
const HloInstruction* instruction) {
auto height_iter = einsum_height_map_.find(instruction);
CHECK(height_iter != einsum_height_map_.end());
return height_iter->second;
}
bool EinsumHeightAnalysis::HasHeightFor(
const HloInstruction* instruction) const {
return einsum_height_map_.contains(instruction);
}
absl::Status EinsumHeightAnalysis::SetInstructionHeight(
const HloInstruction* instruction, int height) {
ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction);
SetHeight(height_tree, height);
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::SetInstructionHeight(
const HloInstruction* instruction, const ShapeTree<int>& height) {
ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction);
SetHeight(height_tree, height);
return absl::OkStatus();
}
#define RETURN_IF_HEIGHT_EXISTS(instruction) \
if (HasHeightFor(instruction)) { \
return absl::OkStatus(); \
}
absl::Status EinsumHeightAnalysis::HandleHeightIncrementInstruction(
HloInstruction* instruction) {
ShapeTree<int>& height_tree = GetOrCreateHeightTree(instruction);
for (HloInstruction* operand : instruction->mutable_operands()) {
const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
SetHeight(height_tree, operand_height_tree.element({}) + 1);
}
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleCalledComputation(
const HloComputation& computation,
absl::Span<HloInstruction* const> operands) {
if (!operands.empty()) {
if (computation.num_parameters() != operands.size()) {
return absl::InvalidArgumentError(absl::StrCat(
operands.size(), " operands were passed for the computation ",
computation.name(), " with ", computation.num_parameters(),
" parameters."));
}
for (int parameter_index = 0;
parameter_index < computation.num_parameters(); ++parameter_index) {
HloInstruction* parameter =
computation.parameter_instruction(parameter_index);
HloInstruction* operand = operands[parameter_index];
const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
TF_RETURN_IF_ERROR(SetInstructionHeight(parameter, operand_height_tree));
}
}
for (HloInstruction* instruction : computation.instructions()) {
if (instruction->user_count() == 0) {
TF_RETURN_IF_ERROR(instruction->Accept(this));
}
}
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::DefaultAction(HloInstruction* instruction) {
RETURN_IF_HEIGHT_EXISTS(instruction);
int instruction_height = GetMaxOperandHeight(instruction, einsum_height_map_);
return SetInstructionHeight(instruction, instruction_height);
}
absl::Status EinsumHeightAnalysis::HandleTupleLike(HloInstruction* tuple_like) {
ShapeTree<int>& height_tree = GetOrCreateHeightTree(tuple_like);
height_tree.ForEachMutableElement([&height_tree, tuple_like, this](
const ShapeIndex& index, int* height) {
if (!height_tree.IsLeaf(index)) {
return;
}
int operand_index = index.front();
const HloInstruction* operand = tuple_like->operand(operand_index);
const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
ShapeIndex source_index = index;
source_index.pop_front();
*height = MergeHeight(*height, operand_height_tree.element(source_index));
});
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleTuple(HloInstruction* tuple) {
RETURN_IF_HEIGHT_EXISTS(tuple);
return HandleTupleLike(tuple);
}
absl::Status EinsumHeightAnalysis::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
RETURN_IF_HEIGHT_EXISTS(get_tuple_element);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(get_tuple_element);
const ShapeTree<int>& tuple_height_tree =
GetHeightTreeOrDie(get_tuple_element->operand(0));
int tuple_index = get_tuple_element->tuple_index();
SetHeight(height_tree, tuple_height_tree, {tuple_index}, {});
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleDot(HloInstruction* dot) {
RETURN_IF_HEIGHT_EXISTS(dot);
return HandleHeightIncrementInstruction(dot);
}
absl::Status EinsumHeightAnalysis::HandleConvolution(
HloInstruction* convolution) {
RETURN_IF_HEIGHT_EXISTS(convolution);
return HandleHeightIncrementInstruction(convolution);
}
absl::Status EinsumHeightAnalysis::HandleCall(HloInstruction* call) {
RETURN_IF_HEIGHT_EXISTS(call);
TF_RETURN_IF_ERROR(HandleCalledComputation(*(call->called_computations()[0]),
call->mutable_operands()));
const ShapeTree<int>& root_height_tree =
GetHeightTreeOrDie(call->called_computations()[0]->root_instruction());
TF_RETURN_IF_ERROR(SetInstructionHeight(call, root_height_tree));
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleFusion(HloInstruction* fusion) {
RETURN_IF_HEIGHT_EXISTS(fusion);
return HandleCall(fusion);
}
absl::Status EinsumHeightAnalysis::HandleWhile(HloInstruction* xla_while) {
RETURN_IF_HEIGHT_EXISTS(xla_while);
TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_condition()),
xla_while->mutable_operands()));
TF_RETURN_IF_ERROR(HandleCalledComputation(*(xla_while->while_body()),
xla_while->mutable_operands()));
const ShapeTree<int>& root_height_tree =
GetHeightTreeOrDie(xla_while->while_body()->root_instruction());
return SetInstructionHeight(xla_while, root_height_tree);
}
absl::Status EinsumHeightAnalysis::HandleConditional(
HloInstruction* conditional) {
RETURN_IF_HEIGHT_EXISTS(conditional);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(conditional);
for (size_t i = 0; i < conditional->branch_count(); ++i) {
HloComputation* computation = conditional->branch_computation(i);
TF_RETURN_IF_ERROR(HandleCalledComputation(
*computation, {conditional->mutable_operands()[i + 1]}));
ShapeTree<int>& branch_root_height_tree =
GetHeightTreeOrDie(computation->root_instruction());
SetHeight(height_tree, branch_root_height_tree);
}
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleSend(HloInstruction* send) {
RETURN_IF_HEIGHT_EXISTS(send);
HloInstruction* send_buffer = send->mutable_operand(0);
const ShapeTree<int>& send_buffer_height_tree =
GetHeightTreeOrDie(send_buffer);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(send);
SetHeight(height_tree, send_buffer_height_tree, {}, {0});
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleRecv(HloInstruction* recv) {
RETURN_IF_HEIGHT_EXISTS(recv);
TF_ASSIGN_OR_RETURN(HloInstruction * send,
send_recv_group_map_->GetMatchingSendOrRecv(recv));
TF_RETURN_IF_ERROR(send->Accept(this));
HloInstruction* send_buffer = send->mutable_operand(0);
const ShapeTree<int>& send_buffer_height_tree =
GetHeightTreeOrDie(send_buffer);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(recv);
SetHeight(height_tree, send_buffer_height_tree, {}, {0});
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleSendDone(HloInstruction* send_done) {
RETURN_IF_HEIGHT_EXISTS(send_done);
GetOrCreateHeightTree(send_done);
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleRecvDone(HloInstruction* recv_done) {
RETURN_IF_HEIGHT_EXISTS(recv_done);
HloInstruction* recv = recv_done->mutable_operand(0);
const ShapeTree<int>& recv_height_tree = GetHeightTreeOrDie(recv);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(recv_done);
SetHeight(height_tree, recv_height_tree, {0}, {0});
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleAllReduce(HloInstruction* all_reduce) {
RETURN_IF_HEIGHT_EXISTS(all_reduce);
if (all_reduce->shape().IsArray()) {
return DefaultAction(all_reduce);
}
return HandleTupleLike(all_reduce);
}
absl::Status EinsumHeightAnalysis::HandleAsyncStart(
HloInstruction* async_start) {
RETURN_IF_HEIGHT_EXISTS(async_start);
TF_RETURN_IF_ERROR(
HandleCalledComputation(*(async_start->async_wrapped_computation()),
async_start->mutable_operands()));
const ShapeTree<int>& root_height_tree = GetHeightTreeOrDie(
async_start->async_wrapped_computation()->root_instruction());
ShapeTree<int>& height_tree = GetOrCreateHeightTree(async_start);
SetHeight(height_tree, root_height_tree, {}, {1});
for (int operand_index = 0; operand_index < async_start->operands().size();
++operand_index) {
HloInstruction* operand = async_start->mutable_operands()[operand_index];
const ShapeTree<int>& operand_height_tree = GetHeightTreeOrDie(operand);
SetHeight(height_tree, operand_height_tree, {}, {0, operand_index});
}
return absl::OkStatus();
}
absl::Status EinsumHeightAnalysis::HandleAsyncDone(HloInstruction* async_done) {
RETURN_IF_HEIGHT_EXISTS(async_done);
ShapeTree<int>& height_tree = GetOrCreateHeightTree(async_done);
HloInstruction* async_start = async_done->mutable_operand(0);
const ShapeTree<int>& async_start_height_tree =
GetHeightTreeOrDie(async_start);
SetHeight(height_tree, async_start_height_tree, {1}, {});
return absl::OkStatus();
}
std::string HloValueSemanticLabelToString(HloValueSemanticLabel label) {
switch (label) {
case HloValueSemanticLabel::kStatic:
return "Static";
case HloValueSemanticLabel::kRandom:
return "Random";
case HloValueSemanticLabel::kWeight:
return "Weight";
case HloValueSemanticLabel::kActivation:
return "Activation";
case HloValueSemanticLabel::kActivationGradient:
return "ActivationGradient";
case HloValueSemanticLabel::kWeightGradient:
return "WeightGradient";
case HloValueSemanticLabel::kTupleOrToken:
return "TupleOrToken";
}
}
std::string HloValueSemantics::ToString() const {
std::string content = absl::StrJoin(
{absl::StrCat("label: ", HloValueSemanticLabelToString(label_)),
absl::StrCat("origin: ", origin_.ToString())},
", ");
return absl::StrCat("{", content, "}");
}
HloValueSemantics::HloValueSemantics(HloValueSemanticLabel label,
const HloPosition& origin)
: HloValueSemantics(0, label, origin) {}
HloValueSemantics::HloValueSemantics(Id id, HloValueSemanticLabel label,
const HloPosition& origin)
: id_(id), label_(label), origin_(origin) {}
std::string HloValueSemanticsTreeToString(
const ShapeTree<const HloValueSemantics*>& tree) {
return ToString(tree);
}
HloValueSemanticsAnalysis::HloValueSemanticsAnalysis(
const HloModule& module,
const absl::flat_hash_set<std::string_view>& execution_threads)
: module_(module), execution_threads_(execution_threads), next_id_(0) {}
const HloValueSemantics* HloValueSemanticsAnalysis::GetSemantics(
const HloInstruction* instruction, const ShapeIndex& index) const {
return GetInstructionSemantics(instruction).element(index);
}
int HloValueSemanticsAnalysis::GetDepth(const HloInstruction* instruction,
const ShapeIndex& index) const {
auto depth_iter = einsum_depth_map_.find(instruction);
CHECK(depth_iter != einsum_depth_map_.end());
return depth_iter->second.element(index);
}
int HloValueSemanticsAnalysis::GetHeight(const HloInstruction* instruction,
const ShapeIndex& index) const {
auto height_iter = einsum_height_map_.find(instruction);
CHECK(height_iter != einsum_height_map_.end());
return height_iter->second.element(index);
}
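// A minimal usage sketch (not part of the analysis itself; `module` is
// assumed to be a caller-owned, verified HloModule and `instr` one of its
// instructions):
//
//   TF_ASSIGN_OR_RETURN(std::unique_ptr<HloValueSemanticsAnalysis> analysis,
//                       HloValueSemanticsAnalysis::Run(*module, {}));
//   const HloValueSemantics* semantics = analysis->GetSemantics(instr);
//   VLOG(1) << semantics->ToString();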
absl::StatusOr<std::unique_ptr<HloValueSemanticsAnalysis>>
HloValueSemanticsAnalysis::Run(
const HloModule& module,
const absl::flat_hash_set<std::string_view>& execution_threads) {
std::unique_ptr<HloValueSemanticsAnalysis> value_semantics_analysis =
absl::WrapUnique(
new HloValueSemanticsAnalysis(module, execution_threads));
value_semantics_analysis->InitializeSendRecvGroups();
TF_RETURN_IF_ERROR(value_semantics_analysis->InitializeEinsumDepth());
TF_RETURN_IF_ERROR(value_semantics_analysis->InitializeEinsumHeight());
value_semantics_analysis->AnnotateWeights();
TF_RETURN_IF_ERROR(
value_semantics_analysis->RunOnComputation(*module.entry_computation()));
return value_semantics_analysis;
}
absl::Status HloValueSemanticsAnalysis::InitializeEinsumDepth() {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<EinsumDepthAnalysis> einsum_depth_analysis,
EinsumDepthAnalysis::Run(*module_.entry_computation(),
*send_recv_group_map_));
einsum_depth_map_ = einsum_depth_analysis->GetEinsumDepthMap();
return absl::OkStatus();
}
absl::Status HloValueSemanticsAnalysis::InitializeEinsumHeight() {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<EinsumHeightAnalysis> einsum_height_analysis,
EinsumHeightAnalysis::Run(*module_.entry_computation(),
*send_recv_group_map_));
einsum_height_map_ = einsum_height_analysis->GetEinsumHeightMap();
return absl::OkStatus();
}
void HloValueSemanticsAnalysis::InitializeSendRecvGroups() {
send_recv_group_map_ = std::make_unique<SendRecvGroupMap>(module_);
}
bool HloValueSemanticsAnalysis::HasSemanticsFor(
const HloInstruction* instruction) const {
return value_semantics_.contains(instruction);
}
absl::StatusOr<HloInstruction*>
HloValueSemanticsAnalysis::GetMatchingSendOrRecv(
HloInstruction* send_or_recv) const {
return send_recv_group_map_->GetMatchingSendOrRecv(send_or_recv);
}
HloValueSemantics::Id HloValueSemanticsAnalysis::NextId() { return next_id_++; }
const HloValueSemantics* HloValueSemanticsAnalysis::NewHloValueSemantics(
HloValueSemanticLabel label, const HloPosition& origin) {
HloValueSemantics::Id id = NextId();
auto inserted = value_semantics_map_.insert(std::make_pair(
id, std::make_unique<HloValueSemantics>(id, label, origin)));
return inserted.first->second.get();
}
const ShapeTree<const HloValueSemantics*>&
HloValueSemanticsAnalysis::GetInstructionSemantics(
const HloInstruction* instruction) const {
auto semantics_iter = value_semantics_.find(instruction);
CHECK(semantics_iter != value_semantics_.end())
<< "instruction: " << instruction->ToString();
return semantics_iter->second;
}
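// Copies the semantics subtree of `copy_from` rooted at `source_index` into
// the subtree of `copy_to` rooted at `destination_index`, minting a fresh
// HloValueSemantics object (with a new id) for each copied element.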
void HloValueSemanticsAnalysis::DeepCopyHloValueSemantics(
ShapeTree<const HloValueSemantics*>& copy_to,
const ShapeTree<const HloValueSemantics*>& copy_from,
const ShapeIndex& source_index, const ShapeIndex& destination_index) {
copy_to.ForEachMutableElement(
      [this, &copy_from, &source_index, &destination_index](
const ShapeIndex& index, const HloValueSemantics** semantics) {
if (index.size() < destination_index.size()) {
return;
}
bool in_subtree_to_copy = true;
for (int i = 0; i < destination_index.size(); ++i) {
if (index[i] != destination_index[i]) {
in_subtree_to_copy = false;
break;
}
}
if (!in_subtree_to_copy) {
return;
}
ShapeIndex full_source_index = source_index;
for (int i = destination_index.size(); i < index.size(); ++i) {
full_source_index.push_back(index[i]);
}
const HloValueSemantics* source_semantics =
copy_from.element(full_source_index);
*semantics = NewHloValueSemantics(source_semantics->label(),
source_semantics->origin());
});
}
void HloValueSemanticsAnalysis::DeepCopyHloValueSemantics(
const HloInstruction* target,
const ShapeTree<const HloValueSemantics*>& copy_from,
const ShapeIndex& source_index) {
auto semantics_iter = value_semantics_.find(target);
if (semantics_iter != value_semantics_.end()) {
DeleteHloValueSemantics(semantics_iter->second);
DeepCopyHloValueSemantics(semantics_iter->second, copy_from, source_index,
{});
return;
}
ShapeTree<const HloValueSemantics*> semantics_shape_tree(target->shape(),
nullptr);
DeepCopyHloValueSemantics(semantics_shape_tree, copy_from, source_index, {});
value_semantics_[target] = std::move(semantics_shape_tree);
}
void HloValueSemanticsAnalysis::SetHloValueSemantics(
const HloInstruction* target,
const ShapeTree<const HloValueSemantics*>& semantics) {
auto semantics_iter = value_semantics_.find(target);
if (semantics_iter != value_semantics_.end()) {
DeleteHloValueSemantics(semantics_iter->second);
}
value_semantics_[target] = semantics;
}
void HloValueSemanticsAnalysis::DeleteHloValueSemantics(
const HloValueSemantics* to_delete) {
value_semantics_map_.erase(to_delete->id());
}
void HloValueSemanticsAnalysis::DeleteHloValueSemantics(
const ShapeTree<const HloValueSemantics*>& to_delete) {
to_delete.ForEachElement(
[this](const ShapeIndex& index, const HloValueSemantics* semantics) {
DeleteHloValueSemantics(semantics);
});
}
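// Every entry-computation parameter leaf is labeled kWeight (interior tuple
// nodes get kTupleOrToken); the propagation pass then reclassifies
// downstream values as activations and gradients.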
void HloValueSemanticsAnalysis::AnnotateWeights() {
const HloComputation* entry_computation = module_.entry_computation();
for (HloInstruction* parameter :
entry_computation->parameter_instructions()) {
ShapeTree<const HloValueSemantics*> semantics_shape_tree(parameter->shape(),
nullptr);
semantics_shape_tree.ForEachMutableElement(
[this, &semantics_shape_tree, parameter](
const ShapeIndex& index, const HloValueSemantics** semantics) {
          if (!semantics_shape_tree.IsLeaf(index)) {
            *semantics = NewHloValueSemantics(
                HloValueSemanticLabel::kTupleOrToken, {parameter, index});
            // Interior tuple/token nodes must keep this label; return early
            // so the kWeight assignment below does not overwrite it.
            return;
          }
          *semantics = NewHloValueSemantics(HloValueSemanticLabel::kWeight,
                                            {parameter, index});
});
value_semantics_[parameter] = std::move(semantics_shape_tree);
}
}
absl::Status HloValueSemanticsAnalysis::RunOnComputation(
const HloComputation& computation,
absl::Span<const HloInstruction* const> operands) {
CHECK_EQ(computation.num_parameters(), operands.size());
for (int i = 0; i < computation.num_parameters(); ++i) {
auto semantics_iter = value_semantics_.find(operands[i]);
CHECK(semantics_iter != value_semantics_.end());
DeepCopyHloValueSemantics(computation.parameter_instructions()[i],
semantics_iter->second);
}
return RunOnComputation(computation);
}
absl::Status HloValueSemanticsAnalysis::RunOnComputation(
const HloComputation& computation) {
if (HloInstruction::IsThreadIncluded(computation.execution_thread(),
execution_threads_)) {
HloValueSemanticsPropagation propagation(this);
return propagation.Run(computation);
}
return absl::OkStatus();
}
HloValueSemanticsPropagation::HloValueSemanticsPropagation(
HloValueSemanticsAnalysis* analysis)
: analysis_(analysis) {}
absl::Status HloValueSemanticsPropagation::Run(
const HloComputation& computation) {
TF_RETURN_IF_ERROR(computation.root_instruction()->Accept(this));
for (HloInstruction* instruction : computation.instructions()) {
if (instruction->user_count() == 0) {
TF_RETURN_IF_ERROR(instruction->Accept(this));
}
}
return absl::OkStatus();
}
HloValueSemantics HloValueSemanticsPropagation::CopySemantics(
const HloValueSemantics& semantics) const {
return HloValueSemantics(semantics.label(), semantics.origin());
}
HloValueSemantics HloValueSemanticsPropagation::CopySemanticsWithNewOrigin(
const HloValueSemantics& semantics, HloInstruction* new_origin,
const ShapeIndex& index) const {
return HloValueSemantics(semantics.label(), {new_origin, index});
}
const HloValueSemantics* HloValueSemanticsPropagation::AddSemantics(
const HloValueSemantics& semantics) {
return analysis_->NewHloValueSemantics(semantics.label(), semantics.origin());
}
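// Walks backwards from `semantics`' origin through operand origins and
// collects every dot/convolution that directly consumes a value whose origin
// is `origin_dependence`. When `recursive` is set, the walk continues through
// the operands of any origin where no match was found.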
std::vector<HloValueSemanticsPropagation::EinsumAndOperandIndex>
HloValueSemanticsPropagation::FindEinsumsWhereOriginDependsOnOther(
const HloValueSemantics& semantics, const HloPosition& origin_dependence,
bool recursive) const {
std::vector<HloPosition> stack;
absl::flat_hash_set<HloPosition> visited;
std::vector<HloValueSemanticsPropagation::EinsumAndOperandIndex>
dependent_einsums;
stack.push_back(semantics.origin());
while (!stack.empty()) {
HloPosition origin = stack.back();
stack.pop_back();
if (visited.contains(origin)) {
continue;
}
visited.insert(origin);
absl::Span<const HloInstruction* const> operands =
origin.instruction->operands();
if (origin.instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
operands = operands.subspan(0, 2);
}
if (origin.instruction->opcode() == HloOpcode::kDynamicSlice) {
operands = operands.subspan(0, 1);
}
bool is_einsum = origin.instruction->opcode() == HloOpcode::kDot ||
origin.instruction->opcode() == HloOpcode::kConvolution;
bool found_einsum = false;
if (is_einsum) {
for (int64_t operand_index = 0; operand_index < operands.size();
++operand_index) {
const HloInstruction* origin_operand = operands[operand_index];
const HloValueSemantics* origin_operand_semantics =
analysis_->GetSemantics(origin_operand);
if (origin_operand_semantics->origin() == origin_dependence) {
dependent_einsums.push_back({origin.instruction, operand_index});
found_einsum = true;
}
}
}
if (!found_einsum && recursive) {
for (int64_t operand_index = 0; operand_index < operands.size();
++operand_index) {
const HloInstruction* origin_operand = operands[operand_index];
const HloValueSemantics* origin_operand_semantics =
analysis_->GetSemantics(origin_operand);
stack.push_back(origin_operand_semantics->origin());
}
}
}
return dependent_einsums;
}
bool HloValueSemanticsPropagation::OriginDependsOn(
const HloValueSemantics& semantics, const HloPosition& origin_dependence,
bool recursive) const {
auto dependent_einsums = FindEinsumsWhereOriginDependsOnOther(
semantics, origin_dependence, recursive);
return !dependent_einsums.empty();
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromStaticAndOther(
const HloValueSemantics& static_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(static_semantics.label() == HloValueSemanticLabel::kStatic)
<< __func__ << ", : " << static_semantics.ToString();
if (other_semantics.label() == HloValueSemanticLabel::kStatic) {
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
bool is_dot_or_convolution = instruction->opcode() == HloOpcode::kDot ||
instruction->opcode() == HloOpcode::kConvolution;
if (is_dot_or_convolution &&
other_semantics.label() == HloValueSemanticLabel::kActivationGradient) {
return MaybeCreateGradientSemantics(
instruction, HloValueSemanticLabel::kActivationGradient);
}
return CopySemantics(other_semantics);
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromRandomAndOther(
const HloValueSemantics& random_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(random_semantics.label() == HloValueSemanticLabel::kRandom);
CHECK(other_semantics.label() != HloValueSemanticLabel::kStatic);
if (other_semantics.label() == HloValueSemanticLabel::kRandom) {
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
return CopySemantics(other_semantics);
}
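// Classifies a dot/convolution that looks like a gradient computation by its
// einsum depth: a negative depth means its result never reaches the root, so
// it is treated as an activation; depth 0 means it reaches the root without
// passing through another einsum, so it is a weight gradient; otherwise the
// caller-provided fallback label is used.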
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::MaybeCreateGradientSemantics(
HloInstruction* gradient_candidate,
HloValueSemanticLabel fallback_label) const {
int gradient_depth = analysis_->GetDepth(gradient_candidate, {});
if (gradient_depth < 0) {
return HloValueSemantics(HloValueSemanticLabel::kActivation,
{gradient_candidate, {}});
}
if (gradient_depth == 0) {
return HloValueSemantics(HloValueSemanticLabel::kWeightGradient,
{gradient_candidate, {}});
}
return HloValueSemantics(fallback_label, {gradient_candidate, {}});
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromWeightAndOther(
const HloValueSemantics& weight_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(weight_semantics.label() == HloValueSemanticLabel::kWeight);
CHECK(other_semantics.label() != HloValueSemanticLabel::kStatic &&
other_semantics.label() != HloValueSemanticLabel::kRandom);
bool is_dot_or_convolution = instruction->opcode() == HloOpcode::kDot ||
instruction->opcode() == HloOpcode::kConvolution;
if (other_semantics.label() == HloValueSemanticLabel::kWeight) {
if (!is_dot_or_convolution) {
if (weight_semantics.origin() == other_semantics.origin()) {
return CopySemantics(other_semantics);
}
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
return HloValueSemantics(HloValueSemanticLabel::kActivation,
{instruction, {}});
}
if (!is_dot_or_convolution) {
return CopySemantics(other_semantics);
}
if (other_semantics.label() == HloValueSemanticLabel::kActivation) {
int instruction_depth = analysis_->GetDepth(instruction, {});
auto dependent_einsums = FindEinsumsWhereOriginDependsOnOther(
other_semantics, weight_semantics.origin(), true);
    bool all_dependent_einsums_immediately_proceed_instruction =
absl::c_all_of(dependent_einsums,
[instruction_depth,
this](const EinsumAndOperandIndex& dependent_einsum) {
int dependent_einsum_depth =
analysis_->GetDepth(dependent_einsum.einsum, {});
return dependent_einsum_depth > 0 &&
dependent_einsum_depth == instruction_depth + 1;
});
if (!dependent_einsums.empty() &&
        all_dependent_einsums_immediately_proceed_instruction) {
return MaybeCreateGradientSemantics(
instruction, HloValueSemanticLabel::kActivationGradient);
}
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
if (other_semantics.label() == HloValueSemanticLabel::kActivationGradient) {
return MaybeCreateGradientSemantics(
instruction, HloValueSemanticLabel::kActivationGradient);
}
CHECK(other_semantics.label() == HloValueSemanticLabel::kWeightGradient);
return CopySemantics(other_semantics);
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromActivationAndOther(
const HloValueSemantics& activation_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(activation_semantics.label() == HloValueSemanticLabel::kActivation);
CHECK(other_semantics.label() != HloValueSemanticLabel::kStatic &&
other_semantics.label() != HloValueSemanticLabel::kRandom &&
other_semantics.label() != HloValueSemanticLabel::kWeight);
bool is_dot_or_convolution = instruction->opcode() == HloOpcode::kDot ||
instruction->opcode() == HloOpcode::kConvolution;
if (!is_dot_or_convolution) {
if (activation_semantics.origin() == other_semantics.origin()) {
return CopySemantics(other_semantics);
}
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
if (other_semantics.label() == HloValueSemanticLabel::kActivation) {
bool other_depends_on_activation = OriginDependsOn(
other_semantics, activation_semantics.origin(), true);
bool activation_depends_on_other =
OriginDependsOn(activation_semantics, other_semantics.origin(),
true);
CHECK(!other_depends_on_activation || !activation_depends_on_other);
if (other_depends_on_activation || activation_depends_on_other) {
return MaybeCreateGradientSemantics(instruction,
HloValueSemanticLabel::kActivation);
}
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
if (other_semantics.label() == HloValueSemanticLabel::kActivationGradient) {
return MaybeCreateGradientSemantics(
instruction, HloValueSemanticLabel::kActivationGradient);
}
CHECK(other_semantics.label() == HloValueSemanticLabel::kWeightGradient)
<< "instruction: " << instruction->ToString()
<< ", semantics: " << other_semantics.ToString()
<< ", expected: WeightGradient.";
return CopySemantics(other_semantics);
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromActivationGradientAndOther(
const HloValueSemantics& activation_gradient_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(activation_gradient_semantics.label() ==
HloValueSemanticLabel::kActivationGradient);
CHECK(other_semantics.label() != HloValueSemanticLabel::kStatic &&
other_semantics.label() != HloValueSemanticLabel::kRandom &&
other_semantics.label() != HloValueSemanticLabel::kWeight &&
other_semantics.label() != HloValueSemanticLabel::kActivation);
if (other_semantics.label() == HloValueSemanticLabel::kActivationGradient) {
if (other_semantics.origin() == activation_gradient_semantics.origin()) {
return CopySemantics(activation_gradient_semantics);
}
return CopySemanticsWithNewOrigin(other_semantics, instruction);
}
CHECK(other_semantics.label() == HloValueSemanticLabel::kWeightGradient);
return CopySemantics(other_semantics);
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromWeightGradientAndOther(
const HloValueSemantics& weight_gradient_semantics,
const HloValueSemantics& other_semantics,
HloInstruction* instruction) const {
CHECK(weight_gradient_semantics.label() ==
HloValueSemanticLabel::kWeightGradient);
CHECK(other_semantics.label() != HloValueSemanticLabel::kStatic &&
other_semantics.label() != HloValueSemanticLabel::kRandom &&
other_semantics.label() != HloValueSemanticLabel::kWeight &&
other_semantics.label() != HloValueSemanticLabel::kActivation &&
other_semantics.label() != HloValueSemanticLabel::kActivationGradient);
return CopySemantics(weight_gradient_semantics);
}
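// Reduces the operands' semantics pairwise: each iteration takes the last two
// entries, picks the label with the highest precedence (Static, then Random,
// Weight, Activation, ActivationGradient, WeightGradient), and dispatches to
// the matching ComputeSemanticsFrom*AndOther helper, until a single semantics
// value remains.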
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::MergeSemanticsForAnInstruction(
HloInstruction* instruction,
std::vector<HloValueSemantics>& semantics_vec) const {
while (semantics_vec.size() >= 2) {
absl::Span<const HloValueSemantics> operand_list =
absl::MakeConstSpan(semantics_vec).subspan(semantics_vec.size() - 2, 2);
auto find_operand_index_with_label =
[&operand_list](HloValueSemanticLabel label) -> std::optional<int64_t> {
auto iter = absl::c_find_if(operand_list,
[label](const HloValueSemantics& operand) {
return operand.label() == label;
});
return (iter != operand_list.end())
? std::optional<int64_t>(
std::distance(operand_list.begin(), iter))
: std::nullopt;
};
auto replace_operands_semantics_with =
[&semantics_vec](const HloValueSemantics& result_semantics) {
semantics_vec.pop_back();
semantics_vec.pop_back();
semantics_vec.push_back(result_semantics);
};
if (auto index =
find_operand_index_with_label(HloValueSemanticLabel::kStatic)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromStaticAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (auto index =
find_operand_index_with_label(HloValueSemanticLabel::kRandom)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromRandomAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (auto index =
find_operand_index_with_label(HloValueSemanticLabel::kWeight)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromWeightAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (auto index =
find_operand_index_with_label(HloValueSemanticLabel::kActivation)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromActivationAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (auto index = find_operand_index_with_label(
HloValueSemanticLabel::kActivationGradient)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromActivationGradientAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (auto index = find_operand_index_with_label(
HloValueSemanticLabel::kWeightGradient)) {
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromWeightGradientAndOther(
operand_list[*index], operand_list[1 - *index], instruction));
replace_operands_semantics_with(semantics);
continue;
}
if (operand_list[0].label() == HloValueSemanticLabel::kTupleOrToken &&
operand_list[1].label() == HloValueSemanticLabel::kTupleOrToken) {
HloValueSemantics semantics =
CopySemanticsWithNewOrigin(operand_list[0], instruction);
replace_operands_semantics_with(semantics);
continue;
}
LOG(FATAL) << "We don't expect to handle operands of label "
<< HloValueSemanticLabelToString(operand_list[0].label())
<< " and "
<< HloValueSemanticLabelToString(operand_list[1].label())
<< " in ComputeSemanticsFromOperands. Instruction: "
<< instruction->name()
<< " should be handled in its own handler instead of the "
"default handler.";
}
VLOG(3) << __func__
<< ", result semantics: " << semantics_vec.back().ToString();
return semantics_vec.back();
}
absl::StatusOr<HloValueSemantics>
HloValueSemanticsPropagation::ComputeSemanticsFromOperands(
HloInstruction* instruction, absl::Span<const int64_t> operand_indices,
absl::Span<const ShapeIndex> operand_shape_indices) const {
CHECK(!operand_indices.empty());
CHECK(operand_shape_indices.empty() ||
operand_indices.size() == operand_shape_indices.size());
VLOG(3) << __func__ << ", instruction: " << instruction->ToString();
std::vector<HloValueSemantics> semantics_vec;
for (int64_t operand_index : operand_indices) {
const HloInstruction* operand = instruction->operand(operand_index);
const HloValueSemantics* operand_semantics = analysis_->GetSemantics(
operand, operand_shape_indices.empty()
? ShapeIndex()
: operand_shape_indices[operand_index]);
auto operand_height_iter = analysis_->GetEinsumHeightMap().find(operand);
CHECK(operand_height_iter != analysis_->GetEinsumHeightMap().end())
<< "operand: " << operand->name();
VLOG(3) << __func__ << ", operand_index: " << operand_index
<< ", operand: " << operand->name()
<< ", operand_semantics: " << operand_semantics->ToString()
<< ", height: " << ToString(operand_height_iter->second);
semantics_vec.push_back(*operand_semantics);
}
return MergeSemanticsForAnInstruction(instruction, semantics_vec);
}
#define RETURN_IF_ALREADY_PROPAGATED(instruction) \
if (analysis_->HasSemanticsFor(instruction)) { \
return absl::OkStatus(); \
}
absl::Status HloValueSemanticsPropagation::DefaultAction(
HloInstruction* instruction) {
RETURN_IF_ALREADY_PROPAGATED(instruction);
std::vector<int64_t> operand_indices(instruction->operand_count());
std::iota(operand_indices.begin(), operand_indices.end(), 0);
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromOperands(instruction, operand_indices));
if (instruction->shape().IsTuple()) {
ShapeTree<const HloValueSemantics*> semantics_shape_tree(
instruction->shape(), nullptr);
semantics_shape_tree.ForEachMutableElement(
[this, &semantics, &semantics_shape_tree, instruction](
const ShapeIndex& index, const HloValueSemantics** semantics_ptr) {
if (semantics_shape_tree.IsLeaf(index)) {
HloValueSemantics sub_semantics =
CopySemanticsWithNewOrigin(semantics, instruction, index);
*semantics_ptr = AddSemantics(sub_semantics);
} else {
HloValueSemantics sub_semantics(
HloValueSemanticLabel::kTupleOrToken, {instruction, index});
*semantics_ptr = AddSemantics(sub_semantics);
}
});
analysis_->SetHloValueSemantics(instruction, semantics_shape_tree);
} else {
const HloValueSemantics* semantics_ptr = AddSemantics(semantics);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(
instruction->shape(), semantics_ptr);
analysis_->SetHloValueSemantics(instruction, semantics_shape_tree);
}
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleParameter(
HloInstruction* parameter) {
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleConstant(
HloInstruction* constant) {
RETURN_IF_ALREADY_PROPAGATED(constant);
const HloValueSemantics* constant_semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kStatic, {constant, {}});
ShapeTree<const HloValueSemantics*> semantics_shape_tree(constant->shape(),
constant_semantics);
analysis_->SetHloValueSemantics(constant, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleIota(HloInstruction* iota) {
RETURN_IF_ALREADY_PROPAGATED(iota);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kStatic, {iota, {}});
ShapeTree<const HloValueSemantics*> semantics_shape_tree(iota->shape(),
semantics);
analysis_->SetHloValueSemantics(iota, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandlePartitionId(
HloInstruction* partition_id) {
RETURN_IF_ALREADY_PROPAGATED(partition_id);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kStatic, {partition_id, {}});
ShapeTree<const HloValueSemantics*> semantics_shape_tree(
partition_id->shape(), semantics);
analysis_->SetHloValueSemantics(partition_id, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleReplicaId(
HloInstruction* replica_id) {
RETURN_IF_ALREADY_PROPAGATED(replica_id);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kStatic, {replica_id, {}});
ShapeTree<const HloValueSemantics*> semantics_shape_tree(replica_id->shape(),
semantics);
analysis_->SetHloValueSemantics(replica_id, semantics_shape_tree);
return absl::OkStatus();
}
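// Random bits have no semantic relationship to weights or activations.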
absl::Status HloValueSemanticsPropagation::HandleRngBitGenerator(
    HloInstruction* rng_bit_generator) {
  RETURN_IF_ALREADY_PROPAGATED(rng_bit_generator);
  const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kRandom, {rng_bit_generator, {}});
ShapeTree<const HloValueSemantics*> rbg_semantics_tree(
rng_bit_generator->shape(), semantics);
analysis_->SetHloValueSemantics(rng_bit_generator, rbg_semantics_tree);
return absl::OkStatus();
}
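// A clamp takes the semantics of the value being clamped (operand 1); the
// min/max bounds do not contribute.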
absl::Status HloValueSemanticsPropagation::HandleClamp(HloInstruction* clamp) {
RETURN_IF_ALREADY_PROPAGATED(clamp);
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(clamp->operand(1));
analysis_->DeepCopyHloValueSemantics(clamp, operand_semantics);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleTuple(HloInstruction* tuple) {
RETURN_IF_ALREADY_PROPAGATED(tuple);
return HandleTupleLike(tuple);
}
absl::Status HloValueSemanticsPropagation::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
RETURN_IF_ALREADY_PROPAGATED(get_tuple_element);
const HloInstruction* tuple = get_tuple_element->operand(0);
int64_t tuple_index = get_tuple_element->tuple_index();
const ShapeTree<const HloValueSemantics*>& tuple_semantics =
analysis_->GetInstructionSemantics(tuple);
TF_ASSIGN_OR_RETURN(
ShapeTree<const HloValueSemantics*> tuple_element_semantics,
tuple_semantics.SubShapeTree({tuple_index}));
analysis_->DeepCopyHloValueSemantics(get_tuple_element,
tuple_element_semantics);
return absl::OkStatus();
}
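// Propagate through the called computation, then copy the root's semantics
// onto the call site. Fusions are handled identically below.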
absl::Status HloValueSemanticsPropagation::HandleCall(HloInstruction* call) {
RETURN_IF_ALREADY_PROPAGATED(call);
HloComputation* computation = call->called_computations()[0];
TF_RETURN_IF_ERROR(
analysis_->RunOnComputation(*computation, call->operands()));
const ShapeTree<const HloValueSemantics*>& root_semantics =
analysis_->GetInstructionSemantics(computation->root_instruction());
analysis_->DeepCopyHloValueSemantics(call, root_semantics);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleFusion(
HloInstruction* fusion) {
return HandleCall(fusion);
}
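// Propagate through both the condition and the body, seeding their
// parameters with the while operands; the while result takes the semantics
// of the body root.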
absl::Status HloValueSemanticsPropagation::HandleWhile(
HloInstruction* xla_while) {
RETURN_IF_ALREADY_PROPAGATED(xla_while);
TF_RETURN_IF_ERROR(analysis_->RunOnComputation(*xla_while->while_condition(),
xla_while->operands()));
HloComputation* computation = xla_while->while_body();
TF_RETURN_IF_ERROR(
analysis_->RunOnComputation(*computation, xla_while->operands()));
const ShapeTree<const HloValueSemantics*>& root_semantics =
analysis_->GetInstructionSemantics(computation->root_instruction());
analysis_->DeepCopyHloValueSemantics(xla_while, root_semantics);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleCustomCall(
HloInstruction* custom_call) {
RETURN_IF_ALREADY_PROPAGATED(custom_call);
if (custom_call->custom_call_target() == "Sharding" ||
custom_call->custom_call_target() == "SPMDFullToShardShape" ||
custom_call->custom_call_target() == "SPMDShardToFullShape") {
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(custom_call->operand(0));
analysis_->DeepCopyHloValueSemantics(custom_call, operand_semantics);
return absl::OkStatus();
}
return Unimplemented("Unimplemented custom-call: %s",
custom_call->custom_call_target());
}
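// Each branch computation is propagated with its corresponding branch
// operand, and the per-index semantics of the branch roots are merged to
// form the conditional's result.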
absl::Status HloValueSemanticsPropagation::HandleConditional(
HloInstruction* conditional) {
RETURN_IF_ALREADY_PROPAGATED(conditional);
std::vector<ShapeTree<const HloValueSemantics*>> semantics_tree_vec;
for (int i = 0; i < conditional->called_computations().size(); ++i) {
HloComputation* computation = conditional->called_computations()[i];
TF_RETURN_IF_ERROR(analysis_->RunOnComputation(
*computation, {conditional->operands()[i + 1]}));
const ShapeTree<const HloValueSemantics*>& root_semantics =
analysis_->GetInstructionSemantics(computation->root_instruction());
semantics_tree_vec.push_back(root_semantics);
}
std::vector<HloValueSemantics> merged_semantics_leaves;
TF_RETURN_IF_ERROR(semantics_tree_vec[0].ForEachElementWithStatus(
[&](const ShapeIndex& index,
const HloValueSemantics* semantics) -> absl::Status {
std::vector<HloValueSemantics> semantics_vector;
semantics_vector.reserve(semantics_tree_vec.size());
for (size_t i = 0; i < semantics_tree_vec.size(); ++i) {
semantics_vector.push_back(
*(semantics_tree_vec[i].find(index)->second));
}
TF_ASSIGN_OR_RETURN(
HloValueSemantics merged,
MergeSemanticsForAnInstruction(conditional, semantics_vector));
merged_semantics_leaves.push_back(merged);
return absl::OkStatus();
}));
ShapeTree<const HloValueSemantics*> merged_semantics(conditional->shape());
int idx = 0;
merged_semantics.ForEachMutableElement(
[&](const ShapeIndex& index,
const HloValueSemantics** semantics) -> void {
*semantics = &merged_semantics_leaves[idx++];
});
analysis_->DeepCopyHloValueSemantics(conditional, merged_semantics);
return absl::OkStatus();
}
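// A select merges the semantics of the two candidate values (operands 1
// and 2); the predicate does not contribute.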
absl::Status HloValueSemanticsPropagation::HandleSelect(
HloInstruction* select) {
RETURN_IF_ALREADY_PROPAGATED(select);
TF_ASSIGN_OR_RETURN(HloValueSemantics semantics,
ComputeSemanticsFromOperands(select, {1, 2}));
const HloValueSemantics* semantics_ptr = AddSemantics(semantics);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(select->shape(),
semantics_ptr);
analysis_->SetHloValueSemantics(select, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleConcatenate(
HloInstruction* concatenate) {
RETURN_IF_ALREADY_PROPAGATED(concatenate);
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(concatenate->operand(0));
analysis_->DeepCopyHloValueSemantics(concatenate, operand_semantics);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleDynamicSlice(
HloInstruction* dynamic_slice) {
RETURN_IF_ALREADY_PROPAGATED(dynamic_slice);
const HloInstruction* dynamic_slice_operand = dynamic_slice->operand(0);
const HloValueSemantics* operand_semantics =
analysis_->GetSemantics(dynamic_slice_operand);
const HloValueSemantics* semantics = AddSemantics(*operand_semantics);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(
dynamic_slice->shape(), semantics);
analysis_->SetHloValueSemantics(dynamic_slice, semantics_shape_tree);
return absl::OkStatus();
}
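// The result merges the semantics of the buffer (operand 0) and the update
// (operand 1); the start indices do not contribute.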
absl::Status HloValueSemanticsPropagation::HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
RETURN_IF_ALREADY_PROPAGATED(dynamic_update_slice);
TF_ASSIGN_OR_RETURN(
HloValueSemantics semantics,
ComputeSemanticsFromOperands(dynamic_update_slice, {0, 1}));
const HloValueSemantics* semantics_ptr = AddSemantics(semantics);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(
dynamic_update_slice->shape(), semantics_ptr);
analysis_->SetHloValueSemantics(dynamic_update_slice, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleCopyStart(
HloInstruction* copy_start) {
return HandleCollectiveOrCopyStart(copy_start);
}
absl::Status HloValueSemanticsPropagation::HandleCopyDone(
HloInstruction* copy_done) {
return HandleCollectiveOrCopyDone(copy_done);
}
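// *-start ops produce a tuple (operand, output[, context...]). Indices {0}
// and {1} alias the operand's semantics, any context elements at {2}/{3}
// are labeled kRandom, and the tuple itself is kTupleOrToken.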
absl::Status HloValueSemanticsPropagation::HandleCollectiveOrCopyStart(
HloInstruction* op_start) {
RETURN_IF_ALREADY_PROPAGATED(op_start);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(op_start->shape());
const ShapeTree<const HloValueSemantics*>& operand_semantics_shape_tree =
analysis_->GetInstructionSemantics(op_start->operand(0));
analysis_->DeepCopyHloValueSemantics(semantics_shape_tree,
operand_semantics_shape_tree, {}, {0});
analysis_->DeepCopyHloValueSemantics(semantics_shape_tree,
operand_semantics_shape_tree, {}, {1});
semantics_shape_tree.ForEachMutableElement(
[this, op_start](const ShapeIndex& shape_index,
const HloValueSemantics** semantics) {
if (shape_index.empty()) {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {op_start, {}});
}
        if (shape_index == ShapeIndex{2} || shape_index == ShapeIndex{3}) {
          *semantics = analysis_->NewHloValueSemantics(
              HloValueSemanticLabel::kRandom, {op_start, shape_index});
        }
});
analysis_->SetHloValueSemantics(op_start, semantics_shape_tree);
return absl::OkStatus();
}
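// *-done ops return the output element of the corresponding start tuple,
// i.e. its index {1}.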
absl::Status HloValueSemanticsPropagation::HandleCollectiveOrCopyDone(
HloInstruction* op_done) {
RETURN_IF_ALREADY_PROPAGATED(op_done);
const ShapeTree<const HloValueSemantics*>& operand_semantics_shape_tree =
analysis_->GetInstructionSemantics(op_done->operand(0));
analysis_->DeepCopyHloValueSemantics(op_done, operand_semantics_shape_tree,
{1});
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleAllGatherStart(
HloInstruction* all_gather_start) {
return HandleCollectiveOrCopyStart(all_gather_start);
}
absl::Status HloValueSemanticsPropagation::HandleAllGatherDone(
HloInstruction* all_gather_done) {
return HandleCollectiveOrCopyDone(all_gather_done);
}
absl::Status HloValueSemanticsPropagation::HandleCollectivePermuteStart(
HloInstruction* collective_permute_start) {
return HandleCollectiveOrCopyStart(collective_permute_start);
}
absl::Status HloValueSemanticsPropagation::HandleCollectivePermuteDone(
HloInstruction* collective_permute_done) {
return HandleCollectiveOrCopyDone(collective_permute_done);
}
absl::Status HloValueSemanticsPropagation::HandleGather(
HloInstruction* gather) {
RETURN_IF_ALREADY_PROPAGATED(gather);
const ShapeTree<const HloValueSemantics*>& operand_semantics_shape_tree =
analysis_->GetInstructionSemantics(gather->operand(0));
analysis_->DeepCopyHloValueSemantics(gather, operand_semantics_shape_tree);
return absl::OkStatus();
}
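// A scatter merges the semantics of the operand (index 0) and the updates
// (index 2); the scatter indices do not contribute.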
absl::Status HloValueSemanticsPropagation::HandleScatter(
HloInstruction* scatter) {
RETURN_IF_ALREADY_PROPAGATED(scatter);
TF_ASSIGN_OR_RETURN(HloValueSemantics semantics,
ComputeSemanticsFromOperands(scatter, {0, 2}));
const HloValueSemantics* semantics_ptr = AddSemantics(semantics);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(scatter->shape(),
semantics_ptr);
analysis_->SetHloValueSemantics(scatter, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleAfterAll(
HloInstruction* after_all) {
RETURN_IF_ALREADY_PROPAGATED(after_all);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {after_all, {}});
ShapeTree<const HloValueSemantics*> semantics_shape_tree(after_all->shape(),
semantics);
analysis_->SetHloValueSemantics(after_all, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleAllReduce(
HloInstruction* all_reduce) {
RETURN_IF_ALREADY_PROPAGATED(all_reduce);
if (all_reduce->shape().IsArray()) {
return DefaultAction(all_reduce);
}
CHECK(all_reduce->shape().IsTuple());
return HandleTupleLike(all_reduce);
}
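// async-start produces ((operands...), result, context...). Operand
// semantics are copied into the {0, i} leaves. If the wrapped computation
// runs on an included thread, its root semantics populate {1}; otherwise
// {1}, like the context elements at {2} and {3}, is labeled kRandom.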
absl::Status HloValueSemanticsPropagation::HandleAsyncStart(
HloInstruction* async_start) {
RETURN_IF_ALREADY_PROPAGATED(async_start);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(async_start->shape(),
nullptr);
HloComputation* computation = async_start->async_wrapped_computation();
const bool is_thread_included = HloInstruction::IsThreadIncluded(
computation->execution_thread(), analysis_->execution_threads_);
if (is_thread_included) {
TF_RETURN_IF_ERROR(
analysis_->RunOnComputation(*computation, async_start->operands()));
const ShapeTree<const HloValueSemantics*>& root_semantics =
analysis_->GetInstructionSemantics(computation->root_instruction());
analysis_->DeepCopyHloValueSemantics(semantics_shape_tree, root_semantics,
{}, {1});
}
for (int operand_index = 0; operand_index < async_start->operand_count();
++operand_index) {
HloInstruction* operand = async_start->mutable_operand(operand_index);
const ShapeTree<const HloValueSemantics*>& operand_semantics_tree =
analysis_->GetInstructionSemantics(operand);
analysis_->DeepCopyHloValueSemantics(
semantics_shape_tree, operand_semantics_tree, {}, {0, operand_index});
}
semantics_shape_tree.ForEachMutableElement(
[&semantics_shape_tree, this, async_start, is_thread_included](
const ShapeIndex& index, const HloValueSemantics** semantics_ptr) {
if (!semantics_shape_tree.IsLeaf(index)) {
*semantics_ptr = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {async_start, {}});
return;
}
if ((!is_thread_included && index.front() == 1) || index.front() == 2 ||
index.front() == 3) {
*semantics_ptr = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kRandom, {async_start, {}});
}
});
analysis_->SetHloValueSemantics(async_start, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleAsyncDone(
HloInstruction* async_done) {
RETURN_IF_ALREADY_PROPAGATED(async_done);
const ShapeTree<const HloValueSemantics*>& operand_semantics_tree =
analysis_->GetInstructionSemantics(async_done->operand(0));
analysis_->DeepCopyHloValueSemantics(async_done, operand_semantics_tree, {1});
return absl::OkStatus();
}
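// All leaves of the infeed result are labeled kWeight (data fed from the
// host); interior tuple indices are kTupleOrToken.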
absl::Status HloValueSemanticsPropagation::HandleInfeed(
HloInstruction* infeed) {
RETURN_IF_ALREADY_PROPAGATED(infeed);
ShapeTree<const HloValueSemantics*> semantics_shape_tree(infeed->shape(),
nullptr);
semantics_shape_tree.ForEachMutableElement(
[this, &semantics_shape_tree, infeed](
const ShapeIndex& shape_index, const HloValueSemantics** semantics) {
if (semantics_shape_tree.IsLeaf(shape_index)) {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kWeight, {infeed, shape_index});
} else {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {infeed, shape_index});
}
});
analysis_->SetHloValueSemantics(infeed, semantics_shape_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleOutfeed(
HloInstruction* outfeed) {
RETURN_IF_ALREADY_PROPAGATED(outfeed);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {outfeed, {}});
ShapeTree<const HloValueSemantics*> outfeed_semantics_tree(outfeed->shape(),
semantics);
analysis_->SetHloValueSemantics(outfeed, outfeed_semantics_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleDomain(
HloInstruction* domain) {
RETURN_IF_ALREADY_PROPAGATED(domain);
HloInstruction* domain_operand = domain->mutable_operand(0);
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(domain_operand);
analysis_->DeepCopyHloValueSemantics(domain, operand_semantics);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleOptimizationBarrier(
HloInstruction* opt_barrier) {
RETURN_IF_ALREADY_PROPAGATED(opt_barrier);
HloInstruction* opt_barrier_operand = opt_barrier->mutable_operand(0);
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(opt_barrier_operand);
analysis_->DeepCopyHloValueSemantics(opt_barrier, operand_semantics);
return absl::OkStatus();
}
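// send produces (operand, u32[], token). Index {0} aliases the source
// buffer's semantics, the u32 context at {1} is labeled kRandom, and the
// remaining indices are kTupleOrToken.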
absl::Status HloValueSemanticsPropagation::HandleSend(HloInstruction* send) {
RETURN_IF_ALREADY_PROPAGATED(send);
ShapeTree<const HloValueSemantics*> semantics_tree(send->shape(), nullptr);
HloInstruction* source_buffer = send->mutable_operand(0);
const ShapeTree<const HloValueSemantics*>& source_buffer_semantics =
analysis_->GetInstructionSemantics(source_buffer);
analysis_->DeepCopyHloValueSemantics(semantics_tree, source_buffer_semantics,
{}, {0});
semantics_tree.ForEachMutableElement(
[this, send, &semantics_tree](const ShapeIndex& index,
const HloValueSemantics** semantics) {
if (!index.empty()) {
if (index.front() == 1 && semantics_tree.IsLeaf(index)) {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kRandom, {send, index});
return;
}
if (index.front() == 0) {
return;
}
}
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {send, index});
});
analysis_->SetHloValueSemantics(send, semantics_tree);
return absl::OkStatus();
}
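// recv mirrors the matching send: the send is propagated first (it may not
// have been visited yet), and its buffer semantics at index {0} are aliased
// into the recv's index {0}.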
absl::Status HloValueSemanticsPropagation::HandleRecv(HloInstruction* recv) {
RETURN_IF_ALREADY_PROPAGATED(recv);
TF_ASSIGN_OR_RETURN(HloInstruction * send,
analysis_->GetMatchingSendOrRecv(recv));
TF_RETURN_IF_ERROR(send->Accept(this));
ShapeTree<const HloValueSemantics*> semantics_tree(recv->shape(), nullptr);
const ShapeTree<const HloValueSemantics*>& send_buffer_semantics =
analysis_->GetInstructionSemantics(send);
analysis_->DeepCopyHloValueSemantics(semantics_tree, send_buffer_semantics,
{0}, {0});
semantics_tree.ForEachMutableElement(
[this, recv, &semantics_tree](const ShapeIndex& index,
const HloValueSemantics** semantics) {
if (!index.empty()) {
if (index.front() == 1 && semantics_tree.IsLeaf(index)) {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kRandom, {recv, index});
return;
}
if (index.front() == 0) {
return;
}
}
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {recv, index});
});
analysis_->SetHloValueSemantics(recv, semantics_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleSendDone(
HloInstruction* send_done) {
RETURN_IF_ALREADY_PROPAGATED(send_done);
const HloValueSemantics* semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {send_done, {}});
ShapeTree<const HloValueSemantics*> send_done_semantics_tree(
send_done->shape(), semantics);
analysis_->SetHloValueSemantics(send_done, send_done_semantics_tree);
return absl::OkStatus();
}
absl::Status HloValueSemanticsPropagation::HandleRecvDone(
HloInstruction* recv_done) {
RETURN_IF_ALREADY_PROPAGATED(recv_done);
ShapeTree<const HloValueSemantics*> semantics_tree(recv_done->shape(),
nullptr);
HloInstruction* recv = recv_done->mutable_operand(0);
const ShapeTree<const HloValueSemantics*>& recv_semantics =
analysis_->GetInstructionSemantics(recv);
analysis_->DeepCopyHloValueSemantics(semantics_tree, recv_semantics, {0},
{0});
semantics_tree.ForEachMutableElement(
[this, recv_done](const ShapeIndex& index,
const HloValueSemantics** semantics) {
if (!index.empty() && index.front() == 0) {
return;
}
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {recv_done, index});
});
analysis_->SetHloValueSemantics(recv_done, semantics_tree);
return absl::OkStatus();
}
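// Shared by tuple-forming ops (tuple, tuple-shaped all-reduce): each
// operand's semantics are copied into the matching tuple element and the
// root index is labeled kTupleOrToken.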
absl::Status HloValueSemanticsPropagation::HandleTupleLike(
HloInstruction* tuple_like) {
ShapeTree<const HloValueSemantics*> semantics_shape_tree(tuple_like->shape(),
nullptr);
for (int operand_index = 0; operand_index < tuple_like->operand_count();
++operand_index) {
const HloInstruction* operand = tuple_like->operand(operand_index);
const ShapeTree<const HloValueSemantics*>& operand_semantics =
analysis_->GetInstructionSemantics(operand);
analysis_->DeepCopyHloValueSemantics(
semantics_shape_tree, operand_semantics, {}, {operand_index});
}
semantics_shape_tree.ForEachMutableElement(
[tuple_like, this](const ShapeIndex& index,
const HloValueSemantics** semantics) {
if (index.empty()) {
*semantics = analysis_->NewHloValueSemantics(
HloValueSemanticLabel::kTupleOrToken, {tuple_like, {}});
return;
}
});
analysis_->SetHloValueSemantics(tuple_like, semantics_shape_tree);
return absl::OkStatus();
}
} | #include "xla/service/hlo_value_semantics_analysis.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
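// A small three-layer MLP (784 -> 128 -> 32 -> 10) training loop fed via
// infeed, shared by the semantics, depth, and height tests below.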
const char kMnistHlo[] = R"(
HloModule MnistTrainingLoopWithInfeed.140, entry_computation_layout={(f32[784,128]{1,0:T(8,128)},f32[128]{0:T(256)},f32[128,32]{1,0:T(8,128)},f32[32]{0:T(256)},f32[32,10]{1,0:T(8,128)},f32[10]{0:T(256)})->(f32[784,128]{1,0:T(8,128)}, f32[128]{0:T(256)}, f32[128,32]{1,0:T(8,128)}, f32[32]{0:T(256)}, f32[32,10]{1,0:T(8,128)}, f32[10]{0:T(256)})}
relu.9 {
x.10 = f32[] parameter(0)
constant.11 = f32[] constant(0)
ROOT maximum.12 = f32[] maximum(x.10, constant.11)
}
max_F32.17 {
lhs.18 = f32[] parameter(0)
rhs.19 = f32[] parameter(1)
ROOT maximum.20 = f32[] maximum(lhs.18, rhs.19)
}
add_F32.1 {
lhs.22 = f32[] parameter(0)
rhs.23 = f32[] parameter(1)
ROOT add.24 = f32[] add(lhs.22, rhs.23)
}
relu_gradients.29 {
activation.30 = f32[] parameter(0)
constant.32 = f32[] constant(0)
compare.33 = pred[] compare(activation.30, constant.32), direction=GT
backprop.31 = f32[] parameter(1)
ROOT select.34 = f32[] select(compare.33, backprop.31, constant.32)
}
body.49 {
after-all.51 = token[] after-all()
infeed.52 = ((f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]), token[]) infeed(after-all.51)
get.53 = (f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]) get-tuple-element(infeed.52), index=0
get.54 = f32[100,784]{1,0} get-tuple-element(get.53), index=0
prev.50 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
get.57 = f32[784,128]{1,0} get-tuple-element(prev.50), index=0
dot.63 = f32[100,128]{1,0} dot(get.54, get.57), lhs_contracting_dims={1}, rhs_contracting_dims={0}
get.58 = f32[128]{0} get-tuple-element(prev.50), index=1
broadcast.64 = f32[100,128]{1,0} broadcast(get.58), dimensions={1}
add.65 = f32[100,128]{1,0} add(dot.63, broadcast.64)
map.66 = f32[100,128]{1,0} map(add.65), dimensions={0,1}, to_apply=relu.9
get.59 = f32[128,32]{1,0} get-tuple-element(prev.50), index=2
dot.67 = f32[100,32]{1,0} dot(map.66, get.59), lhs_contracting_dims={1}, rhs_contracting_dims={0}
get.60 = f32[32]{0} get-tuple-element(prev.50), index=3
broadcast.68 = f32[100,32]{1,0} broadcast(get.60), dimensions={1}
add.69 = f32[100,32]{1,0} add(dot.67, broadcast.68)
map.70 = f32[100,32]{1,0} map(add.69), dimensions={0,1}, to_apply=relu.9
get.61 = f32[32,10]{1,0} get-tuple-element(prev.50), index=4
dot.71 = f32[100,10]{1,0} dot(map.70, get.61), lhs_contracting_dims={1}, rhs_contracting_dims={0}
get.62 = f32[10]{0} get-tuple-element(prev.50), index=5
broadcast.72 = f32[100,10]{1,0} broadcast(get.62), dimensions={1}
add.73 = f32[100,10]{1,0} add(dot.71, broadcast.72)
constant.74 = f32[] constant(-inf)
reduce.75 = f32[100]{0} reduce(add.73, constant.74), dimensions={1}, to_apply=max_F32.17
broadcast.76 = f32[100,10]{1,0} broadcast(reduce.75), dimensions={0}
subtract.77 = f32[100,10]{1,0} subtract(add.73, broadcast.76)
exponential.78 = f32[100,10]{1,0} exponential(subtract.77)
constant.79 = f32[] constant(0)
reduce.80 = f32[100]{0} reduce(exponential.78, constant.79), dimensions={1}, to_apply=add_F32.1
broadcast.81 = f32[100,10]{1,0} broadcast(reduce.80), dimensions={0}
divide.82 = f32[100,10]{1,0} divide(exponential.78, broadcast.81)
get.55 = f32[100,10]{1,0} get-tuple-element(get.53), index=1
subtract.83 = f32[100,10]{1,0} subtract(divide.82, get.55)
transpose.88 = f32[10,32]{0,1} transpose(get.61), dimensions={1,0}
dot.89 = f32[100,32]{1,0} dot(subtract.83, transpose.88), lhs_contracting_dims={1}, rhs_contracting_dims={0}
map.90 = f32[100,32]{1,0} map(map.70, dot.89), dimensions={0,1}, to_apply=relu_gradients.29
transpose.95 = f32[32,128]{0,1} transpose(get.59), dimensions={1,0}
dot.96 = f32[100,128]{1,0} dot(map.90, transpose.95), lhs_contracting_dims={1}, rhs_contracting_dims={0}
map.97 = f32[100,128]{1,0} map(map.66, dot.96), dimensions={0,1}, to_apply=relu_gradients.29
transpose.98 = f32[784,100]{0,1} transpose(get.54), dimensions={1,0}
dot.99 = f32[784,128]{1,0} dot(transpose.98, map.97), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.104 = f32[] constant(0.01)
broadcast.105 = f32[784,128]{1,0} broadcast(constant.104), dimensions={}
multiply.106 = f32[784,128]{1,0} multiply(dot.99, broadcast.105)
subtract.107 = f32[784,128]{1,0} subtract(get.57, multiply.106)
reduce.101 = f32[128]{0} reduce(map.97, constant.79), dimensions={0}, to_apply=add_F32.1
broadcast.109 = f32[128]{0} broadcast(constant.104), dimensions={}
multiply.110 = f32[128]{0} multiply(reduce.101, broadcast.109)
subtract.111 = f32[128]{0} subtract(get.58, multiply.110)
transpose.91 = f32[128,100]{0,1} transpose(map.66), dimensions={1,0}
dot.92 = f32[128,32]{1,0} dot(transpose.91, map.90), lhs_contracting_dims={1}, rhs_contracting_dims={0}
broadcast.113 = f32[128,32]{1,0} broadcast(constant.104), dimensions={}
multiply.114 = f32[128,32]{1,0} multiply(dot.92, broadcast.113)
subtract.115 = f32[128,32]{1,0} subtract(get.59, multiply.114)
reduce.94 = f32[32]{0} reduce(map.90, constant.79), dimensions={0}, to_apply=add_F32.1
broadcast.117 = f32[32]{0} broadcast(constant.104), dimensions={}
multiply.118 = f32[32]{0} multiply(reduce.94, broadcast.117)
subtract.119 = f32[32]{0} subtract(get.60, multiply.118)
transpose.84 = f32[32,100]{0,1} transpose(map.70), dimensions={1,0}
dot.85 = f32[32,10]{1,0} dot(transpose.84, subtract.83), lhs_contracting_dims={1}, rhs_contracting_dims={0}
broadcast.121 = f32[32,10]{1,0} broadcast(constant.104), dimensions={}
multiply.122 = f32[32,10]{1,0} multiply(dot.85, broadcast.121)
subtract.123 = f32[32,10]{1,0} subtract(get.61, multiply.122)
reduce.87 = f32[10]{0} reduce(subtract.83, constant.79), dimensions={0}, to_apply=add_F32.1
broadcast.125 = f32[10]{0} broadcast(constant.104), dimensions={}
multiply.126 = f32[10]{0} multiply(reduce.87, broadcast.125)
subtract.127 = f32[10]{0} subtract(get.62, multiply.126)
get.56 = pred[] get-tuple-element(get.53), index=2
ROOT tuple.128 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(subtract.107, subtract.111, subtract.115, subtract.119, subtract.123, subtract.127, get.56)
}
condition.129 {
prev.130 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
ROOT get.131 = pred[] get-tuple-element(prev.130), index=6
}
ENTRY MnistTrainingLoopWithInfeed.140 {
layer1_weights.1 = f32[784,128]{1,0} parameter(0)
layer1_biases.2 = f32[128]{0} parameter(1)
layer2_weights.3 = f32[128,32]{1,0} parameter(2)
layer2_biases.4 = f32[32]{0} parameter(3)
layer3_weights.5 = f32[32,10]{1,0} parameter(4)
layer3_biases.6 = f32[10]{0} parameter(5)
constant.7 = pred[] constant(true)
tuple.8 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(layer1_weights.1, layer1_biases.2, layer2_weights.3, layer2_biases.4, layer3_weights.5, layer3_biases.6, constant.7)
while.132 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) while(tuple.8), condition=condition.129, body=body.49
get.133 = f32[784,128]{1,0} get-tuple-element(while.132), index=0
get.134 = f32[128]{0} get-tuple-element(while.132), index=1
get.135 = f32[128,32]{1,0} get-tuple-element(while.132), index=2
get.136 = f32[32]{0} get-tuple-element(while.132), index=3
get.137 = f32[32,10]{1,0} get-tuple-element(while.132), index=4
get.138 = f32[10]{0} get-tuple-element(while.132), index=5
ROOT tuple.139 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}) tuple(get.133, get.134, get.135, get.136, get.137, get.138)
}
)";
class HloValueSemanticsAnalysisTest : public HloTestBase {
public:
bool HasLabel(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name,
const HloValueSemanticLabel& expected_label) {
HloInstruction* instruction = FindInstruction(module, instruction_name);
const HloValueSemantics* semantics =
hlo_value_semantics_analysis.GetSemantics(instruction);
LOG(INFO) << "instruction: " << instruction->ToString()
<< semantics->ToString();
return semantics->label() == expected_label;
}
bool IsStatic(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kStatic);
}
bool IsWeight(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kWeight);
}
bool IsActivation(
const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kActivation);
}
bool IsActivationGradient(
const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kActivationGradient);
}
bool IsWeightGradient(
const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kWeightGradient);
}
bool IsTupleOrToken(
const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
HloModule* module, absl::string_view instruction_name) {
return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
HloValueSemanticLabel::kTupleOrToken);
}
};
TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) {
const std::string module_str = R"(
HloModule OneMatmul
region_0.39 {
Arg_0.40 = f32[] parameter(0)
Arg_1.41 = f32[] parameter(1)
ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41)
}
ENTRY entry {
Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.5 = f32[] constant(0), sharding={replicated}
broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1}
maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1}
compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1}
constant.4 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1}
select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.11 = f32[] constant(-0.01), sharding={replicated}
broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1}
multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1}
add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1}
reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated}
ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                   /*num_partitions=*/2));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy"));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
EXPECT_TRUE(
IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35"));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
}
TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) {
const std::string module_str = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
tgte1 = f32[4] ceil(tparam)
ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1)
}
branch1 {
fparam = f32[4] parameter(0)
%async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
%async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                   /*num_partitions=*/2));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(),
"conditional"));
}
TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) {
const std::string module_str = R"(
HloModule TwoMatmuls
region_0.44 {
Arg_0.45 = f32[] parameter(0)
Arg_1.46 = f32[] parameter(1)
ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46)
}
ENTRY entry {
Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1}
dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
constant.5 = f32[] constant(0), sharding={replicated}
broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1}
maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1}
compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1}
constant.4 = f32[] constant(1), sharding={replicated}
broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1}
select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1}
dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.12 = f32[] constant(-0.01), sharding={replicated}
broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1}
multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1}
add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1}
dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1}
multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1}
add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1}
reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated}
ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                   /*num_partitions=*/2));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_FALSE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "copy"));
EXPECT_FALSE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
EXPECT_FALSE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
EXPECT_TRUE(
IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40"));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6"));
}
TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) {
const std::string module_str = R"(
HloModule RepeatWhile
region_0.52 {
arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated}
constant.61 = s32[] constant(1), sharding={replicated}
add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated}
get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1}
constant.69 = s32[] constant(0), sharding={replicated}
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated}
constant.68 = s32[] constant(3), sharding={replicated}
add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated}
select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated}
dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1}
dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1}
dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.43 = f32[] constant(0), sharding={replicated}
broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1}
maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1}
get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1}
reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1}
compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), direction=EQ, sharding={devices=[2,1]0,1}
constant.42 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1}
select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1}
reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
}
region_1.107 {
arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated}
constant.116 = s32[] constant(3)
ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT
}
region_2.126 {
Arg_0.127 = f32[] parameter(0)
Arg_1.128 = f32[] parameter(1)
ROOT add.129 = f32[] add(Arg_0.127, Arg_1.128)
}
wide.wide.region_3.156.clone.clone {
wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated}
constant.34 = s32[] constant(1), sharding={replicated}
add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated}
get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1}
constant.35 = s32[] constant(3), sharding={replicated}
subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated}
constant.6..sunk.4 = s32[] constant(-1), sharding={replicated}
add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated}
constant.36 = s32[] constant(0), sharding={replicated}
compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated}
constant.26..sunk.1 = s32[] constant(2), sharding={replicated}
add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated}
select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated}
dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1}
multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1}
get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=7, sharding={devices=[1,1,2]0,1}
dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1}
dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1}
dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1}
dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1}
get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1}
dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1}
dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1}
get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1}
get-tuple-element.189 = f32[3,4,128]{2,1,0} get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1}
dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1}
reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1}
dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1}
dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1}
ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
}
wide.wide.region_4.218.clone.clone {
wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated}
constant.28 = s32[] constant(3)
ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT
}
ENTRY entry {
Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
constant.45 = s32[] constant(0), sharding={replicated}
constant.23 = f32[] constant(1), sharding={replicated}
broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1}
constant.21 = f32[] constant(0), sharding={replicated}
broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1}
Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1}
tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1}
get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1}
tuple.18 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
while.3 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) while(tuple.18), condition=wide.wide.region_4.218.clone.clone, body=wide.wide.region_3.156.clone.clone, sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.234 = f32[3,32,128]{2,1,0} get-tuple-element(while.3), index=2, sharding={devices=[1,2,1]0,1}
constant.16 = f32[] constant(-0.01), sharding={replicated}
broadcast.17 = f32[3,32,128]{2,1,0} broadcast(constant.16), dimensions={}, sharding={devices=[1,2,1]0,1}
multiply.243 = f32[3,32,128]{2,1,0} multiply(get-tuple-element.234, broadcast.17), sharding={devices=[1,2,1]0,1}
add.255 = f32[3,32,128]{2,1,0} add(Arg_1.2, multiply.243), sharding={devices=[1,2,1]0,1}
get-tuple-element.235 = f32[3,128,32]{2,1,0} get-tuple-element(while.3), index=3, sharding={devices=[1,1,2]0,1}
broadcast.15 = f32[3,128,32]{2,1,0} broadcast(constant.16), dimensions={}, sharding={devices=[1,1,2]0,1}
multiply.244 = f32[3,128,32]{2,1,0} multiply(get-tuple-element.235, broadcast.15), sharding={devices=[1,1,2]0,1}
add.256 = f32[3,128,32]{2,1,0} add(Arg_2.3, multiply.244), sharding={devices=[1,1,2]0,1}
get-tuple-element.120 = f32[4,32]{1,0} get-tuple-element(while.118), index=1, sharding={devices=[2,1]0,1}
reduce.130 = f32[] reduce(get-tuple-element.120, constant.21), dimensions={0,1}, to_apply=region_2.126, sharding={replicated}
ROOT tuple.271 = (f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[]) tuple(add.255, add.256, reduce.130), sharding={{devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/1,
                                   /*num_partitions=*/2));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(),
"get-tuple-element.55"));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "reshape.74"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "reshape.79"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "reshape.22"));
EXPECT_TRUE(
IsStatic(*hlo_value_semantics_analysis, module.get(), "reshape.95"));
EXPECT_TRUE(IsStatic(*hlo_value_semantics_analysis, module.get(),
"dynamic-update-slice.99"));
EXPECT_TRUE(IsStatic(*hlo_value_semantics_analysis, module.get(),
"get-tuple-element.180"));
EXPECT_TRUE(IsStatic(*hlo_value_semantics_analysis, module.get(),
"get-tuple-element.190"));
EXPECT_TRUE(
IsStatic(*hlo_value_semantics_analysis, module.get(), "reshape.21"));
EXPECT_TRUE(
IsStatic(*hlo_value_semantics_analysis, module.get(), "multiply.3"));
EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.20"));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "reshape.23"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.21"));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "reshape.24"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.22"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "reshape.26"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.23"));
}
TEST_F(HloValueSemanticsAnalysisTest, ConvWithClamp) {
const std::string module_str = R"(
HloModule ConvWithClamp
ENTRY entry {
constant.123 = bf16[]{:T(256)} constant(127)
constant.127 = bf16[]{:T(256)} constant(-128)
arg_0 = bf16[128,14,14,1024]{3,0,2,1:T(8,128)(2,1)} parameter(0)
broadcast.819 = bf16[1,1,1024,512]{3,2,1,0:T(8,128)(2,1)} broadcast(constant.127), dimensions={}
arg_1 = bf16[1,1,1024,512]{3,2,1,0:T(8,128)(2,1)} parameter(1)
broadcast.818 = bf16[1,1,1024,512]{3,2,1,0:T(8,128)(2,1)} broadcast(constant.123), dimensions={}
clamp.42 = bf16[1,1,1024,512]{3,2,1,0:T(8,128)(2,1)} clamp(broadcast.819, arg_1, broadcast.818)
round-nearest-even.42 = bf16[1,1,1024,512]{3,2,1,0:T(8,128)(2,1)} round-nearest-even(clamp.42)
convert.219 = s8[1,1,1024,512]{3,2,1,0:T(8,128)(4,1)} convert(round-nearest-even.42)
ROOT convolution.43 = bf16[128,14,14,512]{3,0,2,1:T(8,128)(2,1)} convolution(arg_0, convert.219), window={size=1x1}, dim_labels=b01f_01io->b01f
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(module_str,
                                                       /*replica_count=*/1,
                                                       /*num_partitions=*/1));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_TRUE(
IsWeight(*hlo_value_semantics_analysis, module.get(), "convert.219"));
}
TEST_F(HloValueSemanticsAnalysisTest, MnistTrainingLoop) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kMnistHlo,
                                                       /*replica_count=*/1,
                                                       /*num_partitions=*/1));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(*module));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.63"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.67"));
EXPECT_TRUE(
IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.71"));
EXPECT_TRUE(
IsWeightGradient(*hlo_value_semantics_analysis, module.get(), "dot.85"));
EXPECT_TRUE(IsActivationGradient(*hlo_value_semantics_analysis, module.get(),
"dot.89"));
EXPECT_TRUE(
IsWeightGradient(*hlo_value_semantics_analysis, module.get(), "dot.92"));
EXPECT_TRUE(IsActivationGradient(*hlo_value_semantics_analysis, module.get(),
"dot.96"));
EXPECT_TRUE(
IsWeightGradient(*hlo_value_semantics_analysis, module.get(), "dot.99"));
}
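// Einsum depth measures how many dots (einsums) lie between an instruction
// and the module root, so earlier forward-pass dots have larger depths.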
class EinsumDepthAnalysisTest : public HloTestBase {
public:
int GetInstructionDepth(const EinsumDepthMap& depth_map,
HloComputation* computation, absl::string_view name) {
HloInstruction* instruction = computation->GetInstructionWithName(name);
auto depth_iter = depth_map.find(instruction);
EXPECT_NE(depth_iter, depth_map.end());
return depth_iter->second.element({});
}
};
TEST_F(EinsumDepthAnalysisTest, MnistTrainingLoop) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kMnistHlo,
                                                       /*replica_count=*/1,
                                                       /*num_partitions=*/1));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<EinsumDepthAnalysis> einsum_depth_analysis,
EinsumDepthAnalysis::Run(*module->entry_computation(),
SendRecvGroupMap(*module)));
const EinsumDepthMap& einsum_depth_map =
einsum_depth_analysis->GetEinsumDepthMap();
HloComputation* computation = module->GetComputationWithName("body.49");
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.63"), 5);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.67"), 4);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.71"), 3);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.89"), 2);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.96"), 1);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.92"), 0);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.99"), 0);
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "dot.85"), 0);
}
TEST_F(EinsumDepthAnalysisTest, HandleConditional) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
%async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
ROOT %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
}
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<EinsumDepthAnalysis> einsum_depth_analysis,
EinsumDepthAnalysis::Run(*module->entry_computation(),
SendRecvGroupMap(*module)));
const EinsumDepthMap& einsum_depth_map =
einsum_depth_analysis->GetEinsumDepthMap();
HloComputation* computation = module->GetComputationWithName("entry");
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "conditional"),
0);
}
TEST_F(EinsumDepthAnalysisTest, HandleAfterAll) {
const char* const hlo_string = R"(
ENTRY entry {
after-all.1 = token[] after-all()
parameter.1 = f32[] parameter(0)
send.1 = (f32[], u32[], token[]) send(parameter.1, after-all.1), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="tf_rendezvous",_xla_host_transfer_rendezvous="rendezvous1"}
send-done.1 = token[] send-done(send.1), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="tf_rendezvous",_xla_host_transfer_rendezvous="rendezvous1"}
ROOT after-all.2 = token[] after-all(send-done.1), frontend_attributes={_xla_host_transfer_handler_name="tf_rendezvous",_xla_host_transfer_rendezvous="rendezvous1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<EinsumDepthAnalysis> einsum_depth_analysis,
EinsumDepthAnalysis::Run(*module->entry_computation(),
SendRecvGroupMap(*module)));
const EinsumDepthMap& einsum_depth_map =
einsum_depth_analysis->GetEinsumDepthMap();
HloComputation* computation = module->GetComputationWithName("entry");
EXPECT_EQ(GetInstructionDepth(einsum_depth_map, computation, "after-all.2"),
0);
}
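// Test fixture for EinsumHeightAnalysis; mirrors EinsumDepthAnalysisTest but
// reads from the height map instead.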
class EinsumHeightAnalysisTest : public HloTestBase {
public:
int GetInstructionHeight(const EinsumHeightMap& height_map,
HloComputation* computation,
absl::string_view name) {
HloInstruction* instruction = computation->GetInstructionWithName(name);
auto height_iter = height_map.find(instruction);
EXPECT_NE(height_iter, height_map.end());
return height_iter->second.element({});
}
};
TEST_F(EinsumHeightAnalysisTest, MnistTrainingLoop) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kMnistHlo,
                                                       /*replica_count=*/1,
                                                       /*num_partitions=*/1));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<EinsumHeightAnalysis> einsum_height_analysis,
EinsumHeightAnalysis::Run(*module->entry_computation(),
SendRecvGroupMap(*module)));
const EinsumHeightMap& einsum_height_map =
einsum_height_analysis->GetEinsumHeightMap();
HloComputation* computation = module->GetComputationWithName("body.49");
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.63"), 1);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.67"), 2);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.71"), 3);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.89"), 4);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.96"), 5);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.92"), 5);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.99"), 6);
EXPECT_EQ(GetInstructionHeight(einsum_height_map, computation, "dot.85"), 4);
}
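// Restricting the analysis to the main execution thread should let Run
// succeed even though the foreign-thread async computation is not analyzed.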
TEST_F(HloValueSemanticsAnalysisTest,
HandleIncompleteForeignThreadComputation) {
constexpr std::string_view hlo = R"(
HloModule Module
ENTRY entry {
foreign-call-start = ((), s32[], s32[]) custom-call-start(), custom_call_target="ThreadSpecificCustomCall", async_execution_thread="foreign_thread"
ROOT foreign-call-done = s32[] custom-call-done(foreign-call-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
HloValueSemanticsAnalysis::Run(
*module,
          /*execution_threads=*/{HloInstruction::kMainExecutionThread}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_value_semantics_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_value_semantics_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e7f6450c-05e8-4958-bf2e-9669cd67d5e5 | cpp | tensorflow/tensorflow | simplify_fp_conversions | third_party/xla/xla/service/simplify_fp_conversions.cc | third_party/xla/xla/service/gpu/tests/simplify_fp_conversions_test.cc | #include "xla/service/simplify_fp_conversions.h"
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
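// Collapses chains of floating-point convert instructions: a chain
// convert(convert(...(x))) of length >= 2 is replaced either by x itself
// (when the end types already match) or by a single convert from x's type to
// the final type.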
absl::StatusOr<bool> RunOnComputation(HloComputation& computation) {
bool changed = false;
for (HloInstruction* instruction : computation.MakeInstructionPostOrder()) {
HloInstruction* input = instruction;
size_t convert_chain_length = 0;
while (input->opcode() == HloOpcode::kConvert &&
primitive_util::IsFloatingPointType(input->shape().element_type())) {
input = input->mutable_operand(0);
++convert_chain_length;
}
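    // A chain of fewer than two converts has nothing to simplify.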
if (convert_chain_length < 2) {
continue;
}
if (instruction->shape().element_type() == input->shape().element_type()) {
TF_RETURN_IF_ERROR(
instruction->parent()->ReplaceInstruction(instruction, input));
} else {
TF_RETURN_IF_ERROR(instruction->parent()->ReplaceWithNewInstruction(
instruction,
HloInstruction::CreateConvert(instruction->shape(), input)));
}
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> SimplifyFPConversions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, absl::StrFormat("SimplifyFPConversions::Run() with before:\n%s",
module->ToString()));
bool changed = false;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(*computation));
changed |= comp_changed;
}
XLA_VLOG_LINES(2,
absl::StrFormat("SimplifyFPConversions::Run() with after:\n%s",
module->ToString()));
return changed;
}
} | #include <string_view>
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
namespace {
class SimplifyFPConversionsTest : public HloTestBase {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_allow_excess_precision(
enable_simplify_all_fp_conversions_);
return debug_options;
}
bool SupportsMultiplyBF16() {
const auto& device_description =
backend().default_stream_executor()->GetDeviceDescription();
const auto& cc = device_description.gpu_compute_capability();
return std::holds_alternative<se::CudaComputeCapability>(cc) &&
std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper();
}
void SetEnableSimplifyFpConversions(bool enable_simplify_all_fp_conversions) {
enable_simplify_all_fp_conversions_ = enable_simplify_all_fp_conversions;
}
static constexpr std::string_view kHloText = R"(
HloModule module
ENTRY main {
param0 = bf16[1536]{0} parameter(0)
param1 = bf16[4,1536]{1,0} parameter(1)
s = bf16[1536]{0} rsqrt(param0)
b = bf16[4,1536]{1,0} broadcast(s), dimensions={1}
ROOT d = bf16[4,1536]{1,0} multiply(b, param1)
}
)";
private:
bool enable_simplify_all_fp_conversions_ = false;
};
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsGetCleanedUp) {
SetEnableSimplifyFpConversions(true);
if (SupportsMultiplyBF16()) {
MatchOptimizedHlo(kHloText, R"(
)");
} else {
MatchOptimizedHlo(kHloText, R"(
)");
}
}
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsArePresentInTest) {
if (SupportsMultiplyBF16()) {
GTEST_SKIP() << "No double convert is expected on Hopper";
}
SetEnableSimplifyFpConversions(false);
MatchOptimizedHlo(kHloText, R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/simplify_fp_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/simplify_fp_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a00ebe9-a063-4714-bc93-24ea5bf8ed70 | cpp | tensorflow/tensorflow | fusion_constant_sinking | third_party/xla/xla/service/fusion_constant_sinking.cc | third_party/xla/xla/service/fusion_constant_sinking_test.cc | #include "xla/service/fusion_constant_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
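// Returns true if `operand` is a scalar constant that may be sunk into
// `fusion`: the fusion must be a loop or output fusion with more than one
// operand, and the corresponding fused parameter must not feed a
// single-operand nested fusion.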
bool CanSink(HloInstruction* fusion, const HloInstruction* operand) {
if (!fusion->IsLoopFusion() && !fusion->IsOutputFusion()) {
return false;
}
if (fusion->operand_count() == 1) {
return false;
}
if (!ShapeUtil::IsScalar(operand->shape()) || !operand->IsConstant()) {
return false;
}
int64_t operand_idx = fusion->operand_index(operand);
HloInstruction* fused_param = fusion->fused_parameter(operand_idx);
for (HloInstruction* user : fused_param->users()) {
if (user->opcode() == HloOpcode::kFusion && user->operand_count() == 1) {
return false;
}
}
return true;
}
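// Sinks a scalar constant into every fusion that can accept it. The fused
// clone of the constant is processed recursively so constants keep sinking
// through nested fusions.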
bool ProcessScalar(HloInstruction* scalar) {
if (!ShapeUtil::IsScalar(scalar->shape()) || !scalar->IsConstant()) {
return false;
}
bool processed = false;
std::vector<HloInstruction*> sinkable_users;
for (HloInstruction* use : scalar->users()) {
if (CanSink(use, scalar)) {
sinkable_users.push_back(use);
}
}
for (HloInstruction* use : sinkable_users) {
HloInstruction* fused_scalar = use->FuseInstruction(scalar);
processed = true;
ProcessScalar(fused_scalar);
}
return processed;
}
absl::StatusOr<bool> FusionConstantSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(3) << "HLO module before FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
bool changed = false;
for (HloComputation* c : module->MakeNonfusionComputations()) {
for (HloInstruction* i : c->MakeInstructionPostOrder()) {
changed |= ProcessScalar(i);
}
}
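  // Sinking duplicates constants into fusions and can leave the originals
  // dead in the outer computation, so clean up with DCE.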
if (changed) {
TF_ASSIGN_OR_RETURN(bool dce, HloDCE{}.Run(module, execution_threads));
changed |= dce;
}
VLOG(3) << "HLO module after FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
} | #include "xla/service/fusion_constant_sinking.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using FusionConstantSinkingTest = HloTestBase;
TEST_F(FusionConstantSinkingTest, SinkConstant) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[56,4096,4096], param_1: s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)} %constant.85694), dynamic_slice_sizes={1,4096,4096}
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation.slice
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation.slice")
->root_instruction(),
GmockMatch(match::DynamicSlice(match::Parameter(0), match::Constant(),
match::Constant(), match::Constant())));
}
TEST_F(FusionConstantSinkingTest, SingleOperandFusionNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[]) -> s8[1,4096,4096] {
param0 = s8[] parameter(0)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={}
}
ENTRY main {
c = s8[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SingleOperandUserNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_1: s32[]) -> s32[] {
p1 = s32[]{:T(128)} parameter(0)
%constant.85694 = s32[]{:T(128)} constant(10)
ROOT out = s32[] add(p1, %constant.85694)
}
%fused_computation (param_0.51117: s32[4096,4096], param_1:
s32[]) -> s32[4096,4096] {
%param_0.51117 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s32[] fusion(s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
%broadcast = s32[4096,4096]{1,0:T(8,128)(4,1)} broadcast(%inner.fusion), dimensions={}
ROOT out = s32[4096,4096] add(%broadcast, %param_0.51117)
}
ENTRY main {
p0 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s32[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s32[4096,4096]{1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, NonScalarNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[2], p1: s8[2,4096,4096]) -> s8[2,4096,4096] {
param0 = s8[2] parameter(0)
param1 = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(1)
bcast = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={0}
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} add(param1, bcast)
}
ENTRY main {
p = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s8[2]{0:T(128)} constant({10,20})
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[2]{0:T(128)} c, p), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SinkConstantNested) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)}
dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117,
s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)}
%constant.85694), dynamic_slice_sizes={1,4096,4096}
}
%fused_computation (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
ROOT %bitcast = s8[4096,4096]{1,0:T(8,128)(4,1)} bitcast(s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} %inner.fusion)
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c),
kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation")->num_parameters(), 1);
EXPECT_THAT(module->GetComputationWithName("fused_computation.inner")
->num_parameters(),
1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_constant_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_constant_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be5eb669-1f38-45f4-bdbc-69ad464cf683 | cpp | tensorflow/tensorflow | select_and_scatter_expander | third_party/xla/xla/service/select_and_scatter_expander.cc | third_party/xla/xla/service/select_and_scatter_expander_test.cc | #include "xla/service/select_and_scatter_expander.h"
#include <numeric>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
namespace xla {
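// Lowers select-and-scatter into a reduce-window followed by a scatter: the
// reduce-window carries, for every window, the currently selected value
// together with its coordinates (built from iotas, with -1 marking an
// uninitialized slot); the scatter then adds each source element at its
// selected coordinates on top of a broadcast of the init value.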
absl::StatusOr<HloInstruction*> SelectAndScatterExpander::ExpandInstruction(
HloInstruction* instruction) {
auto* computation = instruction->parent();
auto* sas = Cast<HloSelectAndScatterInstruction>(instruction);
auto* operand = sas->mutable_operand(0);
auto operand_shape = operand->shape();
auto* source = sas->mutable_operand(1);
auto* select = sas->select();
auto* init_value = sas->mutable_operand(2);
const auto iota_shape = ShapeUtil::ChangeElementType(operand_shape, S32);
const auto scalar_operand =
ShapeUtil::MakeScalarShape(operand->shape().element_type());
const auto scalar_iota =
ShapeUtil::MakeScalarShape(iota_shape.element_type());
const auto source_shape = source->shape();
const Shape iota_shape_reduced =
ShapeUtil::ChangeElementType(source_shape, S32);
std::vector<HloInstruction*> iotas;
iotas.reserve(operand_shape.rank());
for (int i = 0; i < operand_shape.rank(); ++i) {
iotas.push_back(
computation->AddInstruction(HloInstruction::CreateIota(iota_shape, i)));
}
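  // Build the reduction for the reduce-window. Operands are (value, indices...)
  // tuples; the original select computation decides which side wins, except
  // that a side whose index is still -1 (uninitialized) always loses.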
HloComputation* new_comp = [&]() -> HloComputation* {
HloComputation::Builder builder(
absl::StrCat(select->name(), ".reduce_window"));
auto rhs_begin = static_cast<int64_t>(iotas.size() + 1);
auto first_iota_index = 1;
auto* neg_one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
auto* first_lhs_iota =
builder.AddInstruction(HloInstruction::CreateParameter(
first_iota_index, scalar_iota, "iota_lhs"));
auto* first_rhs_iota =
builder.AddInstruction(HloInstruction::CreateParameter(
            first_iota_index + rhs_begin, scalar_iota, "iota_rhs"));
auto* lhs_first_in_window =
builder.AddInstruction(HloInstruction::CreateCompare(
sas->select()->root_instruction()->shape(), first_lhs_iota, neg_one,
Comparison::Direction::kNe, {}));
auto* rhs_first_in_window =
builder.AddInstruction(HloInstruction::CreateCompare(
sas->select()->root_instruction()->shape(), first_rhs_iota, neg_one,
Comparison::Direction::kNe, {}));
auto rhs_not_first_in_window = builder.AddInstruction(
HloInstruction::CreateUnary(sas->select()->root_instruction()->shape(),
HloOpcode::kNot, rhs_first_in_window));
auto* operand_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_operand, "operand_lhs"));
auto* operand_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
rhs_begin, scalar_operand, "operand_rhs"));
auto* call = builder.AddInstruction(
HloInstruction::CreateCall(sas->select()->root_instruction()->shape(),
{operand_lhs, operand_rhs}, sas->select()));
auto* pred = builder.AddInstruction(HloInstruction::CreateBinary(
call->shape(), HloOpcode::kAnd, call, lhs_first_in_window));
pred = builder.AddInstruction(HloInstruction::CreateBinary(
call->shape(), HloOpcode::kOr, pred, rhs_not_first_in_window));
std::vector<HloInstruction*> result_tuple;
result_tuple.push_back(builder.AddInstruction(HloInstruction::CreateTernary(
scalar_operand, HloOpcode::kSelect, pred, operand_lhs, operand_rhs)));
for (auto i = first_iota_index; i < rhs_begin; ++i) {
xla::HloInstruction *iota_lhs, *iota_rhs;
if (i == first_iota_index) {
iota_lhs = first_lhs_iota;
iota_rhs = first_rhs_iota;
} else {
iota_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(i, scalar_iota, "iota_lhs"));
iota_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
i + rhs_begin, scalar_iota, "iota_rhs"));
}
result_tuple.push_back(
builder.AddInstruction(HloInstruction::CreateTernary(
scalar_iota, HloOpcode::kSelect, pred, iota_lhs, iota_rhs)));
}
builder.AddInstruction(HloInstruction::CreateTuple(result_tuple));
auto* result = select->parent()->AddEmbeddedComputation(builder.Build());
if (!CallInliner::Inline(call).ok()) {
return nullptr;
}
return result;
}();
if (!new_comp) {
return nullptr;
}
auto num_reduce_values = iotas.size() + 1;
std::vector<HloInstruction*> ops;
ops.reserve(num_reduce_values);
ops.push_back(operand);
ops.insert(ops.end(), iotas.begin(), iotas.end());
auto* neg_one = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
std::vector<HloInstruction*> reduce_init_values;
reduce_init_values.reserve(num_reduce_values);
reduce_init_values.push_back(init_value);
for (auto i = 0; i < iotas.size(); ++i) {
reduce_init_values.push_back(neg_one);
}
std::vector<xla::Shape> shapes;
shapes.reserve(num_reduce_values);
shapes.push_back(source->shape());
for (auto i = 0; i < iotas.size(); ++i) {
shapes.push_back(iota_shape_reduced);
}
auto* reduce_window =
computation->AddInstruction(HloInstruction::CreateReduceWindow(
ShapeUtil::MakeTupleShape(shapes), ops, reduce_init_values,
sas->window(), new_comp));
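  // Reshape each selected-index output of the reduce-window to carry a
  // trailing unit dimension so the indices can be concatenated into the
  // scatter-indices operand.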
std::vector<HloInstruction*> iota_indices;
std::vector<int64_t> broadcasted_iota_dims;
broadcasted_iota_dims.reserve(iota_shape_reduced.rank() + 1);
broadcasted_iota_dims.insert(broadcasted_iota_dims.end(),
iota_shape_reduced.dimensions().begin(),
iota_shape_reduced.dimensions().end());
broadcasted_iota_dims.push_back(1);
auto broadcasted_iota_shape = ShapeUtil::MakeShape(
iota_shape_reduced.element_type(), broadcasted_iota_dims);
for (int i = 1; i < num_reduce_values; ++i) {
auto* element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(reduce_window, i));
iota_indices.push_back(computation->AddInstruction(
HloInstruction::CreateReshape(broadcasted_iota_shape, element)));
}
std::vector<int64_t> scatter_dims(operand->shape().rank());
std::iota(scatter_dims.begin(), scatter_dims.end(), 0);
auto* broadcasted_init_value = computation->AddInstruction(
HloInstruction::CreateBroadcast(instruction->shape(), init_value, {}));
std::vector<int64_t> concatenated_iotas_dims;
concatenated_iotas_dims.reserve(iota_indices.front()->shape().rank());
concatenated_iotas_dims.insert(concatenated_iotas_dims.end(),
broadcasted_iota_dims.begin(),
broadcasted_iota_dims.end());
concatenated_iotas_dims.back() = static_cast<int64_t>(iota_indices.size());
auto* indices = computation->AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(iota_shape.element_type(), concatenated_iotas_dims),
iota_indices, iota_shape.rank()));
ScatterDimensionNumbers dim_nums =
HloScatterInstruction::MakeScatterDimNumbers(
          /*update_window_dims=*/{},
          /*inserted_window_dims=*/scatter_dims,
          /*scatter_dims_to_operand_dims=*/scatter_dims,
          /*index_vector_dim=*/source->shape().rank());
return computation->AddInstruction(HloInstruction::CreateScatter(
      sas->shape(), /*operand=*/broadcasted_init_value,
      /*scatter_indices=*/indices, /*updates=*/source,
      /*update_computation=*/sas->scatter(), /*scatter_dim_numbers=*/dim_nums,
      /*indices_are_sorted=*/false, /*unique_indices=*/false));
}
bool SelectAndScatterExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kSelectAndScatter;
}
} | #include "xla/service/select_and_scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
constexpr absl::string_view kModuleStr =
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
})";
class SelectAndScatterExpanderTest : public HloTestBase {
protected:
void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
HloInstruction* inst = FindInstruction(module, inst_name);
inst->mutable_shape()->clear_layout();
}
};
TEST_F(SelectAndScatterExpanderTest, ReplacesSelectAndScatter) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
CHECK-NOT: select-and-scatter
)");
}
TEST_F(SelectAndScatterExpanderTest, CreatesReduceAndScatter) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
CHECK: reduce
CHECK: scatter
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/select_and_scatter_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/select_and_scatter_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d37885f2-9e7f-4cba-b154-6cdd91c29378 | cpp | tensorflow/tensorflow | while_loop_all_reduce_code_motion | third_party/xla/xla/service/while_loop_all_reduce_code_motion.cc | third_party/xla/xla/service/while_loop_all_reduce_code_motion_test.cc | #include "xla/service/while_loop_all_reduce_code_motion.h"
#include <memory>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
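// Records one accumulation of an all-reduce (or reduce-scatter) result into a
// loop-carried buffer: the add instruction, the buffer it adds into, the
// buffer's index in the while tuple, and the optional dynamic-slice /
// dynamic-update-slice pair around the accumulation.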
struct AccumulationContext {
HloInstruction* accumulation_instruction;
HloInstruction* accumulation_buffer;
int64_t param_tuple_index;
std::optional<HloInstruction*> dynamic_slice;
std::optional<HloInstruction*> dynamic_update_slice;
};
struct MovableAllReduceContext {
bool is_movable;
std::vector<AccumulationContext> accumulation_contexts;
};
bool IsZero(const HloInstruction* hlo) {
if (hlo->IsConstant() && hlo->shape().rank() == 0 &&
hlo->literal().IsZero({})) {
return true;
}
if (hlo->opcode() == HloOpcode::kBroadcast) {
return IsZero(hlo->operand(0));
}
return false;
}
bool IsValueReplicatedWithinEachAllReduceGroup(
const HloInstruction& instruction, const ShapeIndex& index,
CollectiveOpGroupMode all_reduce_group_mode,
absl::Span<const ReplicaGroup> replica_groups, int num_replicas,
int num_partitions,
const std::unique_ptr<HloReplicationAnalysis>&
cross_replica_replication_analysis,
const std::unique_ptr<HloReplicationAnalysis>&
cross_partition_replication_analysis) {
VLOG(5) << "IsValueReplicatedWithinEachAllReduceGroup,"
<< " all_reduce_group_mode: "
<< CollectiveOpGroupModeToString(all_reduce_group_mode);
switch (all_reduce_group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
return cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
case CollectiveOpGroupMode::kCrossPartition: {
return cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
return (cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups)) &&
(cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index));
}
case CollectiveOpGroupMode::kFlattenedID: {
if (num_replicas == 1) {
return cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index,
replica_groups);
}
if (num_partitions == 1) {
return cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index, replica_groups);
}
return (cross_replica_replication_analysis == nullptr ||
cross_replica_replication_analysis->HloInstructionIsReplicatedAt(
&instruction, index)) &&
(cross_partition_replication_analysis == nullptr ||
cross_partition_replication_analysis
->HloInstructionIsReplicatedAt(&instruction, index));
}
}
}
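// If `instruction` is a broadcast of a scalar, returns that scalar; otherwise
// returns nullptr.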
HloInstruction* GetEffectiveScalar(HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kBroadcast) {
return nullptr;
}
HloInstruction* operand = instruction->mutable_operand(0);
if (!ShapeUtil::IsScalar(operand->shape())) {
return nullptr;
}
return operand;
}
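// Decides whether `all_reduce` can be hoisted out of the while body. Only
// summation reductions over supported element types qualify, and every use of
// the result must be an accumulation into a loop-carried buffer that is not
// otherwise read inside the loop (modulo converts, reshapes, masking selects
// against zero, and matching dynamic-slice/dynamic-update-slice pairs).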
MovableAllReduceContext IsAllReduceMovable(
HloAllReduceInstructionBase* all_reduce, HloComputation* while_body,
const std::unique_ptr<HloReplicationAnalysis>&
cross_replica_replication_analysis,
const std::unique_ptr<HloReplicationAnalysis>&
cross_partition_replication_analysis) {
VLOG(4) << "IsAllReduceMovable: " << all_reduce->ToString();
std::optional<ReductionKind> reduction_type =
MatchReductionComputation(all_reduce->to_apply());
const bool all_reduce_is_summation =
reduction_type.has_value() && *reduction_type == ReductionKind::SUM;
const absl::InlinedVector<PrimitiveType, 12> kSupportedTypes{
BF16, F16, F32, F64, S8, S16, S32, S64, U8, U16, U32, U64};
if (!absl::c_linear_search(kSupportedTypes,
all_reduce->shape().element_type()) ||
!all_reduce_is_summation) {
    return MovableAllReduceContext{/*is_movable=*/false,
                                   /*accumulation_contexts=*/{}};
}
CollectiveOpGroupMode all_reduce_group_mode =
GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),
all_reduce->use_global_device_ids())
.value();
auto is_value_replicated_within_replica_group =
[&cross_replica_replication_analysis,
&cross_partition_replication_analysis, &all_reduce_group_mode,
all_reduce](const HloInstruction& instruction,
const ShapeIndex& index) -> bool {
bool is_replicated = IsValueReplicatedWithinEachAllReduceGroup(
instruction, index, all_reduce_group_mode, all_reduce->replica_groups(),
all_reduce->GetModule()->config().replica_count(),
all_reduce->GetModule()->config().num_partitions(),
cross_replica_replication_analysis,
cross_partition_replication_analysis);
VLOG(5) << "instruction: " << instruction.name()
<< " is_replicate: " << is_replicated;
return is_replicated;
};
struct BufferTupleIndex {
bool unsupported_operation{false};
std::optional<int64_t> tuple_index;
bool returned_from_computation{false};
std::optional<HloInstruction*> dynamic_slice;
std::optional<HloInstruction*> dynamic_update_slice;
};
const bool is_reduce_scatter =
all_reduce->opcode() == HloOpcode::kReduceScatter;
auto get_origin_tuple_index =
[is_reduce_scatter](HloInstruction* instruction) -> BufferTupleIndex {
VLOG(4) << "get_origin_tuple_index called on " << instruction->ToString();
BufferTupleIndex result;
while (!result.unsupported_operation) {
switch (instruction->opcode()) {
default: {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
break;
}
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kTranspose:
if (is_reduce_scatter) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
} else {
instruction = instruction->mutable_operand(0);
}
break;
case HloOpcode::kGetTupleElement: {
if (result.tuple_index.has_value()) {
result.unsupported_operation = true;
} else {
result.tuple_index =
Cast<HloGetTupleElementInstruction>(instruction)->tuple_index();
instruction = instruction->mutable_operand(0);
}
break;
}
case HloOpcode::kDynamicSlice: {
if (is_reduce_scatter) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
} else if (result.dynamic_slice.has_value()) {
VLOG(4) << "get_origin_tuple_index, instruction: ("
<< instruction->ToString()
<< "), we do not yet support more than 1 dynamic-slices on"
<< " the accumulation buffer.";
result.unsupported_operation = true;
} else {
result.dynamic_slice = instruction;
instruction = instruction->mutable_operand(0);
}
break;
}
case HloOpcode::kParameter: {
int parameter_number =
Cast<HloParameterInstruction>(instruction)->parameter_number();
CHECK_EQ(parameter_number, 0);
break;
}
}
if (instruction->opcode() == HloOpcode::kParameter) {
break;
}
}
return result;
};
auto get_output_tuple_index =
[is_reduce_scatter](HloInstruction* instruction,
HloComputation* while_body) -> BufferTupleIndex {
VLOG(4) << "get_output_tuple_index called on " << instruction->ToString();
BufferTupleIndex result;
std::stack<HloInstruction*> to_visit;
to_visit.push(instruction);
while (!to_visit.empty() && !result.unsupported_operation) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTranspose:
case HloOpcode::kSlice: {
if (is_reduce_scatter) {
result.unsupported_operation = true;
} else {
to_visit.push(user);
}
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (result.dynamic_update_slice.has_value() || is_reduce_scatter) {
result.unsupported_operation = true;
} else {
result.dynamic_update_slice = user;
to_visit.push(user);
}
break;
}
case HloOpcode::kTuple: {
if (result.tuple_index.has_value()) {
result.unsupported_operation = true;
} else {
result.tuple_index = user->operand_index(instruction);
if (while_body->root_instruction() == user) {
if (result.returned_from_computation) {
result.unsupported_operation = true;
}
result.returned_from_computation = true;
} else {
to_visit.push(user);
}
}
break;
}
default: {
VLOG(4) << "get_output_tuple_index, instruction: ("
<< instruction->ToString()
<< ") is an unsupported operation on accumulation buffer.";
result.unsupported_operation = true;
}
}
if (result.unsupported_operation) {
break;
}
}
}
return result;
};
auto is_buffer_used =
[&is_value_replicated_within_replica_group, is_reduce_scatter](
absl::Span<const AccumulationContext> accumulation_contexts,
HloComputation* while_body_computation) -> bool {
CHECK_EQ(while_body_computation->num_parameters(), 1);
HloInstruction* parameter_instruction =
while_body_computation->parameter_instruction(0);
for (const auto& accumulation : accumulation_contexts) {
HloInstruction* accumulation_instruction =
accumulation.accumulation_instruction;
int64_t tuple_index = accumulation.param_tuple_index;
std::stack<HloInstruction*> to_visit;
for (HloInstruction* user : parameter_instruction->users()) {
if (auto* gte = DynCast<HloGetTupleElementInstruction>(user)) {
if (gte->tuple_index() == tuple_index) {
to_visit.push(user);
}
} else {
return true;
}
}
while (!to_visit.empty()) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
VLOG(5) << "is_buffer_used, user: " << user->name();
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConvert:
case HloOpcode::kReshape:
case HloOpcode::kTranspose:
if (is_reduce_scatter) {
VLOG(4) << "buffer is used by " << user->ToString()
<< ", preventing the motion of reduce-scatter.";
return true;
}
to_visit.push(user);
break;
case HloOpcode::kSelect: {
if (((user->operand_index(instruction) == 1 &&
IsZero(user->operand(2))) ||
(user->operand_index(instruction) == 2 &&
IsZero(user->operand(1)))) &&
is_value_replicated_within_replica_group(*(user->operand(0)),
{})) {
to_visit.push(user);
} else {
return true;
}
break;
}
case HloOpcode::kAdd: {
if (user != accumulation_instruction) {
return true;
}
break;
}
case HloOpcode::kDynamicSlice: {
if (!accumulation.dynamic_slice.has_value() ||
user != *accumulation.dynamic_slice) {
return true;
}
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (!accumulation.dynamic_update_slice.has_value() ||
user != *accumulation.dynamic_update_slice) {
return true;
}
break;
}
default: {
VLOG(4) << "buffer is used by " << user->ToString()
<< ", preventing the motion of all-reduce.";
return true;
}
}
}
}
}
return false;
};
auto dus_matches_ds_offsets =
[](const HloInstruction& dynamic_slice,
const HloInstruction& dynamic_update_slice) -> bool {
if (dynamic_slice.operand_count() + 1 !=
dynamic_update_slice.operand_count()) {
return false;
}
for (int i = 1; i < dynamic_slice.operand_count(); ++i) {
if (dynamic_slice.operand(i) != dynamic_update_slice.operand(i + 1)) {
return false;
}
}
return true;
};
auto dus_indices_are_replicated =
[&is_value_replicated_within_replica_group](
const HloInstruction& dynamic_update_slice) -> bool {
for (int i = 2; i < dynamic_update_slice.operand_count(); ++i) {
if (!is_value_replicated_within_replica_group(
*dynamic_update_slice.operand(i), {})) {
return false;
}
}
return true;
};
std::vector<AccumulationContext> accumulation_contexts;
std::stack<HloInstruction*> to_visit;
bool is_all_reduce_movable = true;
to_visit.push(all_reduce);
while (!to_visit.empty() && is_all_reduce_movable) {
HloInstruction* instruction = to_visit.top();
to_visit.pop();
for (HloInstruction* user : instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kConvert:
to_visit.push(user);
break;
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
case HloOpcode::kTranspose:
case HloOpcode::kSlice: {
if (is_reduce_scatter) {
is_all_reduce_movable = false;
} else {
to_visit.push(user);
}
break;
}
case HloOpcode::kSelect: {
bool is_select_ok = [&]() {
bool operand_1_match = user->operand_index(instruction) == 1 &&
IsZero(user->operand(2));
bool operand_2_match = user->operand_index(instruction) == 2 &&
IsZero(user->operand(1));
if (!operand_1_match && !operand_2_match) {
return false;
}
if (!is_reduce_scatter) {
return true;
}
HloInstruction* predicate = user->mutable_operand(0);
return GetEffectiveScalar(predicate) != nullptr;
}();
if (is_select_ok) {
to_visit.push(user);
} else {
is_all_reduce_movable = false;
}
break;
}
case HloOpcode::kAdd: {
int64_t buffer_index = 1 - user->operand_index(instruction);
HloInstruction* accumulation_buffer =
user->mutable_operand(buffer_index);
auto origin_buffer_tuple_index =
get_origin_tuple_index(accumulation_buffer);
if (origin_buffer_tuple_index.unsupported_operation) {
is_all_reduce_movable = false;
break;
}
auto output_buffer_tuple_index =
get_output_tuple_index(user, while_body);
if (!output_buffer_tuple_index.unsupported_operation &&
output_buffer_tuple_index.returned_from_computation &&
origin_buffer_tuple_index.tuple_index.has_value() &&
output_buffer_tuple_index.tuple_index.has_value() &&
origin_buffer_tuple_index.tuple_index ==
output_buffer_tuple_index.tuple_index &&
(origin_buffer_tuple_index.dynamic_slice.has_value() ==
output_buffer_tuple_index.dynamic_update_slice.has_value()) &&
(!origin_buffer_tuple_index.dynamic_slice.has_value() ||
(dus_matches_ds_offsets(
**origin_buffer_tuple_index.dynamic_slice,
**output_buffer_tuple_index.dynamic_update_slice) &&
dus_indices_are_replicated(
**output_buffer_tuple_index.dynamic_update_slice)))) {
accumulation_contexts.push_back(AccumulationContext{
user, accumulation_buffer,
*output_buffer_tuple_index.tuple_index,
origin_buffer_tuple_index.dynamic_slice,
output_buffer_tuple_index.dynamic_update_slice});
} else {
is_all_reduce_movable = false;
}
break;
}
default: {
VLOG(4) << "get_accumulation_contexts, all-reduce result is used "
<< " by " << user->ToString() << ", not movable.";
is_all_reduce_movable = false;
}
}
}
}
if (is_buffer_used(accumulation_contexts, while_body)) {
is_all_reduce_movable = false;
}
return MovableAllReduceContext{is_all_reduce_movable, accumulation_contexts};
}
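// CreateNewWhileInit replaces each accumulation buffer in the while-init
// tuple with a zero-initialized buffer and remembers the original operands so
// the hoisted all-reduce results can be added back after the loop.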
struct WhileInitContext {
HloInstruction* while_init{nullptr};
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_old_buffer;
};
WhileInitContext CreateNewWhileInit(
HloInstruction* old_while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations) {
HloInstruction* old_while_init = old_while_instruction->mutable_operand(0);
HloComputation* while_parent = old_while_instruction->parent();
std::vector<HloInstruction*> new_while_init_elements(
old_while_init->operand_count(), nullptr);
for (const auto& all_reduce_and_accumulations_pair :
all_reduce_to_accumulations) {
const std::vector<AccumulationContext>& accumulations =
all_reduce_and_accumulations_pair.second;
HloInstruction* loop_all_reduce = all_reduce_and_accumulations_pair.first;
for (auto& accumulation_context : accumulations) {
int64_t tuple_index = accumulation_context.param_tuple_index;
HloInstruction* old_buffer = old_while_init->mutable_operand(tuple_index);
const Shape& accumulation_shape =
loop_all_reduce->opcode() == HloOpcode::kAllReduce
? old_buffer->shape()
: loop_all_reduce->operand(0)->shape();
HloInstruction* new_buffer = while_parent->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromDimensions(
accumulation_shape.element_type(),
accumulation_shape.dimensions())));
new_while_init_elements[tuple_index] = new_buffer;
}
}
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_old_buffer;
for (int i = 0; i < old_while_init->operand_count(); i++) {
if (!new_while_init_elements[i]) {
new_while_init_elements[i] = old_while_init->mutable_operand(i);
} else {
tuple_index_to_old_buffer[i] = old_while_init->mutable_operand(i);
}
}
HloInstruction* new_while_init = while_parent->AddInstruction(
HloInstruction::CreateTuple(new_while_init_elements));
return WhileInitContext{new_while_init, tuple_index_to_old_buffer};
}
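// For hoisted reduce-scatters the in-loop accumulator must hold the
// pre-scatter (unscattered) shape; patch the body/condition parameter shapes,
// the GTEs and adds on the accumulator, and rewrite masking selects to
// operate on the reduce-scatter operand instead of its result.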
absl::Status ChangeAccumulatorShapesInLoopBodies(
HloInstruction* old_while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations) {
HloComputation* body = old_while_instruction->while_body();
HloComputation* cond = old_while_instruction->while_condition();
absl::flat_hash_map<Shape, HloInstruction*> zeros;
auto create_zero_of_shape = [&zeros, body](const Shape& shape) {
auto it = zeros.find(shape);
if (it != zeros.end()) {
return it->second;
}
HloInstruction* zero = body->AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(shape)));
zeros[shape] = zero;
return zero;
};
for (const auto& [loop_reduce_scatter, accumulations] :
all_reduce_to_accumulations) {
if (loop_reduce_scatter->opcode() != HloOpcode::kReduceScatter) {
continue;
}
const Shape& accumulation_shape = loop_reduce_scatter->operand(0)->shape();
for (auto& accumulation_context : accumulations) {
const int64_t tuple_index = accumulation_context.param_tuple_index;
HloInstruction* param_body = body->parameter_instruction(0);
std::vector<Shape> element_shapes = param_body->shape().tuple_shapes();
element_shapes[tuple_index] = accumulation_shape;
*param_body->mutable_shape() = ShapeUtil::MakeTupleShape(element_shapes);
for (HloInstruction* user : param_body->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
HloGetTupleElementInstruction* gte =
Cast<HloGetTupleElementInstruction>(user);
if (gte->tuple_index() != tuple_index) {
continue;
}
*gte->mutable_shape() = accumulation_shape;
for (HloInstruction* gte_user : gte->users()) {
CHECK_EQ(gte_user->opcode(), HloOpcode::kAdd);
*gte_user->mutable_shape() = accumulation_shape;
}
}
std::vector<HloInstruction*> reduce_scatter_users =
loop_reduce_scatter->users();
while (!reduce_scatter_users.empty()) {
HloInstruction* user = reduce_scatter_users.back();
reduce_scatter_users.pop_back();
if (user->opcode() == HloOpcode::kSelect) {
HloInstruction* zero = create_zero_of_shape(accumulation_shape);
HloInstruction* scalar_predicate =
GetEffectiveScalar(user->mutable_operand(0));
Shape pred_shape =
ShapeUtil::ChangeElementType(accumulation_shape, PRED);
HloInstruction* pred =
body->AddInstruction(HloInstruction::CreateBroadcast(
pred_shape, scalar_predicate, {}));
TF_RETURN_IF_ERROR(user->ReplaceOperandWithDifferentShape(0, pred));
HloInstruction *new_operand_1, *new_operand_2;
if (user->operand_index(loop_reduce_scatter) == 1) {
new_operand_1 = loop_reduce_scatter->mutable_operand(0);
new_operand_2 = zero;
} else {
new_operand_1 = zero;
new_operand_2 = loop_reduce_scatter->mutable_operand(0);
}
TF_RETURN_IF_ERROR(
user->ReplaceOperandWithDifferentShape(1, new_operand_1));
TF_RETURN_IF_ERROR(
user->ReplaceOperandWithDifferentShape(2, new_operand_2));
*user->mutable_shape() = accumulation_shape;
} else {
TF_RET_CHECK(user->opcode() == HloOpcode::kAdd);
TF_RET_CHECK(user->shape() == accumulation_shape);
}
}
HloInstruction* root = body->root_instruction();
*root->mutable_shape() = param_body->shape();
HloInstruction* param_cond = cond->parameter_instruction(0);
*param_cond->mutable_shape() = param_body->shape();
}
}
return absl::OkStatus();
}
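// Emits one all-reduce (or reduce-scatter) after the new while loop for each
// accumulation buffer, converting element types where needed, and adds the
// result onto the original pre-loop buffer value.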
absl::flat_hash_map<int, HloInstruction*> CreateSinkedAllReduces(
HloInstruction* new_while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations,
const absl::flat_hash_map<int, HloInstruction*>&
tuple_index_to_old_buffer) {
HloComputation* while_parent = new_while_instruction->parent();
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_new_buffer;
for (const auto& all_reduce_and_accumulations_pair :
all_reduce_to_accumulations) {
HloInstruction* loop_all_reduce = all_reduce_and_accumulations_pair.first;
const std::vector<AccumulationContext>& accumulations =
all_reduce_and_accumulations_pair.second;
for (const auto& accumulation_context : accumulations) {
int64_t tuple_index = accumulation_context.param_tuple_index;
const Shape& accumulation_buffer_shape =
new_while_instruction->shape().tuple_shapes(tuple_index);
HloInstruction* accumulation_buffer =
while_parent->AddInstruction(HloInstruction::CreateGetTupleElement(
accumulation_buffer_shape, new_while_instruction, tuple_index));
HloInstruction* all_reduce_operand = accumulation_buffer;
if (!ShapeUtil::SameElementType(loop_all_reduce->shape(),
accumulation_buffer_shape)) {
Shape all_reduce_shape =
ShapeUtil::MakeShape(loop_all_reduce->shape().element_type(),
accumulation_buffer_shape.dimensions());
all_reduce_operand =
while_parent->AddInstruction(HloInstruction::CreateConvert(
all_reduce_shape, accumulation_buffer));
}
HloInstruction* all_reduced_delta;
if (loop_all_reduce->opcode() == HloOpcode::kAllReduce) {
auto* old_all_reduce = Cast<HloAllReduceInstruction>(loop_all_reduce);
all_reduced_delta =
while_parent->AddInstruction(HloInstruction::CreateAllReduce(
all_reduce_operand->shape(), {all_reduce_operand},
old_all_reduce->called_computations()[0],
old_all_reduce->device_list(),
old_all_reduce->constrain_layout(),
hlo_query::NextChannelId(*(while_parent->parent())),
old_all_reduce->use_global_device_ids()));
} else {
auto* old_reduce_scatter =
Cast<HloReduceScatterInstruction>(loop_all_reduce);
all_reduced_delta =
while_parent->AddInstruction(HloInstruction::CreateReduceScatter(
old_reduce_scatter->shape(), {all_reduce_operand},
old_reduce_scatter->called_computations()[0],
old_reduce_scatter->device_list(),
old_reduce_scatter->constrain_layout(),
hlo_query::NextChannelId(*(while_parent->parent())),
old_reduce_scatter->use_global_device_ids(),
old_reduce_scatter->scatter_dimension()));
}
if (!ShapeUtil::SameElementType(all_reduced_delta->shape(),
accumulation_buffer_shape)) {
all_reduced_delta =
while_parent->AddInstruction(HloInstruction::CreateConvert(
accumulation_buffer_shape, all_reduced_delta));
}
CHECK(ContainsKey(tuple_index_to_old_buffer, tuple_index));
HloInstruction* old_buffer = tuple_index_to_old_buffer.at(tuple_index);
CHECK(Shape::Equal().IgnoreLayout()(old_buffer->shape(),
all_reduced_delta->shape()));
HloInstruction* add_to_old_buffer =
while_parent->AddInstruction(HloInstruction::CreateBinary(
all_reduced_delta->shape(), HloOpcode::kAdd, old_buffer,
all_reduced_delta));
tuple_index_to_new_buffer[tuple_index] = add_to_old_buffer;
}
}
return tuple_index_to_new_buffer;
}
HloInstruction* CreateNewWhileResult(
HloInstruction* new_while_instruction,
const absl::flat_hash_map<int, HloInstruction*>&
tuple_index_to_new_buffer) {
HloComputation* while_parent = new_while_instruction->parent();
CHECK(new_while_instruction->shape().IsTuple());
std::vector<HloInstruction*> new_while_result_elements(
new_while_instruction->shape().tuple_shapes_size(), nullptr);
for (int i = 0; i < new_while_result_elements.size(); i++) {
if (ContainsKey(tuple_index_to_new_buffer, i)) {
new_while_result_elements[i] = tuple_index_to_new_buffer.at(i);
} else {
HloInstruction* gte =
while_parent->AddInstruction(HloInstruction::CreateGetTupleElement(
new_while_instruction->shape().tuple_shapes(i),
new_while_instruction, i));
new_while_result_elements[i] = gte;
}
}
HloInstruction* new_while_result = while_parent->AddInstruction(
HloInstruction::CreateTuple(new_while_result_elements));
return new_while_result;
}
absl::Status AddSinkedAllReducesAndReplaceWhile(
HloInstruction* while_instruction,
const HloInstructionMap<std::vector<AccumulationContext>>&
all_reduce_to_accumulations) {
auto new_while_init_context =
CreateNewWhileInit(while_instruction, all_reduce_to_accumulations);
TF_RETURN_IF_ERROR(ChangeAccumulatorShapesInLoopBodies(
while_instruction, all_reduce_to_accumulations));
HloInstruction* new_while_instruction =
while_instruction->parent()->AddInstruction(HloInstruction::CreateWhile(
new_while_init_context.while_init->shape(),
while_instruction->while_condition(), while_instruction->while_body(),
new_while_init_context.while_init));
absl::flat_hash_map<int, HloInstruction*> tuple_index_to_new_buffer =
CreateSinkedAllReduces(new_while_instruction, all_reduce_to_accumulations,
new_while_init_context.tuple_index_to_old_buffer);
HloInstruction* new_while_result =
CreateNewWhileResult(new_while_instruction, tuple_index_to_new_buffer);
TF_RETURN_IF_ERROR(while_instruction->parent()->ReplaceInstruction(
while_instruction, new_while_result));
return absl::OkStatus();
}
}
absl::StatusOr<bool> WhileLoopAllReduceCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool is_changed = false;
if (module->config().num_partitions() > 1 &&
!module->config().use_spmd_partitioning()) {
return false;
}
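  // The replication analyses below are used to prove that select predicates
  // and dynamic-slice indices agree across all participants of an all-reduce
  // group, which is required for hoisting to be sound.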
std::unique_ptr<HloReplicationAnalysis> cross_replica_replication_analysis;
if (module->config().replica_count() > 1) {
VLOG(5) << "num_replicas: " << module->config().replica_count()
<< " run HloReplicationAnalysis across replicas";
TF_ASSIGN_OR_RETURN(cross_replica_replication_analysis,
HloReplicationAnalysis::RunWithPartialReplication(
                            module, /*cross_partition_spmd=*/false));
}
std::unique_ptr<HloReplicationAnalysis> cross_partition_replication_analysis;
if (module->config().use_spmd_partitioning() &&
module->config().num_partitions() > 1) {
VLOG(5) << "num_partitions: " << module->config().num_partitions()
<< " run HloReplicationAnalysis across partitions";
TF_ASSIGN_OR_RETURN(cross_partition_replication_analysis,
HloReplicationAnalysis::RunWithPartialReplication(
                            module, /*cross_partition_spmd=*/true));
}
uint32_t count_all_reduce = 0, count_reduce_scatter = 0;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
std::vector<HloInstruction*> computation_callers =
call_graph->GetComputationCallers(computation);
std::vector<HloInstruction*> while_caller_instructions;
for (HloInstruction* caller_instruction : computation_callers) {
if (caller_instruction->opcode() == HloOpcode::kWhile &&
caller_instruction->shape().IsTuple() &&
caller_instruction->while_body() == computation) {
while_caller_instructions.push_back(caller_instruction);
}
}
if (while_caller_instructions.empty()) {
continue;
}
std::vector<HloAllReduceInstructionBase*> while_body_all_reduces;
for (HloInstruction* while_body_instruction :
computation->MakeInstructionPostOrder()) {
HloOpcode op = while_body_instruction->opcode();
const bool is_candidate =
(op == HloOpcode::kAllReduce) ||
(enable_reduce_scatter_ && op == HloOpcode::kReduceScatter);
if (!is_candidate) {
continue;
}
auto* all_reduce_instruction =
Cast<HloAllReduceInstructionBase>(while_body_instruction);
if (all_reduce_instruction->constrain_layout()) {
return false;
} else {
while_body_all_reduces.push_back(all_reduce_instruction);
}
}
HloInstructionMap<std::vector<AccumulationContext>>
all_reduce_to_accumulations;
for (HloAllReduceInstructionBase* all_reduce : while_body_all_reduces) {
auto movable_all_reduce_context = IsAllReduceMovable(
all_reduce, computation, cross_replica_replication_analysis,
cross_partition_replication_analysis);
if (movable_all_reduce_context.is_movable) {
all_reduce_to_accumulations[all_reduce] =
std::move(movable_all_reduce_context.accumulation_contexts);
}
VLOG(3) << "WhileLoopAllReduceCodeMotion, all-reduce: "
<< all_reduce->ToString()
<< " is_movable: " << movable_all_reduce_context.is_movable
<< " while loop: " << while_caller_instructions.front()->name()
<< " num_accumulations: "
<< (movable_all_reduce_context.is_movable
? all_reduce_to_accumulations[all_reduce].size()
: 0);
}
if (all_reduce_to_accumulations.empty()) {
continue;
}
for (HloInstruction* while_instruction : while_caller_instructions) {
TF_RETURN_IF_ERROR(AddSinkedAllReducesAndReplaceWhile(
while_instruction, all_reduce_to_accumulations));
is_changed = true;
}
for (const auto& all_reduce_accumulations_pair :
all_reduce_to_accumulations) {
HloInstruction* all_reduce = all_reduce_accumulations_pair.first;
if (all_reduce->opcode() == HloOpcode::kAllReduce) {
count_all_reduce++;
} else {
count_reduce_scatter++;
}
TF_RETURN_IF_ERROR(computation->ReplaceInstructionWithDifferentShape(
all_reduce, all_reduce->mutable_operand(0)));
}
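    // The rewrite above invalidated the call graph; rebuild it before
    // visiting the next computation.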
if (!all_reduce_to_accumulations.empty()) {
call_graph = CallGraph::Build(module);
}
}
VLOG(2) << "Hoisted " << count_all_reduce << " all-reduce and "
<< count_reduce_scatter << " reduce-scatter out of while loops";
return is_changed;
}
} | #include "xla/service/while_loop_all_reduce_code_motion.h"
#include <algorithm>
#include <array>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ::testing::Ne;
using ::testing::NotNull;
using ::testing::Property;
using ::testing::SizeIs;
class WhileLoopAllReduceCodeMotionTest : public HloTestBase {
public:
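  // Returns the first instruction in `computation` with opcode `op`. Callers
  // must ensure such an instruction exists; otherwise the end iterator is
  // dereferenced.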
template <HloOpcode op>
HloInstruction* find_op(HloComputation* computation) {
return *std::find_if(computation->instructions().begin(),
computation->instructions().end(),
HloPredicateIsOp<op>);
}
};
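// Baseline pattern: an all-reduce whose result is only added into a
// loop-carried buffer should be hoisted out of the while body and applied
// once to the final accumulation.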
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
ASSERT_THAT(moved_all_reduce, NotNull());
EXPECT_THAT(moved_all_reduce->operand(0), op::GetTupleElement());
EXPECT_EQ(DynCast<HloGetTupleElementInstruction>(
moved_all_reduce->mutable_operand(0))
->tuple_index(),
3);
EXPECT_THAT(moved_all_reduce, op::ReplicaGroups({{0, 1, 2, 3}}));
EXPECT_FALSE(moved_all_reduce->constrain_layout());
EXPECT_TRUE(moved_all_reduce->use_global_device_ids());
HloComputation* reduction_computation =
module->GetComputationWithName("reduction");
ASSERT_THAT(reduction_computation, NotNull());
EXPECT_EQ(moved_all_reduce->to_apply(), reduction_computation);
}
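// Same pattern with reduce-scatter, which must be opted into via the pass
// constructor argument.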
TEST_F(WhileLoopAllReduceCodeMotionTest, ReduceScatterAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[4096, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024, 1024] reduce-scatter(f32[4096, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={0}
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %reduce-scatter, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[4096, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[4096, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(
      bool simplified_loop,
      WhileLoopAllReduceCodeMotion{/*enable_reduce_scatter=*/true}.Run(
          module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::ReduceScatter())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_THAT(accumulation_buffer, op::Shape("f32[4096, 1024]"));
auto* moved_reduce_scatter = DynCast<HloReduceScatterInstruction>(
find_op<HloOpcode::kReduceScatter>(entry));
ASSERT_THAT(moved_reduce_scatter, NotNull());
EXPECT_THAT(moved_reduce_scatter->operand(0), op::GetTupleElement());
EXPECT_EQ(DynCast<HloGetTupleElementInstruction>(
moved_reduce_scatter->mutable_operand(0))
->tuple_index(),
3);
EXPECT_THAT(moved_reduce_scatter, op::ReplicaGroups({{0, 1, 2, 3}}));
EXPECT_FALSE(moved_reduce_scatter->constrain_layout());
EXPECT_TRUE(moved_reduce_scatter->use_global_device_ids());
HloComputation* reduction_computation =
module->GetComputationWithName("reduction");
ASSERT_THAT(reduction_computation, NotNull());
EXPECT_EQ(moved_reduce_scatter->to_apply(), reduction_computation);
}
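// Without the opt-in, an accumulated reduce-scatter must be left in place.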
TEST_F(WhileLoopAllReduceCodeMotionTest,
ReduceScatterAccumulateDisabledByDefault) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[4096, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024, 1024] reduce-scatter(f32[4096, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={0}
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %reduce-scatter, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[4096, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[4096, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[4096, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
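// One all-reduce feeding several slice/reshape chains, each accumulated into
// its own buffer: the pass should hoist one collective per accumulation, each
// with a distinct channel id.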
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceSliceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[3, 1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024, 1024] get-tuple-element(%param), index=4
%gte.5 = f32[1024, 1024] get-tuple-element(%param), index=5
%all-reduce = f32[3, 1024, 1024] all-reduce(f32[3, 1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%slice.0 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[0:1], [0:1024], [0:1024]}
%reshape.0 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.0)
%slice.1 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[1:2], [0:1024], [0:1024]}
%reshape.1 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.1)
%slice.2 = f32[1, 1024, 1024] slice(f32[3, 1024, 1024] %all-reduce), slice={[2:3], [0:1024], [0:1024]}
%reshape.2 = f32[1024, 1024] reshape(f32[1, 1024, 1024] %slice.2)
%accumulation.0 = f32[1024, 1024] add(f32[1024, 1024] %reshape.0, f32[1024, 1024] %gte.3)
%accumulation.1 = f32[1024, 1024] add(f32[1024, 1024] %reshape.1, f32[1024, 1024] %gte.4)
%accumulation.2 = f32[1024, 1024] add(f32[1024, 1024] %reshape.2, f32[1024, 1024] %gte.5)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation.0, %accumulation.1, %accumulation.2)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[3, 1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer.0 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%accumulation_buffer.1 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%accumulation_buffer.2 = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[3, 1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer.0, f32[1024, 1024] %accumulation_buffer.1, f32[1024, 1024] %accumulation_buffer.2)
ROOT %while = (s32[], s32[], f32[3, 1024, 1024], f32[1024, 1024], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
std::vector<HloInstruction*> hoisted_all_reduces;
absl::c_copy_if(module->entry_computation()->instructions(),
std::back_inserter(hoisted_all_reduces),
HloPredicateIsOp<HloOpcode::kAllReduce>);
EXPECT_THAT(hoisted_all_reduces, SizeIs(3));
ASSERT_THAT(
hoisted_all_reduces,
Each(Pointee(Property(&HloInstruction::channel_id, Ne(std::nullopt)))));
absl::flat_hash_set<int> unique_channel_ids = {
hoisted_all_reduces[0]->channel_id().value(),
hoisted_all_reduces[1]->channel_id().value(),
hoisted_all_reduces[2]->channel_id().value()};
EXPECT_THAT(unique_channel_ids, SizeIs(3));
}
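// The accumulation buffer is consumed after the loop; the consumer must be
// rewired so it observes the result of the hoisted all-reduce.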
TEST_F(WhileLoopAllReduceCodeMotionTest, AllReduceAccumulateUse) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
%gte_while = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
ROOT %multiply = f32[1024, 1024] multiply(f32[1024, 1024] %gte_while, f32[1024, 1024] %param.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* new_root = module->entry_computation()->root_instruction();
ASSERT_THAT(new_root, op::Multiply());
ASSERT_THAT(new_root->operand(0), op::GetTupleElement());
ASSERT_THAT(new_root->operand(0)->operand(0), op::Tuple());
EXPECT_THAT(new_root->operand(0)->operand(0)->operand(3), op::Add());
}
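// The all-reduce result is used twice on the accumulation path, so it is not
// a pure sum accumulation and must not be moved.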
TEST_F(WhileLoopAllReduceCodeMotionTest, RepeatedlyAccumulatedAllReduce) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %gte.3)
%add.0 = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] %accumulation)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %add.0)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
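// convert()s surrounding the all-reduce are tolerated: the collective is
// hoisted in bf16 while the accumulation stays in f32.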
TEST_F(WhileLoopAllReduceCodeMotionTest, TypeCastAllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%convert.0 = bf16[1024, 1024] convert(f32[1024, 1024] %gte.2)
%all-reduce = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %convert.0), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%convert.1 = f32[1024, 1024] convert(bf16[1024, 1024] %all-reduce)
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %convert.1, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
EXPECT_THAT(moved_all_reduce, op::Shape("bf16[1024, 1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(0),
op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(1),
op::Shape("f32[1024, 1024]"));
}
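// A select between zeros and the all-reduce result on the accumulation path
// still permits motion for all-reduce.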
TEST_F(WhileLoopAllReduceCodeMotionTest, SelectAllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%all-reduce = f32[1024,1024] all-reduce(%gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%const.0 = f32[] constant(0)
%zeros = f32[1024,1024] broadcast(%const.0), dimensions={}
%predicates = pred[1024,1024] custom-call(), custom_call_target="something"
%select = f32[1024,1024] select(%predicates, %zeros, %all-reduce)
%accumulation = f32[1024,1024] add(%select, %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024,1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024,1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024,1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
EXPECT_THAT(moved_all_reduce, op::Shape("f32[1024,1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(0),
op::Shape("f32[1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(1),
op::Shape("f32[1024, 1024]"));
}
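// For reduce-scatter, a select is only acceptable when its predicate is
// effectively scalar (a broadcast of one value).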
TEST_F(WhileLoopAllReduceCodeMotionTest, SelectReduceScatterAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024,4096] get-tuple-element(%param), index=2
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024,1024] reduce-scatter(%gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={1}
%const.0 = f32[] constant(0)
%zeros = f32[1024,1024] broadcast(%const.0), dimensions={}
%scalarp = pred[] custom-call(), custom_call_target="something"
%predicates = pred[1024,1024] broadcast(%scalarp), dimensions={}
%select = f32[1024,1024] select(%predicates, %zeros, %reduce-scatter)
%accumulation = f32[1024,1024] add(%select, %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024,4096], f32[1024,1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024,4096] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024,1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 4096] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(
      bool simplified_loop,
      WhileLoopAllReduceCodeMotion{/*enable_reduce_scatter=*/true}.Run(
          module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::ReduceScatter())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_THAT(accumulation_buffer, op::Shape("f32[1024,4096]"));
auto* moved_reduce_scatter = DynCast<HloReduceScatterInstruction>(
find_op<HloOpcode::kReduceScatter>(entry));
EXPECT_THAT(moved_reduce_scatter, op::Shape("f32[1024,1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[1024,1024]"));
}
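// With a non-broadcast, per-element predicate feeding the select, the pass
// must not transform the reduce-scatter loop.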
TEST_F(WhileLoopAllReduceCodeMotionTest,
SelectReduceScatterAccumulateNotScalarPredicate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024,4096], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024,4096] get-tuple-element(%param), index=2
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%reduce-scatter = f32[1024,1024] reduce-scatter(%gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction, dimensions={1}
%const.0 = f32[] constant(0)
%zeros = f32[1024,1024] broadcast(%const.0), dimensions={}
%predicates = pred[1024,1024] custom-call(), custom_call_target="something"
%select = f32[1024,1024] select(%predicates, %zeros, %reduce-scatter)
%accumulation = f32[1024,1024] add(%select, %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024,4096], f32[1024,1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024,4096] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024,1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 4096] %param.1, f32[1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 4096], f32[1024,1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(
      bool simplified_loop,
      WhileLoopAllReduceCodeMotion{/*enable_reduce_scatter=*/true}.Run(
          module.get()));
EXPECT_FALSE(simplified_loop);
}
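// The same while body is called from two while instructions; both calls
// should be rewritten, each with its own hoisted all-reduce.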
TEST_F(WhileLoopAllReduceCodeMotionTest, MultipleLoopCalls) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%convert.0 = bf16[1024, 1024] convert(f32[1024, 1024] %gte.2)
%all-reduce = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %convert.0), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction
%convert.1 = f32[1024, 1024] convert(bf16[1024, 1024] %all-reduce)
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %convert.1, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.0), condition=%while_condition, body=%while_body
%gte.3 = f32[1024, 1024] get-tuple-element(%while.0), index=3
%while_init.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %gte.3)
    %while.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.1), condition=%while_condition, body=%while_body
ROOT %gte.4 = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024])%while.1), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
Matches(op::While())),
2);
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
Matches(op::AllReduce())),
2);
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
}
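// Two independent all-reduce accumulation chains in one body should both be
// hoisted.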
TEST_F(WhileLoopAllReduceCodeMotionTest, MultipleAllReduceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction.0 {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%reduction.1 {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = bf16[1024, 1024] get-tuple-element(%param), index=4
%gte.5 = bf16[1024, 1024] get-tuple-element(%param), index=5
%all-reduce.0 = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.0
%accumulation.0 = f32[1024, 1024] add(f32[1024, 1024] %all-reduce.0, f32[1024, 1024] %gte.3)
%all-reduce.1 = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %gte.4), channel_id=2, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.1
%accumulation.1 = bf16[1024, 1024] add(bf16[1024, 1024] %all-reduce.1, bf16[1024, 1024] %gte.5)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation.0, %gte.4, %accumulation.1)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%param.2 = bf16[1024, 1024] parameter(2)
%constant.0 = s32[] constant(1)
%accumulation_buffer.0 = f32[1024, 1024] constant({...})
%accumulation_buffer.1 = bf16[1024, 1024] constant({...})
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer.0, bf16[1024, 1024] %param.2, bf16[1024, 1024] %accumulation_buffer.1)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
Matches(op::AllReduce())),
2);
}
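// Two independent reduce-scatter accumulation chains should both be hoisted
// when reduce-scatter motion is enabled.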
TEST_F(WhileLoopAllReduceCodeMotionTest, MultipleReduceScatterAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_reduce_scatter
%reduction.0 {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%reduction.1 {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024], bf16[4096, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[4096, 1024], f32[1024, 1024], bf16[4096, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[4096, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = bf16[4096, 1024] get-tuple-element(%param), index=4
%gte.5 = bf16[1024, 1024] get-tuple-element(%param), index=5
%reduce-scatter.0 = f32[1024, 1024] reduce-scatter(f32[4096, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.0, dimensions={0}
%accumulation.0 = f32[1024, 1024] add(f32[1024, 1024] %reduce-scatter.0, f32[1024, 1024] %gte.3)
%reduce-scatter.1 = bf16[1024, 1024] reduce-scatter(bf16[4096, 1024] %gte.4), channel_id=2, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.1, dimensions={0}
%accumulation.1 = bf16[1024, 1024] add(bf16[1024, 1024] %reduce-scatter.1, bf16[1024, 1024] %gte.5)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[4096, 1024], f32[1024, 1024], bf16[4096, 1024], bf16[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation.0, %gte.4, %accumulation.1)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[4096, 1024] parameter(1)
%param.2 = bf16[4096, 1024] parameter(2)
%constant.0 = s32[] constant(1)
%accumulation_buffer.0 = f32[1024, 1024] constant({...})
%accumulation_buffer.1 = bf16[1024, 1024] constant({...})
%while_init = (s32[], s32[], f32[4096, 1024], f32[1024, 1024], bf16[4096, 1024], bf16[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[4096, 1024] %param.1, f32[1024, 1024] %accumulation_buffer.0, bf16[4096, 1024] %param.2, bf16[1024, 1024] %accumulation_buffer.1)
ROOT %while = (s32[], s32[], f32[4096, 1024], f32[1024, 1024], bf16[4096, 1024], bf16[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(
      bool simplified_loop,
      WhileLoopAllReduceCodeMotion{/*enable_reduce_scatter=*/true}.Run(
          module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::ReduceScatter())));
constexpr std::array<std::pair<int64_t, absl::string_view>, 2> accum_buffers =
{{
{3, "f32[4096, 1024]"},
{5, "bf16[4096, 1024]"},
}};
for (auto [index, shape] : accum_buffers) {
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(index);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_THAT(accumulation_buffer, op::Shape(shape));
}
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
Matches(op::ReduceScatter())),
2);
}
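// Only the movable all-reduce is hoisted; the one whose accumulation is also
// consumed inside the body stays in the loop.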
TEST_F(WhileLoopAllReduceCodeMotionTest, MixMovableAllReduceWithNotMovable) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction.0 {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
%reduction.1 {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = bf16[1024, 1024] get-tuple-element(%param), index=4
%gte.5 = bf16[1024, 1024] get-tuple-element(%param), index=5
%all-reduce.0 = f32[1024, 1024] all-reduce(f32[1024, 1024] %gte.2), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.0
%accumulation.0 = f32[1024, 1024] add(f32[1024, 1024] %all-reduce.0, f32[1024, 1024] %gte.3)
%all-reduce.1 = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %gte.4), channel_id=2, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%reduction.1
%accumulation.1 = bf16[1024, 1024] add(bf16[1024, 1024] %all-reduce.1, bf16[1024, 1024] %gte.5)
%add.0 = bf16[1024, 1024] add(bf16[1024, 1024] %accumulation.1, bf16[1024, 1024] %gte.4)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation.0, %gte.4, %add.0)
}
ENTRY accumulated_all_reduce {
%param.0 = s32[] parameter(0)
%param.1 = f32[1024, 1024] parameter(1)
%param.2 = bf16[1024, 1024] parameter(2)
%constant.0 = s32[] constant(1)
%accumulation_buffer.0 = f32[1024, 1024] constant({...})
%accumulation_buffer.1 = bf16[1024, 1024] constant({...})
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) tuple(s32[] %constant.0, s32[] %param.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer.0, bf16[1024, 1024] %param.2, bf16[1024, 1024] %accumulation_buffer.1)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024], bf16[1024, 1024], bf16[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_EQ(absl::c_count_if(transformed_while->while_body()->instructions(),
Matches(op::AllReduce())),
1);
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
Matches(op::AllReduce())),
1);
}
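// Accumulation through dynamic-slice/dynamic-update-slice at a
// partition-dependent offset: movable here, where every member of a replica
// group reads the same offset from the table.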
TEST_F(WhileLoopAllReduceCodeMotionTest,
DynamicSliceAllReduceDynamicUpdateSliceAccumulate) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.1, %gte.0), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[2, 1024, 1024] get-tuple-element(%param), index=3
%offset-table = s32[8] constant({0, 0, 0, 0, 1, 1, 1, 1})
%partition-id = u32[] partition-id()
%offset-array = s32[1] dynamic-slice(%offset-table, %partition-id), dynamic_slice_sizes={1}
%offset = s32[] reshape(%offset-array)
%convert.0 = bf16[1024, 1024] convert(f32[1024, 1024] %gte.2)
%all-reduce = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %convert.0), channel_id=1, replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true, to_apply=%reduction
%convert.1 = f32[1024, 1024] convert(bf16[1024, 1024] %all-reduce)
%reshape = f32[1,1024, 1024] reshape(f32[1024, 1024] %convert.1)
%constant.2 = s32[] constant(0)
%dynamic-slice = f32[1,1024,1024] dynamic-slice(f32[2, 1024, 1024] %gte.3, s32[] %offset, s32[] %constant.2, s32[] %constant.2), dynamic_slice_sizes={1, 1024, 1024}
%accumulation = f32[1,1024,1024] add(f32[1, 1024, 1024] %reshape, f32[1, 1024, 1024] %dynamic-slice)
%dynamic-update-slice = f32[2,1024,1024] dynamic-update-slice(f32[2, 1024, 1024] %gte.3, f32[1, 1024, 1024] %accumulation, s32[] %offset, s32[] %constant.2, s32[] %constant.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.1, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) tuple(s32[] %gte.0, s32[] %increment_iteration, f32[1024, 1024] %gte.2, f32[2, 1024, 1024] %dynamic-update-slice)
}
ENTRY accumulated_all_reduce {
%param.0 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(8)
%constant.1 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[2, 1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) tuple(s32[] %constant.0, s32[] %constant.1, f32[1024, 1024] %param.0, f32[2, 1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(kHloModule, /*replica_count=*/1,
                                   /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
ASSERT_TRUE(simplified_loop);
  TF_ASSERT_OK(HloVerifier(/*layout_sensitive=*/false,
                           /*allow_mixed_precision=*/true)
                   .Run(module.get())
                   .status());
HloComputation* entry = module->entry_computation();
HloInstruction* transformed_while = find_op<HloOpcode::kWhile>(entry);
ASSERT_THAT(transformed_while, NotNull());
EXPECT_THAT(transformed_while->while_body()->instructions(),
Each(Not(op::AllReduce())));
HloInstruction* accumulation_buffer =
transformed_while->mutable_operand(0)->mutable_operand(3);
EXPECT_THAT(accumulation_buffer, op::Constant());
HloAllReduceInstruction* moved_all_reduce =
DynCast<HloAllReduceInstruction>(find_op<HloOpcode::kAllReduce>(entry));
EXPECT_THAT(moved_all_reduce, op::Shape("bf16[2, 1024, 1024]"));
HloInstruction* add_delta_to_old_buffer = find_op<HloOpcode::kAdd>(entry);
ASSERT_THAT(add_delta_to_old_buffer, NotNull());
EXPECT_THAT(add_delta_to_old_buffer, op::Shape("f32[2, 1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(0),
op::Shape("f32[2, 1024, 1024]"));
EXPECT_THAT(add_delta_to_old_buffer->operand(1),
op::Shape("f32[2, 1024, 1024]"));
}
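// Same pattern, but the replica groups mix partitions with different offsets;
// the loop must be left untouched.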
TEST_F(WhileLoopAllReduceCodeMotionTest,
DynamicSliceAllReduceDynamicUpdateSliceAccumulateNotMoved) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%reduction {
%x = bf16[] parameter(0)
%y = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %x, bf16[] %y)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.1, %gte.0), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[2, 1024, 1024] get-tuple-element(%param), index=3
%offset-table = s32[8] constant({0, 0, 0, 0, 1, 1, 1, 1})
%partition-id = u32[] partition-id()
%offset-array = s32[1] dynamic-slice(%offset-table, %partition-id), dynamic_slice_sizes={1}
%offset = s32[] reshape(%offset-array)
%convert.0 = bf16[1024, 1024] convert(f32[1024, 1024] %gte.2)
%all-reduce = bf16[1024, 1024] all-reduce(bf16[1024, 1024] %convert.0), channel_id=1, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=%reduction
%convert.1 = f32[1024, 1024] convert(bf16[1024, 1024] %all-reduce)
%reshape = f32[1,1024, 1024] reshape(f32[1024, 1024] %convert.1)
%constant.2 = s32[] constant(0)
%dynamic-slice = f32[1,1024,1024] dynamic-slice(f32[2, 1024, 1024] %gte.3, s32[] %offset, s32[] %constant.2, s32[] %constant.2), dynamic_slice_sizes={1, 1024, 1024}
%accumulation = f32[1,1024,1024] add(f32[1, 1024, 1024] %reshape, f32[1, 1024, 1024] %dynamic-slice)
%dynamic-update-slice = f32[2,1024,1024] dynamic-update-slice(f32[2, 1024, 1024] %gte.3, f32[1, 1024, 1024] %accumulation, s32[] %offset, s32[] %constant.2, s32[] %constant.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.1, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) tuple(s32[] %gte.0, s32[] %increment_iteration, f32[1024, 1024] %gte.2, f32[2, 1024, 1024] %dynamic-update-slice)
}
ENTRY accumulated_all_reduce {
%param.0 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(8)
%constant.1 = s32[] constant(1)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[2, 1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) tuple(s32[] %constant.0, s32[] %constant.1, f32[1024, 1024] %param.0, f32[2, 1024, 1024] %accumulation_buffer)
ROOT %while = (s32[], s32[], f32[1024, 1024], f32[2, 1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(kHloModule, /*replica_count=*/1,
                                   /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopAllReduceCodeMotion{}.Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_all_reduce_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_all_reduce_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9184746-8b58-4033-bae2-5a7c164051b9 | cpp | tensorflow/tensorflow | copy_insertion | third_party/xla/xla/service/copy_insertion.cc | third_party/xla/xla/service/copy_insertion_test.cc | #include "xla/service/copy_insertion.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using absl::StrAppend;
bool IsReadonlyEntryParameterValue(const HloValue& value) {
const HloComputation* computation = value.defining_instruction()->parent();
return value.defining_instruction()->opcode() == HloOpcode::kParameter &&
computation == computation->parent()->entry_computation() &&
!computation->parent()->input_output_alias_config().ParameterHasAlias(
value.defining_instruction()->parameter_number(), value.index());
}
bool IsConstantValue(const HloValue& value) {
return value.defining_instruction()->opcode() == HloOpcode::kConstant;
}
bool ValueIsReadOnly(const HloValue& value) {
return IsConstantValue(value) || IsReadonlyEntryParameterValue(value);
}
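// At the entry computation, read-only values (constants and non-aliased entry
// parameters) that reach the root must be copied so the output buffers do not
// alias the inputs.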
struct SpecialCaseCopyPolicy {
bool copy_root_replicated_buffers = false;
bool copy_parameters_and_constants = false;
};
SpecialCaseCopyPolicy GetSpecialCaseCopyPolicy(const CallGraphNode& node,
HloModule* module,
HloComputation* computation) {
SpecialCaseCopyPolicy policy;
if (computation == module->entry_computation()) {
policy.copy_parameters_and_constants = true;
policy.copy_root_replicated_buffers = true;
}
return policy;
}
bool ShouldCopyRootValue(const HloValue& value,
const SpecialCaseCopyPolicy& policy) {
if (policy.copy_parameters_and_constants) {
return ValueIsReadOnly(value);
}
return false;
}
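// Deep-copies `from` and `to` at the indices marked in `indices_to_copy` and
// adds a control edge from each copy of `from` to the matching copy of `to`,
// forcing the `from` copies to execute first. Returns the two deep-copy
// roots.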
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
DeepCopyAndAddControlEdges(HloInstruction* from, HloInstruction* to,
const ShapeTree<bool>& indices_to_copy) {
DCHECK(ShapeUtil::Compatible(from->shape(), to->shape()));
  ShapeTree<HloInstruction*> from_copy_tree(from->shape(),
                                            /*init_value=*/nullptr);
TF_ASSIGN_OR_RETURN(HloInstruction * from_deep_copy,
from->parent()->DeepCopyInstruction(
from, &indices_to_copy, &from_copy_tree));
  ShapeTree<HloInstruction*> to_copy_tree(to->shape(), /*init_value=*/nullptr);
TF_ASSIGN_OR_RETURN(
HloInstruction * to_deep_copy,
to->parent()->DeepCopyInstruction(to, &indices_to_copy, &to_copy_tree));
for (const auto& pair : from_copy_tree) {
const ShapeIndex& index = pair.first;
HloInstruction* from_copy = pair.second;
HloInstruction* to_copy = to_copy_tree.element(index);
if (from_copy == nullptr) {
TF_RET_CHECK(to_copy == nullptr);
continue;
}
TF_RET_CHECK(to_copy != nullptr);
TF_RETURN_IF_ERROR(from_copy->AddControlDependencyTo(to_copy));
}
return std::make_pair(from_deep_copy, to_deep_copy);
}
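// Marks in `indices_to_copy` the shape indices at which the while's init
// operand and the while instruction do not already share a single value, so
// copies are needed to give the loop state its own buffer. Returns true if
// any index needs a copy.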
bool IndicesToCopyForWhile(const HloDataflowAnalysis& dataflow,
const HloInstruction* xla_while,
ShapeTree<bool>* indices_to_copy) {
DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(), xla_while->shape()));
bool any_copies = false;
const HloInstruction* init = xla_while->operand(0);
for (auto& pair : *indices_to_copy) {
const ShapeIndex& index = pair.first;
bool& should_copy = pair.second;
if (dataflow.GetValueSet(init, index).values().size() > 1 ||
dataflow.GetValueSet(xla_while, index).values().size() > 1) {
should_copy = true;
} else {
should_copy = dataflow.GetUniqueValueAt(xla_while, index) !=
dataflow.GetUniqueValueAt(init, index);
}
any_copies |= should_copy;
}
return any_copies;
}
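// Marks in `indices_to_copy` the shape indices at which the conditional
// defines a phi value (its branches produce different values). Returns true
// if any index needs a copy.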
bool IndicesToCopyForConditional(const HloDataflowAnalysis& dataflow,
const HloInstruction* xla_conditional,
ShapeTree<bool>* indices_to_copy) {
DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(),
xla_conditional->shape()));
bool any_copies = false;
for (auto& pair : *indices_to_copy) {
const ShapeIndex& index = pair.first;
bool& should_copy = pair.second;
CHECK_EQ(dataflow.GetValueSet(xla_conditional, index).values().size(), 1);
auto value = dataflow.GetValueSet(xla_conditional, index).values()[0];
should_copy =
value->is_phi() && value->defining_instruction() == xla_conditional;
any_copies |= should_copy;
}
return any_copies;
}
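// Inserts the copies required around a kWhile instruction: a deep copy of
// the init operand, plus matched deep copies of the body parameter and body
// root that are ordered against each other with control edges.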
absl::Status AddCopiesForWhile(const HloAliasAnalysis& alias_analysis,
HloInstruction* xla_while) {
VLOG(2) << "Adding copies for kWhile instruction " << xla_while->name();
TF_RET_CHECK(xla_while->opcode() == HloOpcode::kWhile);
ShapeTree<bool> indices_to_copy(xla_while->shape());
if (!IndicesToCopyForWhile(alias_analysis.dataflow_analysis(), xla_while,
&indices_to_copy)) {
VLOG(2) << "No copies necessary for kWhile instruction "
<< xla_while->name();
return absl::OkStatus();
}
VLOG(2) << "Adding copies for " << xla_while->name() << " at indices:";
for (auto& pair : indices_to_copy) {
if (pair.second) {
VLOG(2) << " " << pair.first;
}
}
HloInstruction* while_init = xla_while->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * while_init_copy,
xla_while->parent()->DeepCopyInstruction(while_init, &indices_to_copy));
TF_RETURN_IF_ERROR(while_init->ReplaceUseWith(xla_while, while_init_copy));
HloComputation* body = xla_while->while_body();
HloInstruction* param = body->parameter_instruction(0);
HloInstruction* root = body->root_instruction();
TF_RET_CHECK(param != root);
std::vector<HloInstruction*> param_users = param->users();
TF_ASSIGN_OR_RETURN(auto pair,
DeepCopyAndAddControlEdges(param, root, indices_to_copy));
HloInstruction* param_copy = pair.first;
HloInstruction* root_copy = pair.second;
for (HloInstruction* user : param_users) {
TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, param_copy));
}
body->set_root_instruction(root_copy);
return absl::OkStatus();
}
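// Deep-copies one operand of an in-place operation so the operation does not
// clobber a buffer that is still live elsewhere.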
absl::Status AddCopiesForInPlaceOperation(
const HloAliasAnalysis& alias_analysis, HloInstruction* in_place_op,
int64_t operand_number) {
VLOG(2) << "Adding copies for in-place operation " << in_place_op->name();
HloInstruction* operand = in_place_op->mutable_operand(operand_number);
TF_ASSIGN_OR_RETURN(HloInstruction * deep_copy,
in_place_op->parent()->DeepCopyInstruction(operand));
TF_RETURN_IF_ERROR(
operand->ReplaceUseWith(in_place_op, operand_number, deep_copy));
return absl::OkStatus();
}
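// For entry parameters that alias module outputs, deep-copies both the
// parameter and the root at the aliased indices, and adds control edges so
// each parameter copy completes before the corresponding output copy.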
absl::Status AddCopiesForAliasedInputOutputs(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HloComputation* entry = module->entry_computation();
if (!HloInstruction::IsThreadIncluded(entry->execution_thread(),
execution_threads)) {
return absl::OkStatus();
}
HloInstruction* root = entry->root_instruction();
ShapeTree<bool> output_indices_to_copy(root->shape());
std::vector<std::optional<ShapeTree<HloInstruction*>>> copied_parameters(
entry->num_parameters());
bool has_alias = false;
for (auto* param : entry->parameter_instructions()) {
bool param_has_alias = false;
ShapeTree<bool> param_indices_to_copy(param->shape());
module->input_output_alias_config().ForEachAlias(
[&](const ShapeIndex& output_index,
const HloInputOutputAliasConfig::Alias& alias) {
if (alias.parameter_number == param->parameter_number()) {
param_has_alias = true;
*(param_indices_to_copy.mutable_element(alias.parameter_index)) =
true;
*(output_indices_to_copy.mutable_element(output_index)) = true;
}
});
if (!param_has_alias) {
continue;
}
TF_RET_CHECK(param->parameter_number() < entry->num_parameters());
TF_RET_CHECK(!copied_parameters[param->parameter_number()]);
has_alias = true;
std::vector<HloInstruction*> users = param->users();
ShapeTree<HloInstruction*> param_copy_tree(param->shape(),
nullptr);
TF_ASSIGN_OR_RETURN(HloInstruction * copied,
entry->DeepCopyInstruction(
param, ¶m_indices_to_copy, ¶m_copy_tree));
if (param == root) {
entry->set_root_instruction(copied);
root = copied;
}
for (HloInstruction* user : users) {
TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, copied));
}
copied_parameters[param->parameter_number()] = param_copy_tree;
}
if (!has_alias) {
return absl::OkStatus();
}
ShapeTree<HloInstruction*> output_copy_tree(root->shape(),
nullptr);
TF_ASSIGN_OR_RETURN(HloInstruction * root_copied,
root->parent()->DeepCopyInstruction(
root, &output_indices_to_copy, &output_copy_tree));
TF_RETURN_IF_ERROR(module->input_output_alias_config().ForEachAliasWithStatus(
[&](const ShapeIndex& output_index,
const HloInputOutputAliasConfig::Alias& alias) -> absl::Status {
if (!copied_parameters[alias.parameter_number]) {
return absl::OkStatus();
}
HloInstruction* from =
copied_parameters[alias.parameter_number]->element(
alias.parameter_index);
HloInstruction* to = output_copy_tree.element(output_index);
TF_RET_CHECK(from != nullptr);
TF_RET_CHECK(to != nullptr);
TF_RETURN_IF_ERROR(from->AddControlDependencyTo(to));
return absl::OkStatus();
}));
entry->set_root_instruction(root_copied);
return absl::OkStatus();
}
absl::Status StripControlDependenciesFrom(HloInstruction* instruction) {
while (!instruction->control_successors().empty()) {
TF_RETURN_IF_ERROR(instruction->RemoveControlDependencyTo(
instruction->control_successors().front()));
}
while (!instruction->control_predecessors().empty()) {
TF_RETURN_IF_ERROR(
instruction->control_predecessors().front()->RemoveControlDependencyTo(
instruction));
}
return absl::OkStatus();
}
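// Records, per computation, which instructions participate in the live range
// of a set of values: whether each instruction defines a value and, for
// uses, which instruction defined the used value. Computations are kept in
// insertion order.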
class LiveRangeRegions {
public:
struct InstructionInfo {
InstructionInfo() : value_definition(nullptr), is_definition(false) {}
HloInstruction* value_definition;
bool is_definition;
std::string ToString() const {
return absl::StrCat(
"is_definition: ", std::to_string(is_definition),
", value_definition: ",
value_definition ? value_definition->name() : "nullptr");
}
};
typedef HloInstructionMap<InstructionInfo> InstructionMap;
typedef std::pair<HloInstruction*, InstructionInfo> InstructionEntry;
typedef absl::flat_hash_map<const HloComputation*, InstructionMap>
ComputationMap;
InstructionMap& operator[](const HloComputation* computation) {
if (computation_map_.find(computation) == computation_map_.end()) {
computation_vector_.push_back(computation);
}
return computation_map_[computation];
}
const InstructionMap& operator[](const HloComputation* computation) const {
ComputationMap::const_iterator p = computation_map_.find(computation);
CHECK(p != computation_map_.end());
return p->second;
}
absl::InlinedVector<const HloComputation*, 5>::const_iterator begin() const {
return computation_vector_.begin();
}
absl::InlinedVector<const HloComputation*, 5>::const_iterator end() const {
return computation_vector_.end();
}
int64_t size() const {
CHECK_EQ(computation_vector_.size(), computation_map_.size());
return computation_vector_.size();
}
bool empty() const { return size() == 0; }
const HloComputation* Computation(int64_t index) const {
return computation_vector_[index];
}
bool contains(HloInstruction* instr) const {
CHECK_NE(instr, nullptr);
auto* computation = instr->parent();
auto p = computation_map_.find(computation);
if (p == computation_map_.end()) {
return false;
}
    const auto& instr_map = p->second;
return instr_map.find(instr) != instr_map.end();
}
std::string ToString() const {
std::string result;
for (const auto* computation : computation_vector_) {
StrAppend(&result, "computation: ", computation->name(), "\n");
for (const auto& entry : computation_map_.at(computation)) {
StrAppend(&result, " entry: ", entry.first->name(), ", ",
entry.second.ToString(), "\n");
}
}
return result;
}
private:
ComputationMap computation_map_;
absl::InlinedVector<const HloComputation*, 5> computation_vector_;
};
namespace {
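// Describes the relative runtime order of two live ranges as a small bitmask
// (before / same / after and their unions), together with a flag indicating
// whether one range's def-use pair is intercepted by the other.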
class Relation {
public:
enum RuntimeOrder {
kNoOverlap = 0,
kSameInstr = 1,
kBeforeStart = 2,
kBeforeStartOrSameInstr = kBeforeStart | kSameInstr,
kAfterEnd = 4,
kAfterEndOrSameInstr = kAfterEnd | kSameInstr,
kBeforeStartOrAfterEnd = kBeforeStart | kAfterEnd,
kBeforeOrAfterOrOverlap = kBeforeStart | kAfterEnd | kSameInstr,
};
Relation() : intercept_def_use_(false) {}
explicit Relation(RuntimeOrder order, bool intercept_def_use = false)
: intercept_def_use_(intercept_def_use) {
orders_.push_back(order);
}
Relation(const Relation& that)
: intercept_def_use_(that.intercept_def_use_), orders_(that.orders_) {}
bool operator==(const Relation& that) const {
return intercept_def_use_ == that.intercept_def_use_ &&
absl::c_equal(orders_, that.orders_);
}
bool UseImpliesInterception() const {
CHECK_EQ(orders_.size(), 1);
return UseImpliesInterception(orders_[0]);
}
bool DefinitionImpliesInterception() const {
CHECK_EQ(orders_.size(), 1);
return DefinitionImpliesInterception(orders_[0]);
}
bool InterceptDefUse() const { return intercept_def_use_; }
void UpdateInterception(bool value) {
CHECK_EQ(orders_.size(), 1);
intercept_def_use_ = value;
}
Relation::RuntimeOrder GetRuntimeOrder() const {
if (orders_.empty()) {
return Relation::kNoOverlap;
}
CHECK_EQ(orders_.size(), 1);
return orders_[0];
}
bool RuntimeOrderOverlap() const {
return absl::c_any_of(orders_, ImpliesOverlap);
}
bool RuntimeOrderIsUnordered() const {
return orders_.size() == 1 && orders_[0] == kBeforeStartOrAfterEnd;
}
bool RuntimeOrderIsNoOverlap() const {
return orders_.empty() || (orders_.size() == 1 && orders_[0] == kNoOverlap);
}
bool RuntimeOrderIsRunBefore() const {
return orders_.size() == 1 && orders_[0] == kBeforeStart;
}
bool RuntimeOrderIsRunAfter() const {
return orders_.size() == 1 && orders_[0] == kAfterEnd;
}
std::string ToString() const {
return absl::StrCat("Interception = ", intercept_def_use_, ";",
absl::StrJoin(orders_, ","));
}
static bool DefinitionImpliesInterception(RuntimeOrder definition) {
return (definition == kAfterEnd || definition == kBeforeStartOrAfterEnd);
}
static bool UseImpliesInterception(RuntimeOrder use) {
return (use == kBeforeStart || use == kBeforeStartOrAfterEnd);
}
void UnionRelationFromSameSource(const Relation& rel) {
CHECK_LE(orders_.size(), 1);
CHECK_EQ(rel.orders_.size(), 1);
if (orders_.empty()) {
orders_.push_back(rel.orders_[0]);
} else {
orders_[0] = Union(orders_[0], rel.orders_[0]);
}
intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
}
void UnionRelationFromDifferentSource(const Relation& rel) {
if (rel.orders_.empty()) {
return;
}
CHECK_EQ(rel.orders_.size(), 1);
intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
for (auto& local_order : orders_) {
if (OverwriteIfSubsume(rel.orders_[0], &local_order)) {
return;
}
}
orders_.push_back(rel.orders_[0]);
}
static Relation::RuntimeOrder ReverseRuntimeOrder(RuntimeOrder order) {
switch (order) {
case kNoOverlap:
case kSameInstr:
case kBeforeStartOrAfterEnd:
case kBeforeOrAfterOrOverlap:
return order;
case kBeforeStart:
return kAfterEnd;
case kBeforeStartOrSameInstr:
return kAfterEndOrSameInstr;
case kAfterEnd:
return kBeforeStart;
case kAfterEndOrSameInstr:
return kBeforeStartOrSameInstr;
}
}
private:
bool intercept_def_use_;
absl::InlinedVector<RuntimeOrder, 4> orders_;
static RuntimeOrder Union(RuntimeOrder o1, RuntimeOrder o2) {
return static_cast<Relation::RuntimeOrder>(o1 | o2);
}
static bool ImpliesOverlap(RuntimeOrder o) {
return o >= RuntimeOrder::kBeforeStartOrAfterEnd;
}
static bool Subsume(RuntimeOrder o1, RuntimeOrder o2) {
return Union(o1, o2) == o1;
}
  static bool OverwriteIfSubsume(RuntimeOrder o2, RuntimeOrder* o1) {
    CHECK_NE(o1, nullptr);
    if (*o1 == o2) {
      return true;
    }
if (Subsume(o2, *o1)) {
*o1 = o2;
return true;
} else if (Subsume(*o1, o2)) {
return true;
}
return false;
}
};
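// Computes the relative runtime location of two live ranges using an
// HloOrdering. Pairwise results are cached, and otherwise-unordered
// instructions can be forced into an order by recording control dependencies
// to be added later.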
class ComputeRelativeLocation {
public:
typedef LiveRangeRegions::InstructionEntry InstructionEntry;
explicit ComputeRelativeLocation(HloOrdering* ordering)
: ordering_(ordering) {
VLOG(3) << "New analysis";
}
Relation Compute(const InstructionEntry& entry1,
const InstructionEntry& entry2, bool instr2_can_modify) {
auto def = entry1.second.value_definition;
auto use = entry1.first;
Relation::RuntimeOrder order =
ComputeRuntimeOrdering(entry2.first, entry1.first);
if (order == Relation::kSameInstr &&
entry1.second.is_definition != entry2.second.is_definition) {
if (entry1.second.is_definition) {
order = Relation::kBeforeStart;
} else {
order = Relation::kAfterEnd;
}
}
bool intercept = AlwaysForceInterception(entry2.first);
if (def == nullptr || !instr2_can_modify) {
return Relation(order, intercept);
}
if (def->opcode() == HloOpcode::kParameter &&
use == use->parent()->root_instruction()) {
VLOG(3) << "Setting interception due to parameter/root relation";
return Relation(order, true);
}
if (use->parent() == def->parent() &&
ComputeRuntimeOrdering(use, entry2.first) == Relation::kAfterEnd &&
def->opcode() == HloOpcode::kWhile &&
entry2.first->parent() == def->while_body()) {
return Relation(order, false);
}
if (use->parent() == def->parent() &&
ComputeRuntimeOrdering(def, entry2.first) == Relation::kBeforeStart &&
use->opcode() == HloOpcode::kWhile &&
entry2.first->parent() == use->while_body()) {
return Relation(order, false);
}
if (use->parent() == def->parent() &&
def->parent()->IsConditionalBranchComputation() &&
def == entry2.first && def->shape().IsTuple()) {
VLOG(3) << "Setting interception for multi-output instruction inside "
"conditional branch: "
<< def->name();
return Relation(order, true);
}
if (Relation::UseImpliesInterception(order)) {
auto order2 = ComputeRuntimeOrdering(entry2.first, def);
if (Relation::DefinitionImpliesInterception(order2)) {
VLOG(3) << "Setting interception for " << def->ToString()
<< " with use: " << entry1.first->ToString();
intercept = true;
}
}
return Relation(order, intercept);
}
Relation Compute(const LiveRangeRegions& range1,
const LiveRangeRegions& range2) {
Relation dir_src_dest;
for (const auto* computation1 : range1) {
for (const auto* computation2 : range2) {
for (auto instr_entry2 : range2[computation2]) {
if (!ordering_->call_graph().Dominates(computation1, computation2)) {
continue;
}
VLOG(3) << "Locationing " << instr_entry2.first->ToString();
bool instr2_can_modify =
InstructionCanIntercept(instr_entry2, range1);
Relation instr2_relation;
std::vector<InstructionEntry> unordered_ops;
bool unordered_intercept = false;
for (auto instr_entry1 : range1[computation1]) {
auto rel = Compute(instr_entry1, instr_entry2, instr2_can_modify);
VLOG(3) << "New relation with " << instr_entry1.first->name()
<< ": " << rel.ToString();
if (!rel.RuntimeOrderIsUnordered()) {
instr2_relation.UnionRelationFromSameSource(rel);
} else {
unordered_ops.push_back(instr_entry1);
unordered_intercept |= rel.InterceptDefUse();
}
VLOG(3) << "instr2 relation: " << instr2_relation.ToString();
}
if (!ForceRuntimeOrder(unordered_ops, instr_entry2,
instr2_relation.GetRuntimeOrder())) {
VLOG(3) << "Unable to force ordering of unordered ops";
instr2_relation.UnionRelationFromSameSource(Relation(
Relation::kBeforeStartOrAfterEnd, unordered_intercept));
}
dir_src_dest.UnionRelationFromDifferentSource(instr2_relation);
VLOG(3) << "Resulting relation: " << dir_src_dest.ToString();
}
}
}
return dir_src_dest;
}
bool AddControlDependenceForUnorderedOps() {
if (ctrl_deps_.empty()) {
return true;
}
PredecessorHloOrdering* ordering =
dynamic_cast<PredecessorHloOrdering*>(ordering_);
if (ordering == nullptr) {
return false;
}
for (const auto& comp_it : ctrl_deps_) {
HloComputation* parent = comp_it.first;
HloReachabilityMap& reachability_map = ordering->reachability_map(parent);
for (const auto& instr_it : comp_it.second) {
HloInstruction* entry1 = instr_it.first;
for (HloInstruction* entry2 : instr_it.second) {
VLOG(3) << "Add control dependence between " << entry2->name()
<< " vs " << entry1->name();
TF_CHECK_OK(entry2->AddControlDependencyTo(entry1));
}
reachability_map.UpdateReachabilityThroughInstruction(entry1);
for (HloInstruction* entry2 : instr_it.second) {
DCHECK(ordering_->GetExecutionConstraint(entry1, entry2) ==
HloOrdering::ExecutionConstraint::kRunAfter);
}
}
}
return true;
}
private:
enum ComputeStatus {
kFullyComputed,
kPartiallyComputed,
kNotComputed,
};
typedef std::pair<ComputeStatus, Relation::RuntimeOrder> SavedRelation;
bool ForceRuntimeOrder(absl::Span<const InstructionEntry> unordered_ops,
const InstructionEntry entry2,
Relation::RuntimeOrder desired_relation) {
if (unordered_ops.empty()) {
return true;
}
if (desired_relation != Relation::kBeforeStart &&
desired_relation != Relation::kAfterEnd) {
return false;
}
auto ModifiesNonCopy = [](HloInstruction* instr, const HloInstruction* op) {
auto in_place = HloDataflowAnalysis::GetInPlaceInputOutputPairs(instr);
if (in_place.empty()) {
return false;
}
return absl::c_any_of(
in_place, [&](const std::pair<HloOperandIndex, ShapeIndex>&
operand_and_output_index) {
auto* op2 =
instr->operand(operand_and_output_index.first.operand_number);
return (op == nullptr) ? (op2->opcode() == HloOpcode::kCopy)
: (op2 == op);
});
};
for (const InstructionEntry& entry1 : unordered_ops) {
if (entry1.first->parent() != entry2.first->parent()) {
return false;
}
HloInstruction* pred = (desired_relation == Relation::kBeforeStart)
? entry2.first
: entry1.first;
HloInstruction* succ = (desired_relation == Relation::kBeforeStart)
? entry1.first
: entry2.first;
if (pred == pred->parent()->root_instruction()) {
return false;
}
if (succ->opcode() == HloOpcode::kCopy &&
ModifiesNonCopy(pred, succ->operand(0))) {
VLOG(3) << "Failed to force unordered op ordering due to copy ordering "
<< " between " << pred->name() << " vs " << succ->name();
return false;
}
}
for (const InstructionEntry& entry1 : unordered_ops) {
      Save(entry2.first, entry1.first, desired_relation,
           /*is_unordered_originally=*/true);
}
return true;
}
static bool AlwaysForceInterception(HloInstruction* instr) {
if (HloDataflowAnalysis::IsAsynchronousOperationStart(instr->opcode()) ||
HloDataflowAnalysis::IsAsynchronousOperationDone(instr->opcode())) {
return true;
}
switch (instr->opcode()) {
case HloOpcode::kCollectivePermute:
return true;
default:
return false;
}
}
bool InstructionCanIntercept(const InstructionEntry& entry,
const LiveRangeRegions& region) {
auto instr = entry.first;
if (!entry.second.is_definition) {
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(instr)) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
if (region.contains(
instr->mutable_operand(operand_index.operand_number))) {
return true;
}
}
return false;
}
switch (instr->opcode()) {
case HloOpcode::kCopy: {
HloInstruction* operand = instr->mutable_operand(0);
if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->mutable_operand(0);
}
if (region.contains(operand) &&
ShapeUtil::Equal(instr->shape(), instr->operand(0)->shape())) {
return false;
}
return true;
}
case HloOpcode::kParameter:
case HloOpcode::kTuple:
case HloOpcode::kGetTupleElement:
case HloOpcode::kWhile:
case HloOpcode::kCall:
case HloOpcode::kConditional:
return false;
default:
return true;
}
}
SavedRelation AlreadyComputed(HloInstruction* op1, HloInstruction* op2) {
auto p2 = saved_relations_.find(op2);
if (p2 != saved_relations_.end()) {
auto p1 = (*p2).second.find(op1);
if (p1 != (*p2).second.end()) {
return SavedRelation(kFullyComputed, (*p1).second);
}
}
p2 = saved_relations_.find(op1);
if (p2 != saved_relations_.end()) {
auto p1 = (*p2).second.find(op2);
if (p1 != (*p2).second.end()) {
return SavedRelation(kPartiallyComputed,
Relation::ReverseRuntimeOrder((*p1).second));
}
}
return SavedRelation(kNotComputed, Relation::kNoOverlap);
}
Relation::RuntimeOrder Save(HloInstruction* entry1, HloInstruction* entry2,
const Relation::RuntimeOrder relation,
bool is_unordered_originally = false) {
CHECK_EQ(AlreadyComputed(entry1, entry2).first, kNotComputed);
CHECK_NE(relation, Relation::kBeforeStartOrAfterEnd);
saved_relations_[entry2][entry1] = relation;
if (is_unordered_originally) {
CHECK(relation == Relation::kBeforeStart ||
relation == Relation::kAfterEnd)
<< relation;
HloInstruction* pred =
(relation == Relation::kBeforeStart) ? entry1 : entry2;
HloInstruction* succ =
(relation == Relation::kBeforeStart) ? entry2 : entry1;
VLOG(3) << "Save unordered relation: " << pred->name() << " vs "
<< succ->name();
CHECK_EQ(succ->parent(), pred->parent());
auto& dep_vec = ctrl_deps_[succ->parent()][succ];
for (HloInstruction*& op : dep_vec) {
auto rel = AlreadyComputed(pred, op);
if (rel.first != kNotComputed) {
if (rel.second == Relation::kAfterEnd) {
op = pred;
} else {
CHECK(rel.second == Relation::kBeforeStart);
}
return relation;
}
}
VLOG(2) << "Forcing unordered: " << pred->name() << " vs "
<< succ->name();
dep_vec.push_back(pred);
}
return relation;
}
Relation::RuntimeOrder ComputeRuntimeOrdering(HloInstruction* instr1,
HloInstruction* instr2) {
auto saved_relation = AlreadyComputed(instr1, instr2);
if (saved_relation.first != kNotComputed) {
VLOG(3) << "Already computed between " << instr1->name() << " vs "
<< instr2->name();
return saved_relation.second;
}
auto constraint = ordering_->GetExecutionConstraint(instr1, instr2);
switch (constraint) {
case HloOrdering::ExecutionConstraint::kIsSame:
return Save(instr1, instr2, Relation::kSameInstr);
case HloOrdering::ExecutionConstraint::kRunBeforeEnd:
return Save(instr1, instr2, Relation::kBeforeStartOrSameInstr);
case HloOrdering::ExecutionConstraint::kRunBeforeStart:
return Save(instr1, instr2, Relation::kBeforeStart);
case HloOrdering::ExecutionConstraint::kRunAfter:
return Save(instr1, instr2, Relation::kAfterEnd);
case HloOrdering::ExecutionConstraint::kRunExclusiveBefore:
case HloOrdering::ExecutionConstraint::kRunExclusiveAfter:
return Save(instr1, instr2, Relation::kNoOverlap);
case HloOrdering::ExecutionConstraint::kUnordered: {
if (instr1->parent() != instr2->parent()) {
return Relation::kBeforeStartOrAfterEnd;
}
        auto ControlDependenceBefore = [&](HloInstruction* op1,
                                           HloInstruction* op2) {
          auto constraint = ComputeRuntimeOrdering(op1, op2);
          return constraint == Relation::kBeforeStart ||
                 constraint == Relation::kSameInstr ||
                 constraint == Relation::kBeforeStartOrSameInstr;
        };
if (!ctrl_deps_.empty()) {
auto ctrl_deps = ctrl_deps_[instr1->parent()];
if (absl::c_any_of(ctrl_deps[instr2], [&](HloInstruction* pred2) {
return ControlDependenceBefore(instr1, pred2);
})) {
VLOG(2) << "control-dependent: " << instr1->name() << " vs "
<< instr2->name();
return Save(instr1, instr2, Relation::kBeforeStart);
} else if (absl::c_any_of(
ctrl_deps[instr1], [&](HloInstruction* pred1) {
return ControlDependenceBefore(instr2, pred1);
})) {
VLOG(2) << "control-dependent: " << instr2->name() << " vs "
<< instr1->name();
return Save(instr1, instr2, Relation::kAfterEnd);
}
}
return Relation::kBeforeStartOrAfterEnd;
}
}
}
HloOrdering* ordering_;
absl::flat_hash_map<
HloInstruction*,
absl::flat_hash_map<HloInstruction*, Relation::RuntimeOrder>>
saved_relations_;
absl::flat_hash_map<
HloComputation*,
absl::flat_hash_map<HloInstruction*, std::vector<HloInstruction*>>>
ctrl_deps_;
};
}
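// Tracks, for each HLO buffer, the chain of values sharing it as a circular
// doubly-linked list ordered by definition time, and elides kCopy
// instructions whose source and destination chains can be merged without
// creating live-range interference.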
class CopyRemover {
public:
struct ValueNode {
explicit ValueNode(const HloValue* v) : value(v) {}
const HloValue* value;
std::vector<const HloUse*> uses;
ValueNode* prev = nullptr;
ValueNode* next = nullptr;
};
CopyRemover(const HloModule& module, const HloAliasAnalysis& alias_analysis,
HloOrdering* ordering, bool check_live_range_ordering,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: dataflow_(alias_analysis.dataflow_analysis()), ordering_(ordering) {
absl::flat_hash_map<int, int64_t> instruction_ids;
int64_t id = 0;
for (HloComputation* computation : module.MakeComputationPostOrder()) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
instruction_ids[instruction->unique_id()] = id++;
}
}
absl::flat_hash_map<const HloValue*, ValueNode*> value_to_node;
for (const HloBuffer& buffer : alias_analysis.buffers()) {
if (buffer.values().at(0)->defining_instruction()->IsFused()) {
continue;
}
if (check_live_range_ordering) {
auto should_skip_value = [&execution_threads](const HloValue* value) {
return value->defining_instruction()->parent() != nullptr &&
!HloInstruction::IsThreadIncluded(value->defining_instruction()
->parent()
->execution_thread(),
execution_threads);
};
for (const HloValue* value_a : buffer.values()) {
if (value_a->shape().IsToken()) {
continue;
}
if (should_skip_value(value_a)) {
continue;
}
for (const HloValue* value_b : buffer.values()) {
if (!should_skip_value(value_b) && value_a != value_b) {
DCHECK(ordering_->LiveRangeStrictlyBefore(
*value_a, *value_b, dataflow_,
true) ||
ordering_->LiveRangeStrictlyBefore(
*value_b, *value_a, dataflow_,
true))
<< value_a->ToString() << " and " << value_b->ToString()
<< " are not ordered";
}
}
}
}
std::vector<const HloValue*> values = buffer.values();
absl::c_sort(values, [this, &instruction_ids](const HloValue* a,
const HloValue* b) {
if (a == b) {
return false;
}
const bool a_has_smaller_id =
instruction_ids.at(a->defining_instruction()->unique_id()) <
instruction_ids.at(b->defining_instruction()->unique_id());
if (a_has_smaller_id) {
if (ordering_->IsDefinedBefore(*a, *b)) {
return true;
}
if (ordering_->IsDefinedBefore(*b, *a)) {
return false;
}
} else {
if (ordering_->IsDefinedBefore(*b, *a)) {
return false;
}
if (ordering_->IsDefinedBefore(*a, *b)) {
return true;
}
}
return a_has_smaller_id;
});
AddValueList(values, &value_to_node);
}
CreateCopyMap(module, value_to_node);
XLA_VLOG_LINES(3, ToString());
TF_DCHECK_OK(Verify());
}
void AddValueList(
absl::Span<const HloValue* const> values,
absl::flat_hash_map<const HloValue*, ValueNode*>* value_to_node) {
ValueNode* tail = nullptr;
ValueNode* head = nullptr;
for (const HloValue* value : values) {
auto new_node = new ValueNode(value);
(*value_to_node)[value] = new_node;
new_node->uses.reserve(value->GetUses().size());
for (const HloUse& use : value->GetUses()) {
new_node->uses.push_back(&use);
}
if (tail == nullptr) {
head = new_node;
} else {
tail->next = new_node;
new_node->prev = tail;
}
tail = new_node;
}
tail->next = head;
head->prev = tail;
value_lists_.insert(head);
}
void CreateCopyMap(
const HloModule& module,
const absl::flat_hash_map<const HloValue*, ValueNode*>& value_to_node) {
for (HloComputation* computation : module.MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
const HloValueSet& src_value_set =
dataflow_.GetValueSet(instruction->operand(0));
if (src_value_set.values().size() == 1) {
CopyNodes& copy_node = copy_map_[instruction];
copy_node.dest =
value_to_node.at(&dataflow_.GetUniqueValueAt(instruction));
copy_node.src = value_to_node.at(&src_value_set.GetUniqueValue());
}
}
}
}
}
~CopyRemover() {
for (const ValueNode* head : value_lists_) {
const ValueNode* p = head;
do {
const ValueNode* tmp = p->next;
delete p;
p = tmp;
} while (p != head);
}
}
absl::Status Verify() const {
for (const ValueNode* head : value_lists_) {
const ValueNode* p = head;
do {
TF_RET_CHECK(p->prev->next == p);
TF_RET_CHECK(p->next->prev == p);
const HloInstruction* def = p->value->defining_instruction();
if (def->opcode() == HloOpcode::kCopy && ContainsKey(copy_map_, def)) {
TF_RET_CHECK(copy_map_.at(def).dest == p);
}
for (const HloUse* use : p->uses) {
if (use->instruction->opcode() == HloOpcode::kCopy &&
ContainsKey(copy_map_, use->instruction)) {
TF_RET_CHECK(copy_map_.at(use->instruction).src == p);
}
}
p = p->next;
} while (p != head);
}
return absl::OkStatus();
}
LiveRangeRegions ComputeLiveRangeRegions(const ValueNode* head) {
LiveRangeRegions live_range;
auto VisitValueNode = [&](const ValueNode* node) {
HloInstruction* def_op = node->value->instruction();
HloComputation* def_parent = def_op->parent();
live_range[def_parent][def_op].is_definition = true;
for (const auto& use : node->uses) {
auto* use_op = use->instruction;
HloComputation* use_parent = use_op->parent();
live_range[use_parent][use_op].value_definition = def_op;
}
};
ForEachValueInRange(head, VisitValueNode);
return live_range;
}
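// Tries to remove `copy`. The copy is elidable when splicing the destination
// value chain into the source chain (or vice versa) keeps all live ranges
// ordered; `region_analysis_limit` caps the optional region-based
// interference analysis and is updated with the cost actually spent.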
bool TryElideCopy(const HloInstruction* copy,
int64_t* region_analysis_limit) {
VLOG(2) << "Trying to remove " << copy->name();
CHECK_NE(region_analysis_limit, nullptr);
if (!ContainsKey(copy_map_, copy)) {
VLOG(2) << copy->name() << " is not removable";
return false;
}
if (!ShapeUtil::Equal(copy->shape(), copy->operand(0)->shape())) {
VLOG(2) << copy->name() << " is not removable (shape mismatch)";
return false;
}
const CopyNodes& copy_node = copy_map_.at(copy);
DCHECK(copy_node.src != nullptr);
DCHECK(copy_node.dest != nullptr);
int64_t live_range_size1 = 0, live_range_size2 = 0;
ForEachValueInRange(copy_node.src, [&](const ValueNode* node) {
live_range_size1 += 1 + node->uses.size();
});
ForEachValueInRange(copy_node.dest, [&](const ValueNode* node) {
live_range_size2 += 1 + node->uses.size();
});
bool use_region_analysis =
copy->operand(0)->opcode() != HloOpcode::kBroadcast &&
(*region_analysis_limit < 0 ||
live_range_size1 * live_range_size2 <= *region_analysis_limit);
*region_analysis_limit = 0;
VLOG(3) << copy->name() << " copies value "
<< copy_node.src->value->ToShortString();
VLOG(3) << "Source buffer values: " << ValueListToString(copy_node.src);
VLOG(3) << "Dest buffer values: " << ValueListToString(copy_node.dest);
auto CheckLiveRangeBefore = [&](ValueNode* src, ValueNode* dest) {
for (ValueNode* next_dest = dest; next_dest != nullptr;
next_dest = Next(*next_dest)) {
for (ValueNode* prev_src = src; prev_src != nullptr;
prev_src = Prev(*prev_src)) {
if (!LiveRangeBefore(*prev_src, *next_dest)) {
VLOG(2) << "Live range of " << prev_src->value->ToShortString()
<< " is not before " << next_dest->value->ToShortString();
return false;
}
}
}
return true;
};
auto CheckLiveRangeInterference = [&](ValueNode* src, ValueNode* dest,
const CombineLiveRangeOption option) {
CHECK_NE(src, nullptr);
CHECK_NE(dest, nullptr);
if (!use_region_analysis) {
VLOG(2) << "Configured to not use region-based analysis.";
return true;
}
*region_analysis_limit += live_range_size1 * live_range_size2;
if (ValuesInterfere(src, dest, option)) {
VLOG(2) << "Region-based interference is true.";
return true;
}
VLOG(2) << "Region-based interference is false.";
return false;
};
if (copy_node.src->next == copy_node.dest) {
VLOG(2) << copy->name() << " source and destination buffers are same.";
} else if (IsHead(*copy_node.dest)) {
VLOG(2) << copy->name() << " defines the first value in its buffer";
bool live_range_before =
CheckLiveRangeBefore(copy_node.src, Next(*copy_node.dest)) &&
CheckLiveRangeBefore(copy_node.dest->prev, Next(*copy_node.src));
VLOG(2) << "LiveRangeBefore result: " << live_range_before;
if (!live_range_before &&
CheckLiveRangeInterference(copy_node.src, copy_node.dest,
kMergeFirstDestInSource)) {
return false;
}
VLOG(2) << "Splice dest after source.";
SpliceAfter(copy_node.dest, copy_node.src);
} else if (IsTail(*copy_node.src)) {
VLOG(2) << copy->name() << " copies the last value ("
<< copy_node.src->value->ToShortString() << ") in its buffer";
bool live_range_before =
CheckLiveRangeBefore(Prev(*copy_node.dest), copy_node.src->next) &&
CheckLiveRangeBefore(copy_node.src, Next(*copy_node.dest));
VLOG(2) << "LiveRangeBefore result: " << live_range_before;
if (!live_range_before &&
CheckLiveRangeInterference(copy_node.src, copy_node.dest,
kMergeLastSourceInDest)) {
VLOG(2) << "Region-based analysis concludes interference.";
return false;
}
VLOG(2) << "Splice src after prev of dest.";
SpliceAfter(copy_node.src->next, Prev(*copy_node.dest));
} else {
VLOG(2) << copy->name()
<< " copies value in middle of source buffer to value in middle "
"of destination buffer";
return false;
}
RemoveCopyValue(copy_node.dest);
XLA_VLOG_LINES(4, ToString());
TF_DCHECK_OK(Verify());
return true;
}
void RemoveCopyValue(ValueNode* copy_value_node) {
CHECK_EQ(copy_value_node->value->defining_instruction()->opcode(),
HloOpcode::kCopy);
ValueNode* operand_node = copy_value_node->prev;
CHECK(operand_node != copy_value_node);
VLOG(2) << "Removing copy " << operand_node->value->ToShortString()
<< " => " << copy_value_node->value->ToShortString();
operand_node->next = copy_value_node->next;
copy_value_node->next->prev = operand_node;
auto it = absl::c_find_if(operand_node->uses, [copy_value_node](
const HloUse* use) {
return use->instruction == copy_value_node->value->defining_instruction();
});
CHECK(it != operand_node->uses.end());
operand_node->uses.erase(it);
for (const HloUse* copy_use : copy_value_node->uses) {
operand_node->uses.push_back(copy_use);
if (copy_use->instruction->opcode() == HloOpcode::kCopy &&
ContainsKey(copy_map_, copy_use->instruction)) {
copy_map_.at(copy_use->instruction).src = operand_node;
}
}
copy_map_.erase(copy_value_node->value->defining_instruction());
delete copy_value_node;
}
bool LiveRangeBefore(const ValueNode& a, const ValueNode& b) {
if (a.uses.empty()) {
VLOG(2) << "Empty uses for " << *a.value;
return ordering_->IsDefinedBefore(*a.value, *b.value);
}
VLOG(3) << "Checking live ranges before: " << ValueListToString(&a)
<< " vs " << ValueListToString(&b);
if (a.value->IsRootOf(b.value->defining_instruction()->parent())) {
VLOG(3) << "Value is root of the same computation";
return false;
}
return ordering_->UsesBeforeValueDefinition(
a.uses, *b.value, dataflow_,
false);
}
bool IsTail(const ValueNode& node) const {
return ContainsKey(value_lists_, node.next);
}
bool IsHead(const ValueNode& node) const {
return ContainsKey(value_lists_, &node);
}
ValueNode* Next(const ValueNode& node) const {
if (IsTail(node)) {
return nullptr;
} else {
return node.next;
}
}
ValueNode* Prev(const ValueNode& node) const {
if (IsHead(node)) {
return nullptr;
} else {
return node.prev;
}
}
void SpliceAfter(ValueNode* head, ValueNode* insert_after) {
DCHECK(IsHead(*head));
value_lists_.erase(head);
ValueNode* tail = head->prev;
tail->next = insert_after->next;
insert_after->next->prev = tail;
insert_after->next = head;
head->prev = insert_after;
}
enum CombineLiveRangeOption {
kMergeFirstDestInSource = 1,
kMergeLastSourceInDest = 2
};
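// Region-based interference test: computes the live-range regions of the
// source and destination chains and checks whether either overlaps the other
// in a way that intercepts a def-use pair.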
bool ValuesInterfere(const ValueNode* src, const ValueNode* dest,
CombineLiveRangeOption merge_location) {
auto src_live_range = ComputeLiveRangeRegions(src);
auto dest_live_range = ComputeLiveRangeRegions(dest);
VLOG(5) << "src value: " << src->value->ToString();
VLOG(5) << "src live range:\n" << src_live_range.ToString();
VLOG(5) << "dest value: " << dest->value->ToString();
VLOG(5) << "dest live range:\n" << dest_live_range.ToString();
ComputeRelativeLocation relative_location_analysis(ordering_);
auto rel1 =
relative_location_analysis.Compute(src_live_range, dest_live_range);
VLOG(3) << "Location of dest in relation to src: " << rel1.ToString()
<< " with interception set to " << rel1.InterceptDefUse();
auto rel2 =
relative_location_analysis.Compute(dest_live_range, src_live_range);
VLOG(3) << "Location of src in relation to dest: " << rel2.ToString()
<< " with interception set to " << rel2.InterceptDefUse();
if (rel1.RuntimeOrderOverlap() && rel2.RuntimeOrderOverlap()) {
VLOG(3) << "Both relations are overlap.";
return true;
}
if (rel1.RuntimeOrderOverlap() || rel2.RuntimeOrderOverlap()) {
VLOG(3) << "At least one relation is overlap.";
if (rel1.RuntimeOrderOverlap()) {
VLOG(3) << "rel1 is overlap, with interception = "
<< rel1.InterceptDefUse();
if (rel1.InterceptDefUse() ||
(merge_location != kMergeFirstDestInSource &&
rel2.InterceptDefUse())) {
return true;
}
} else {
VLOG(3) << "rel2 is overlap, with interception = "
<< rel2.InterceptDefUse();
if (rel2.InterceptDefUse() ||
(merge_location != kMergeLastSourceInDest &&
rel1.InterceptDefUse())) {
return true;
}
}
}
    return !relative_location_analysis.AddControlDependenceForUnorderedOps();
}
void ForEachValueInRange(const ValueNode* element,
absl::FunctionRef<void(const ValueNode*)> visitor) {
const ValueNode* head = element;
for (const ValueNode* p = head; p != nullptr; p = Next(*p)) {
visitor(p);
}
while (!IsHead(*head)) {
head = Prev(*head);
}
for (const ValueNode* p = head; p != element; p = Next(*p)) {
visitor(p);
}
}
std::string ValueListToString(const ValueNode* element) {
std::string result = "{";
auto VisitValueNode = [&](const ValueNode* node) {
if (result == "{") {
StrAppend(&result, node->value->ToShortString());
} else {
StrAppend(&result, ", ", node->value->ToShortString());
}
};
ForEachValueInRange(element, VisitValueNode);
StrAppend(&result, "}");
return result;
}
std::string ToString() const {
std::string out = absl::StrCat("CopyRemover:\n");
StrAppend(&out, " Def-use chains in each buffer:\n");
for (const ValueNode* head : value_lists_) {
StrAppend(&out, " Buffer defined by ", head->value->ToShortString(),
":\n");
const ValueNode* p = head;
do {
StrAppend(&out, " ", p->value->ToShortString(), ", uses: ",
absl::StrJoin(p->uses, "; ",
[](std::string* s, const HloUse* use) {
StrAppend(s, use->ToString());
}),
"\n");
p = p->next;
} while (p != head);
}
StrAppend(&out, " Potentially removable copies:\n");
for (const auto& pair : copy_map_) {
const HloInstruction* copy = pair.first;
const CopyNodes& copy_info = pair.second;
StrAppend(&out, " ", copy->name(), " : ",
copy_info.src->value->ToShortString(), " => ",
copy_info.dest->value->ToShortString(), "\n");
}
return out;
}
private:
const HloDataflowAnalysis& dataflow_;
HloOrdering* ordering_;
absl::flat_hash_set<const ValueNode*> value_lists_;
struct CopyNodes {
ValueNode* src = nullptr;
ValueNode* dest = nullptr;
};
absl::flat_hash_map<const HloInstruction*, CopyNodes> copy_map_;
};
}
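// Deep-copies each branch root of a kConditional at the indices where the
// conditional defines phi values, so every branch writes into its own
// buffer.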
absl::Status CopyInsertion::AddCopiesForConditional(
const HloAliasAnalysis& alias_analysis, HloInstruction* conditional) {
VLOG(2) << "Adding copies for kConditional instruction "
<< conditional->name();
ShapeTree<bool> indices_to_copy(conditional->shape());
TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);
if (!IndicesToCopyForConditional(alias_analysis.dataflow_analysis(),
conditional, &indices_to_copy)) {
VLOG(2) << "No copies necessary for kConditional instruction "
<< conditional->name();
return absl::OkStatus();
}
for (HloComputation* computation : conditional->branch_computations()) {
HloInstruction* root = computation->root_instruction();
std::vector<HloInstruction*> users = root->users();
TF_ASSIGN_OR_RETURN(
HloInstruction * deep_copy,
computation->DeepCopyInstruction(root, &indices_to_copy));
for (HloInstruction* user : users) {
TF_RETURN_IF_ERROR(root->ReplaceUseWith(user, deep_copy));
}
computation->set_root_instruction(deep_copy);
}
return absl::OkStatus();
}
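// First phase of copy insertion: walks all non-fusion computations, adds
// copies around while, conditional, and in-place instructions (including
// async-wrapped ones), then handles aliased entry inputs/outputs.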
absl::Status CopyInsertion::AddCopiesToResolveInterference(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
if (computation->IsAsyncComputation()) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kWhile) {
TF_RETURN_IF_ERROR(AddCopiesForWhile(*alias_analysis, instruction));
} else if (instruction->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(
AddCopiesForConditional(*alias_analysis, instruction));
} else {
absl::flat_hash_set<int64_t> copied_operands;
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(
instruction->opcode() == HloOpcode::kAsyncStart
? instruction->async_wrapped_instruction()
: instruction)) {
const HloOperandIndex& operand_index = operand_and_output_index.first;
if (copied_operands.contains(operand_index.operand_number)) {
continue;
}
bool can_share_buffer = false;
if (can_share_buffer_ != nullptr) {
auto maybe_can_share_buffer = can_share_buffer_(
instruction, instruction->operand(operand_index.operand_number),
operand_index.operand_index);
if (maybe_can_share_buffer.has_value()) {
can_share_buffer = maybe_can_share_buffer.value();
}
}
if (can_share_buffer &&
HasDisjointReadWriteRegionsAttr(instruction) &&
absl::c_all_of(
instruction->operand(operand_index.operand_number)->users(),
[&instruction](const HloInstruction* user) {
return user == instruction;
})) {
continue;
}
copied_operands.insert(operand_index.operand_number);
TF_RETURN_IF_ERROR(AddCopiesForInPlaceOperation(
*alias_analysis, instruction, operand_index.operand_number));
}
}
}
}
TF_RETURN_IF_ERROR(
AddCopiesForAliasedInputOutputs(module, execution_threads));
return absl::OkStatus();
}
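// Adds copies required by special-case policies: read-only values whose
// buffer also holds other values, and entry-root indices whose buffers are
// ambiguous, non-distinct, or in conflict with input/output aliasing.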
absl::Status CopyInsertion::AddSpecialCaseCopies(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
return AddSpecialCaseCopies(*call_graph, execution_threads, module);
}
absl::Status CopyInsertion::AddSpecialCaseCopies(
const CallGraph& call_graph,
const absl::flat_hash_set<absl::string_view>& execution_threads,
HloModule* module) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
HloInstructionMap<ShapeTree<bool>> instructions_to_copy;
auto add_index_to_copy = [&instructions_to_copy](HloInstruction* instruction,
const ShapeIndex& index) {
auto it = instructions_to_copy.find(instruction);
if (it == instructions_to_copy.end()) {
auto it_added = instructions_to_copy.emplace(
std::piecewise_construct, std::forward_as_tuple(instruction),
std::forward_as_tuple(instruction->shape(), false));
it = it_added.first;
}
*it->second.mutable_element(index) = true;
};
for (const HloValue* value : alias_analysis->dataflow_analysis().values()) {
HloBuffer& buffer = alias_analysis->GetBufferContainingValue(*value);
if (buffer.values().size() > 1 && ValueIsReadOnly(*value)) {
VLOG(2) << "Value " << value->ToShortString()
<< " is read only, but its buffer contains more than one value. "
"Copying.";
add_index_to_copy(value->defining_instruction(), value->defining_index());
}
for (const HloValue* value2 : buffer.values()) {
if (value2 == value) {
continue;
}
HloPosition position = value2->defining_position();
for (const HloUse& use : value->GetUses()) {
if (use.instruction == position.instruction) {
VLOG(3) << "Same instruction: " << position.instruction->ToString();
if (!alias_analysis->dataflow_analysis()
.CanShareOperandBufferWithUser(
use.instruction->mutable_operand(
use.operand_number),
use.operand_index,
position.instruction,
position.index)) {
VLOG(2) << "Adding back copy: "
<< use.instruction->operand(use.operand_number)->ToString()
<< "@" << use.operand_index.ToString()
<< " instr: " << position.instruction->ToString() << "@"
<< position.index;
add_index_to_copy(
use.instruction->mutable_operand(use.operand_number),
use.operand_index);
}
}
}
}
}
for (HloComputation* computation : module->computations(execution_threads)) {
const CallGraphNode& node = call_graph.GetNode(computation);
if (node.context() == CallContext::kEmbedded) {
continue;
}
TF_RET_CHECK(node.context() == CallContext::kControlFlow);
SpecialCaseCopyPolicy policy =
GetSpecialCaseCopyPolicy(node, module, computation);
HloInstruction* root = computation->root_instruction();
absl::flat_hash_map<const HloBuffer*, ShapeIndex> seen;
ShapeUtil::ForEachSubshape(
      root->shape(), [&](const Shape&, const ShapeIndex& index) {
std::vector<const HloBuffer*> buffers_at_index =
alias_analysis->ComputeBuffersAt(root, index);
bool buffer_seen_before = false;
for (const HloBuffer* buffer : buffers_at_index) {
buffer_seen_before |= !seen.emplace(buffer, index).second;
}
if (buffer_seen_before && policy.copy_root_replicated_buffers &&
computation == module->entry_computation() &&
module->input_output_alias_config().OutputHasAlias(index) &&
buffers_at_index.size() == 1) {
std::optional<HloInputOutputAliasConfig::Alias> alias =
module->input_output_alias_config().GetAliasedParameter(index);
CHECK(alias) << "Alias does not exist";
const ShapeIndex& other_index = seen[buffers_at_index[0]];
VLOG(2) << "Output indices " << index.ToString() << " and "
<< other_index.ToString() << " are both aliased to "
<< alias->parameter_number << " copying " << other_index;
add_index_to_copy(root, other_index);
return;
}
if (buffers_at_index.size() > 1 ||
(buffer_seen_before && policy.copy_root_replicated_buffers)) {
VLOG(2) << "Index " << index << " of computation "
<< computation->name() << " (" << root->name()
<< ") has ambiguous or non-distinct buffer. Copying.";
add_index_to_copy(root, index);
}
});
for (const auto& pair :
alias_analysis->dataflow_analysis().GetInstructionValueSet(root)) {
const ShapeIndex& index = pair.first;
const HloValueSet& value_set = pair.second;
for (const HloValue* value : value_set.values()) {
if (ShouldCopyRootValue(*value, policy)) {
VLOG(2) << "Root of (" << root->name() << ") of computation("
<< computation->name()
<< ") has constant or parameter value at index " << index
<< ". Copying.";
add_index_to_copy(root, index);
}
}
}
}
for (const auto& pair : instructions_to_copy) {
HloInstruction* instruction = pair.first;
const ShapeTree<bool>& indices_to_copy = pair.second;
ShapeTree<HloInstruction*> copies_added(indices_to_copy.shape());
std::vector<HloInstruction*> users = instruction->users();
TF_ASSIGN_OR_RETURN(HloInstruction * deep_copy,
instruction->parent()->DeepCopyInstruction(
instruction, &indices_to_copy, &copies_added));
for (HloInstruction* user : users) {
TF_RETURN_IF_ERROR(instruction->ReplaceUseWith(user, deep_copy));
}
if (instruction == instruction->parent()->root_instruction()) {
instruction->parent()->set_root_instruction(deep_copy);
}
}
return absl::OkStatus();
}
static int64_t GetNumExistingCopies(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
int64_t num_existing_copies = 0;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
++num_existing_copies;
}
}
}
return num_existing_copies;
}
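// Fixpoint loop that repeatedly asks CopyRemover to elide kCopy
// instructions; bounded by the number of copies originally present and by a
// global region-analysis allowance.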
absl::Status CopyInsertion::RemoveUnnecessaryCopies(
HloModule* module, bool check_live_range_ordering,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
4, module->ToString(HloPrintOptions().set_syntax_sugar_async_ops(false)));
std::unique_ptr<HloOrdering> ordering;
if (module->has_schedule()) {
ordering = std::make_unique<SequentialHloOrdering>(module->schedule());
} else {
ordering = std::make_unique<DependencyHloOrdering>(module);
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module, can_share_buffer_));
CopyRemover copy_remover(*module, *alias_analysis, ordering.get(),
check_live_range_ordering, execution_threads);
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Removing unnecessary copies in " << module->name();
LOG(INFO) << "Buffer values, in dependency order: ";
for (const HloBuffer& buffer : alias_analysis->buffers()) {
LOG(INFO) << " HloBuffer " << buffer.id();
}
}
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
int64_t num_existing_copies = GetNumExistingCopies(module, execution_threads);
bool changed = true;
int64_t num_iterations = -1;
VLOG(6) << "Copy Insertion analyzing module with instruction count = "
<< module->instruction_count();
BoundNonLinearCompilerAnalysis allowance(module, name(), 10);
while (changed) {
CHECK_LE(++num_iterations, num_existing_copies);
changed = false;
VLOG(2) << "Running fixpoint iteration " << num_iterations
<< " of copy elision";
for (HloComputation* computation :
module->computations(execution_threads)) {
VLOG(2) << "computation:" << computation->name();
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kCopy) continue;
int64_t region_analysis_cost_now =
(use_region_based_live_range_analysis_ == 0)
? 0
: std::min(allowance.analysis_allowance(),
use_region_based_live_range_analysis_);
if (copy_remover.TryElideCopy(instruction, ®ion_analysis_cost_now)) {
changed = true;
TF_RETURN_IF_ERROR(StripControlDependenciesFrom(instruction));
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(instruction->mutable_operand(0)));
VLOG(6) << "succeeded in eliminating copy.";
}
if (allowance.ContinueAnalysis() && region_analysis_cost_now > 0) {
VLOG(6) << "Copy Insertion analyzing module cost: "
<< region_analysis_cost_now;
VLOG(6) << "instruction:" << instruction->ToString();
allowance.DeductCost(region_analysis_cost_now);
VLOG(6) << "allowance:" << allowance.analysis_allowance();
}
}
}
}
return absl::OkStatus();
}
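// Pass entry point: (1) add copies to resolve interference; (2) remove the
// copies that turn out to be unnecessary; (3) add special-case copies for
// read-only and ambiguous root buffers. Tuple simplification and DCE run in
// between to clean up.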
absl::StatusOr<bool> CopyInsertion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
if (!call_graph->IsFlattened()) {
return FailedPrecondition(
"Call graph must be flattened before copy insertion.");
}
int64_t num_copies_before = GetNumExistingCopies(module, execution_threads);
TF_RETURN_IF_ERROR(AddCopiesToResolveInterference(module, execution_threads));
TupleSimplifier tuple_simplifier;
HloDCE dce;
TF_RETURN_IF_ERROR(tuple_simplifier.Run(module, execution_threads).status());
TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
DumpHloModuleDuringPassIfEnabled(
name(), "after adding copies to resolve interference", *module);
  TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(
      module, /*check_live_range_ordering=*/true, execution_threads));
DumpHloModuleDuringPassIfEnabled(name(), "after removing unnecessary copies",
*module);
TF_RETURN_IF_ERROR(
AddSpecialCaseCopies(*call_graph, execution_threads, module));
DumpHloModuleDuringPassIfEnabled(name(), "after adding special-case copies",
*module);
TF_RETURN_IF_ERROR(tuple_simplifier.Run(module, execution_threads).status());
TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
VLOG(1) << "Num copies before copy-insertion: " << num_copies_before;
VLOG(1) << "Num copies after copy-insertion: "
<< GetNumExistingCopies(module, execution_threads);
return true;
}
} | #include "xla/service/copy_insertion.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ::testing::NotNull;
using ::testing::UnorderedElementsAre;
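// Test helpers: count the kCopy instructions and control edges in a
// computation or module.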
int64_t CountCopies(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
count++;
}
}
return count;
}
int64_t CountCopies(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountCopies(*computation);
}
return count;
}
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
int64_t CountControlEdges(const HloModule& module) {
int64_t count = 0;
for (const auto& computation : module.computations()) {
count += CountControlEdges(*computation);
}
return count;
}
class CopyInsertionTest : public HloTestBase {
protected:
void InsertCopies(HloModule* module) {
CopyInsertion copy_insertion;
VLOG(3) << "Before copy inser: " << module->ToString();
ASSERT_IS_OK(copy_insertion.Run(module).status());
VLOG(2) << "After copy inser: " << module->ToString();
}
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(CopyInsertionTest, SingleParameter) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({x}));
EXPECT_THAT(x->users(), UnorderedElementsAre(tuple));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(x)));
}
TEST_F(CopyInsertionTest, SingleConstant) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant}));
EXPECT_THAT(constant->users(), UnorderedElementsAre(tuple));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(constant)));
}
TEST_F(CopyInsertionTest, ExistingCopiesNotRemoved) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}})));
auto minor_to_major = LayoutUtil::MinorToMajor(constant->shape());
Layout reversed_layout =
LayoutUtil::MakeLayoutFromMajorToMinor(minor_to_major);
Shape copy_shape = constant->shape();
*copy_shape.mutable_layout() = reversed_layout;
HloInstruction* copy_1 = builder.AddInstruction(
HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
HloInstruction* copy_2 = builder.AddInstruction(
HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, copy_1, copy_2));
builder.AddInstruction(
HloInstruction::CreateUnary(add->shape(), HloOpcode::kCopy, add));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(CountCopies(*module), 3);
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
EXPECT_EQ(module->entry_computation()->root_instruction(), add);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
HloInstruction* y = builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "y"));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, constant1, y));
builder.AddInstruction(HloInstruction::CreateTuple({constant2, x, add}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(constant2), op::Copy(x), op::Add(constant1, y)));
}
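
// A bitcast aliases its operand, so a bitcast of a parameter at the root
// still requires a copy.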
TEST_F(CopyInsertionTest, BitcastParameter) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
HloInstruction* bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), x));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(x->users(), UnorderedElementsAre(bitcast));
HloInstruction* old_root = module->entry_computation()->root_instruction();
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(old_root));
}
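
// Same as above, with a constant operand instead of a parameter.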
TEST_F(CopyInsertionTest, BitcastConstant) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* constant =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 42.0})));
HloInstruction* bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2}), constant));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(constant->users(), UnorderedElementsAre(bitcast));
HloInstruction* old_root = module->entry_computation()->root_instruction();
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(old_root));
}
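
// A bitcast of a parameter flowing into the root tuple gets its copy on the
// tuple element.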
TEST_F(CopyInsertionTest, BitcastTupleElementParameter) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
HloInstruction* bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), x));
builder.AddInstruction(HloInstruction::CreateTuple({bitcast}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(x->users(), UnorderedElementsAre(bitcast));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(bitcast)));
}
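
// A nested tuple parameter at the root is deep-copied: one copy per leaf,
// reassembled from get-tuple-elements.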
TEST_F(CopyInsertionTest, NestedTupleParameter) {
auto builder = HloComputation::Builder(TestName());
builder.AddInstruction(HloInstruction::CreateParameter(
0,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(S32, {1, 2, 3})}),
ShapeUtil::MakeShape(F32, {42})}),
"param0"));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(HloOpcode::kParameter,
module->entry_computation()->root_instruction()->opcode());
HloInstruction* old_root = module->entry_computation()->root_instruction();
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 3);
HloInstruction* new_root = module->entry_computation()->root_instruction();
EXPECT_NE(old_root, new_root);
EXPECT_THAT(
new_root,
op::Tuple(
op::Tuple(
op::Copy(op::GetTupleElement(op::GetTupleElement(old_root))),
op::Copy(op::GetTupleElement(op::GetTupleElement(old_root)))),
op::Copy(op::GetTupleElement(old_root))));
}
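
// Extracting the inner tuple of a nested parameter: both leaves of the
// extracted sub-tuple are copied.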
TEST_F(CopyInsertionTest, ElementOfNestedTupleParameter) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0,
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(S32, {1, 2, 3})}),
ShapeUtil::MakeShape(F32, {42})}),
"param0"));
auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(param->shape(), {0}), param, 0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(gte, module->entry_computation()->root_instruction());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(op::GetTupleElement(op::GetTupleElement(param))),
op::Copy(op::GetTupleElement(op::GetTupleElement(param)))));
}
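
// Fixture for copy insertion around while loops. The loop state is
// (s32[] induction variable, f32[8] data); the helpers below build condition
// and body computations with varying dependence between the state elements.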
class WhileCopyInsertionTest : public CopyInsertionTest {
protected:
WhileCopyInsertionTest() : module_(CreateNewVerifiedModule()) {}
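
  // Condition: loop while induction_variable < 10.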
std::unique_ptr<HloComputation> BuildConditionComputation(
const Shape& loop_state_shape) {
auto builder = HloComputation::Builder(TestName() + ".Condition");
auto limit_const = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(10)));
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
limit_const->shape(), loop_state, 0));
builder.AddInstruction(HloInstruction::CreateCompare(
condition_result_shape_, induction_variable, limit_const,
ComparisonDirection::kLt));
return builder.Build();
}
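
  // Body with dependent elements: the data update is broadcast from the old
  // induction variable, so element 1 reads element 0.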
std::unique_ptr<HloComputation> BuildDependentBodyComputation() {
auto builder = HloComputation::Builder(TestName() + ".Body");
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
auto data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
auto convert = builder.AddInstruction(
HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
auto update = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, convert, {}));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data, update));
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
return builder.Build();
}
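
  // Body over a three-element state: increments the induction variable and
  // passes both data elements through unchanged.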
std::unique_ptr<HloComputation> BuildDependentBodyComputation2() {
auto builder = HloComputation::Builder(TestName() + ".Body");
    const Shape loop_state_shape = ShapeUtil::MakeTupleShape(
{induction_variable_shape_, data_shape_, data_shape_});
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
HloInstruction* data1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
HloInstruction* data2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 2));
builder.AddInstruction(HloInstruction::CreateTuple({add0, data1, data2}));
return builder.Build();
}
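
  // Body that passes the induction variable through read-only while the data
  // update depends on it.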
std::unique_ptr<HloComputation> BuildDependentBodyOneReadOnlyComputation() {
auto builder = HloComputation::Builder(TestName() + ".Body");
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
auto convert = builder.AddInstruction(
HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
auto update = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, convert, {}));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data, update));
builder.AddInstruction(
HloInstruction::CreateTuple({induction_variable, add1}));
return builder.Build();
}
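
  // Body whose elements are updated independently: the induction variable is
  // incremented and the data is bumped by a constant vector. Optionally wraps
  // the data in a nested tuple.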
std::unique_ptr<HloComputation> BuildIndependentBodyComputation(
bool nested = false) {
auto builder = HloComputation::Builder(TestName() + ".Body");
const Shape& loop_state_shape =
nested ? nested_loop_state_shape_ : loop_state_shape_;
auto loop_state = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
auto induction_variable =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
HloInstruction* data = nullptr;
if (nested) {
data = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
nested_tuple_shape_, loop_state, 1));
data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, data, 0));
} else {
data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
}
auto update = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data, update));
if (nested) {
auto nested_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add1, add1}));
builder.AddInstruction(HloInstruction::CreateTuple({add0, nested_tuple}));
} else {
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
}
return builder.Build();
}
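
  // Body over the nested state: increments the induction variable, adds a
  // constant to one inner element, and reverses the other.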
std::unique_ptr<HloComputation> BuildNestedBodyComputation() {
auto builder = HloComputation::Builder(TestName() + ".Body");
auto loop_state = builder.AddInstruction(HloInstruction::CreateParameter(
0, nested_loop_state_shape_, "loop_state"));
auto gte0 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
induction_variable_shape_, loop_state, 0));
auto inc = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
gte0->shape(), HloOpcode::kAdd, gte0, inc));
auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
nested_tuple_shape_, loop_state, 1));
auto gte10 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, gte1, 0));
auto update10 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add10 = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, gte10, update10));
auto gte11 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape_, gte1, 1));
auto rev11 = builder.AddInstruction(
HloInstruction::CreateReverse(data_shape_, gte11, {0}));
auto inner_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add10, rev11}));
builder.AddInstruction(HloInstruction::CreateTuple({add0, inner_tuple}));
return builder.Build();
}
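
  // Builds an entry computation around a while with constant-initialized
  // (optionally nested) loop state.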
HloInstruction* BuildWhileInstruction(HloComputation* condition,
HloComputation* body,
bool nested = false) {
auto builder = HloComputation::Builder(TestName() + ".While");
auto induction_var_init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto data_init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
if (nested) {
auto inner_init = builder.AddInstruction(
HloInstruction::CreateTuple({data_init, data_init}));
auto loop_state_init = builder.AddInstruction(
HloInstruction::CreateTuple({induction_var_init, inner_init}));
auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_init->shape(), condition, body, loop_state_init));
module_->AddEntryComputation(builder.Build());
return while_hlo;
}
auto loop_state_init = builder.AddInstruction(
HloInstruction::CreateTuple({induction_var_init, data_init}));
auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape_, condition, body, loop_state_init));
module_->AddEntryComputation(builder.Build());
return while_hlo;
}
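
  // The helpers below build whiles whose init tuple points at a constant, an
  // entry parameter, non-distinct values, or a value with interfering uses.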
HloInstruction* BuildWhileInstruction_InitPointsToConstant() {
auto builder = HloComputation::Builder(TestName() + ".While");
auto data_init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
&builder);
}
HloInstruction* BuildWhileInstruction_InitPointsToParameter() {
auto builder = HloComputation::Builder(TestName() + ".While");
auto data_init = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape_, "data_init"));
return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
&builder);
}
HloInstruction* BuildWhileInstruction_InitPointsToNonDistinct() {
auto builder = HloComputation::Builder(TestName() + ".While");
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto one_vec = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {}));
auto data_init =
builder.AddInstruction(HloInstruction::CreateTuple({one_vec, one_vec}));
return BuildWhileInstructionWithCustomInit(nested_loop_state_shape_,
data_init, &builder);
}
HloInstruction* BuildWhileInstruction_InitPointsToInterfering() {
auto builder = HloComputation::Builder(TestName() + ".While");
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto data_init = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape_, one, {}));
auto one_vec = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
{1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kAdd, data_init, one_vec));
auto xla_while = BuildWhileInstructionWithCustomInit(loop_state_shape_,
data_init, &builder);
auto gte = xla_while->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(xla_while->shape(), {1}), xla_while, 1));
auto sub = xla_while->parent()->AddInstruction(HloInstruction::CreateBinary(
data_shape_, HloOpcode::kSubtract, add, gte));
auto gte0 = xla_while->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(xla_while->shape(), {0}), xla_while, 0));
auto tuple = xla_while->parent()->AddInstruction(
HloInstruction::CreateTuple({gte0, sub}));
xla_while->parent()->set_root_instruction(tuple);
return xla_while;
}
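
  // Shared scaffolding: wraps data_init into the loop state, attaches a
  // condition and an independent body, and installs the entry computation.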
HloInstruction* BuildWhileInstructionWithCustomInit(
const Shape& loop_state_shape, HloInstruction* data_init,
HloComputation::Builder* builder) {
const bool nested =
ShapeUtil::Equal(loop_state_shape, nested_loop_state_shape_);
auto induction_var_init = builder->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape));
auto body = module_->AddEmbeddedComputation(
BuildIndependentBodyComputation(nested));
auto loop_state_init = builder->AddInstruction(
HloInstruction::CreateTuple({induction_var_init, data_init}));
auto while_hlo = builder->AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, condition, body, loop_state_init));
module_->AddEntryComputation(builder->Build());
return while_hlo;
}
std::unique_ptr<HloModule> module_;
Shape induction_variable_shape_ = ShapeUtil::MakeShape(S32, {});
Shape data_shape_ = ShapeUtil::MakeShape(F32, {8});
Shape loop_state_shape_ =
ShapeUtil::MakeTupleShape({induction_variable_shape_, data_shape_});
Shape nested_tuple_shape_ =
ShapeUtil::MakeTupleShape({data_shape_, data_shape_});
Shape nested_loop_state_shape_ = ShapeUtil::MakeTupleShape(
{induction_variable_shape_, nested_tuple_shape_});
Shape condition_result_shape_ = ShapeUtil::MakeShape(PRED, {});
};
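
// Independent state elements need no copies in the body; only the constant
// init elements are copied.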
TEST_F(WhileCopyInsertionTest, IndependentTupleElements) {
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto body =
module_->AddEmbeddedComputation(BuildIndependentBodyComputation());
auto while_hlo = BuildWhileInstruction(condition, body);
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body), 0);
EXPECT_EQ(CountControlEdges(*module_), 0);
EXPECT_THAT(while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
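
// A while feeding another while through re-materialized get-tuple-elements
// in the outer body; only checks that copy insertion runs cleanly. Note that
// the local module_ deliberately shadows the fixture's member.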
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterWithCopies) {
  const std::string hlo_string = R"(
HloModule DependentTupleElements
%DependentTupleElements.Body (loop_state.1: (s32[], f32[8])) -> (s32[], f32[8]) {
%loop_state.1 = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=1
%convert = f32[] convert(s32[] %get-tuple-element.1)
%broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
%add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
ROOT %tuple = (s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1)
}
%DependentTupleElements.Condition (loop_state: (s32[], f32[8])) -> pred[] {
%loop_state = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state), index=0
%constant = s32[] constant(10)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %DependentTupleElements.While () -> (s32[], f32[8]) {
%constant.2 = s32[] constant(0)
%constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
%tuple.1 = (s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3)
ROOT %while.1 = (s32[], f32[8]{0}) while((s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
auto while_hlo = module_->entry_computation()->root_instruction();
HloComputation* outer_while_condition =
module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
HloComputation* outer_while_body =
module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
HloInstruction* outer_while =
while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), outer_while_condition, outer_while_body,
while_hlo->mutable_operand(0)));
HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
std::vector<HloInstruction*> materialized_gtes;
for (int i = 0; i < outer_param->shape().tuple_shapes_size(); ++i) {
materialized_gtes.push_back(
outer_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
outer_param->shape().tuple_shapes(i), outer_param, i)));
}
HloInstruction* dual_init = outer_while_body->AddInstruction(
HloInstruction::CreateTuple(materialized_gtes));
HloInstruction* dual_while =
outer_while_body->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), while_hlo->while_condition(),
while_hlo->while_body(), dual_init));
TF_CHECK_OK(outer_while_body->ReplaceInstruction(
outer_while_body->root_instruction(), dual_while));
TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
InsertCopies(module_.get());
}
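
// As above, but the inner while consumes the outer body parameter directly.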
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterNoCopies) {
  const std::string hlo_string = R"(
HloModule DependentTupleElements
%DependentTupleElements.Body (loop_state.1: (s32[], f32[8])) -> (s32[], f32[8]) {
%loop_state.1 = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=1
%convert = f32[] convert(s32[] %get-tuple-element.1)
%broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
%add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
ROOT %tuple = (s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1)
}
%DependentTupleElements.Condition (loop_state: (s32[], f32[8])) -> pred[] {
%loop_state = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state), index=0
%constant = s32[] constant(10)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %DependentTupleElements.While () -> (s32[], f32[8]) {
%constant.2 = s32[] constant(0)
%constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
%tuple.1 = (s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3)
ROOT %while.1 = (s32[], f32[8]{0}) while((s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
auto while_hlo = module_->entry_computation()->root_instruction();
HloComputation* outer_while_condition =
module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
HloComputation* outer_while_body =
module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
HloInstruction* outer_while =
while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), outer_while_condition, outer_while_body,
while_hlo->mutable_operand(0)));
HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
HloInstruction* dual_while =
outer_while_body->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), while_hlo->while_condition(),
while_hlo->while_body(), outer_param));
TF_CHECK_OK(outer_while_body->ReplaceInstruction(
outer_while_body->root_instruction(), dual_while));
TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
InsertCopies(module_.get());
}
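
// The same while-feeding-while pattern scaled up to a 20-element loop state.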
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterBig) {
  const std::string hlo_string = R"(
HloModule DependentTupleElements
%DependentTupleElements.Body (loop_state.1: (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0})) -> (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) {
%loop_state.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state.1), index=1
%convert = f32[] convert(s32[] %get-tuple-element.1)
%broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
%add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
ROOT %tuple = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1)
}
%DependentTupleElements.Condition (loop_state: (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0})) -> pred[] {
%loop_state = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state), index=0
%constant = s32[] constant(10)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %DependentTupleElements.While () -> (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) {
%constant.2 = s32[] constant(0)
%constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
%tuple.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3)
ROOT %while.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) while( (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
auto while_hlo = module_->entry_computation()->root_instruction();
HloComputation* outer_while_condition =
module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
HloComputation* outer_while_body =
module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
HloInstruction* outer_while =
while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), outer_while_condition, outer_while_body,
while_hlo->mutable_operand(0)));
HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
std::vector<HloInstruction*> materialized_gtes;
for (int i = 0; i < outer_param->shape().tuple_shapes_size(); ++i) {
materialized_gtes.push_back(
outer_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
outer_param->shape().tuple_shapes(i), outer_param, i)));
}
HloInstruction* dual_init = outer_while_body->AddInstruction(
HloInstruction::CreateTuple(materialized_gtes));
HloInstruction* dual_while =
outer_while_body->AddInstruction(HloInstruction::CreateWhile(
while_hlo->shape(), while_hlo->while_condition(),
while_hlo->while_body(), dual_init));
TF_CHECK_OK(outer_while_body->ReplaceInstruction(
outer_while_body->root_instruction(), dual_while));
TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
InsertCopies(module_.get());
}
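
// Dependent elements force one copy in the body: the induction variable is
// copied so its old value can feed the broadcast that updates the data.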
TEST_F(WhileCopyInsertionTest, DependentTupleElements) {
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto body = module_->AddEmbeddedComputation(BuildDependentBodyComputation());
auto while_hlo = BuildWhileInstruction(condition, body);
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body), 1);
EXPECT_EQ(CountControlEdges(*body), 0);
EXPECT_THAT(
body->root_instruction(),
op::Tuple(op::Add(), op::Add(op::GetTupleElement(), op::Broadcast())));
auto add = body->root_instruction()->operand(0);
auto bcast = body->root_instruction()->operand(1)->operand(1);
ASSERT_EQ(add->opcode(), HloOpcode::kAdd);
ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_THAT(while_hlo->while_body()->root_instruction(),
op::Tuple(op::Add(op::Copy(), op::Constant()),
op::Add(op::GetTupleElement(),
op::Broadcast(op::Convert(op::Copy())))));
EXPECT_THAT(while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
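
// A read-only element that other elements depend on requires no body copies.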
TEST_F(WhileCopyInsertionTest, DependentTupleElements_OneReadOnly) {
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto body = module_->AddEmbeddedComputation(
BuildDependentBodyOneReadOnlyComputation());
BuildWhileInstruction(condition, body);
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body), 0);
EXPECT_EQ(CountControlEdges(*body), 0);
}
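
// Two loops whose shared init is built from entry parameters: each loop gets
// its own copy of the written data element.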
TEST_F(WhileCopyInsertionTest,
DependentTupleElements_OneReadOnly_TwoLoops_EntryParams) {
auto condition1 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto condition2 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto body1 = module_->AddEmbeddedComputation(
BuildDependentBodyOneReadOnlyComputation());
auto body2 = module_->AddEmbeddedComputation(
BuildDependentBodyOneReadOnlyComputation());
auto builder = HloComputation::Builder(TestName() + ".While");
auto iter_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"));
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "data"));
auto loop_init = builder.AddInstruction(
HloInstruction::CreateTuple({iter_param, data_param}));
auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape_, condition1, body1, loop_init));
auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape_, condition2, body2, loop_init));
auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo2->shape(), {0}), while_hlo2, 0));
builder.AddInstruction(
HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
auto entry = module_->AddEntryComputation(builder.Build());
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body1), 0);
EXPECT_EQ(CountCopies(*body2), 0);
EXPECT_EQ(CountControlEdges(*body1), 0);
EXPECT_EQ(CountControlEdges(*body2), 0);
EXPECT_EQ(CountCopies(*entry), 2);
EXPECT_EQ(while_hlo1->operand(0)->operand(1)->opcode(), HloOpcode::kCopy);
EXPECT_EQ(while_hlo2->operand(0)->operand(1)->opcode(), HloOpcode::kCopy);
EXPECT_NE(while_hlo1->operand(0)->operand(1),
while_hlo2->operand(0)->operand(1));
}
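
// As above, but the shared init elements are computed values; the read-only
// induction slot needs no copy while the data slot is copied per loop.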
TEST_F(WhileCopyInsertionTest,
DependentTupleElements_OneReadOnly_TwoLoops_NonParams) {
auto condition1 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto condition2 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape_));
auto body1 = module_->AddEmbeddedComputation(
BuildDependentBodyOneReadOnlyComputation());
auto body2 = module_->AddEmbeddedComputation(
BuildDependentBodyOneReadOnlyComputation());
auto builder = HloComputation::Builder(TestName() + ".While");
auto iter_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"));
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "data"));
Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
auto convert = builder.AddInstruction(
HloInstruction::CreateConvert(f32_scalar_shape, iter_param));
auto iter_value = builder.AddInstruction(
HloInstruction::CreateUnary(convert->shape(), HloOpcode::kExp, convert));
auto convert2 = builder.AddInstruction(
HloInstruction::CreateConvert(induction_variable_shape_, iter_value));
auto data_value = builder.AddInstruction(HloInstruction::CreateUnary(
data_param->shape(), HloOpcode::kExp, data_param));
auto loop_init = builder.AddInstruction(
HloInstruction::CreateTuple({convert2, data_value}));
auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape_, condition1, body1, loop_init));
auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape_, condition2, body2, loop_init));
auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo2->shape(), {0}), while_hlo2, 0));
builder.AddInstruction(
HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
auto entry = module_->AddEntryComputation(builder.Build());
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*entry), 2);
EXPECT_THAT(while_hlo1->operand(0),
op::Tuple(op::Convert(op::Exp()), op::Copy(op::Exp())));
EXPECT_THAT(while_hlo2->operand(0),
op::Tuple(op::Convert(op::Exp()), op::Copy(op::Exp())));
}
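
// Nested state: exactly one body copy is needed, on either side of the
// reverse.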
TEST_F(WhileCopyInsertionTest, NestedTupleElements) {
auto condition = module_->AddEmbeddedComputation(
BuildConditionComputation(nested_loop_state_shape_));
auto body = module_->AddEmbeddedComputation(BuildNestedBodyComputation());
BuildWhileInstruction(condition, body, true);
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body), 1);
EXPECT_EQ(CountCopies(*module_), 4);
if (body->root_instruction()->operand(1)->operand(1)->opcode() ==
HloOpcode::kCopy) {
EXPECT_THAT(
body->root_instruction(),
op::Tuple(op::Add(), op::Tuple(op::Add(), op::Copy(op::Reverse()))));
} else {
EXPECT_THAT(
body->root_instruction(),
op::Tuple(op::Add(), op::Tuple(op::Add(), op::Reverse(op::Copy()))));
}
}
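
// Init pointing at constants: both init elements are copied, no body copies.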
TEST_F(WhileCopyInsertionTest, InitPointsToConstant) {
auto while_hlo = BuildWhileInstruction_InitPointsToConstant();
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
EXPECT_EQ(CountCopies(*module_), 2);
EXPECT_THAT(while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
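
// Init pointing at an entry parameter: the parameter element is copied.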
TEST_F(WhileCopyInsertionTest, InitPointsToParameter) {
auto while_hlo = BuildWhileInstruction_InitPointsToParameter();
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
EXPECT_EQ(CountCopies(*module_), 2);
EXPECT_THAT(while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()), op::Copy(op::Parameter())));
}
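
// Non-distinct init (the same broadcast twice): one element is copied at the
// init and one inside the body, on whichever side the pass picks.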
TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinct) {
auto while_hlo = BuildWhileInstruction_InitPointsToNonDistinct();
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*module_->entry_computation()), 2);
if (while_hlo->operand(0)->operand(1)->operand(0)->opcode() ==
HloOpcode::kCopy) {
EXPECT_THAT(
while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()),
op::Tuple(op::Copy(op::Broadcast()), op::Broadcast())));
} else {
EXPECT_THAT(
while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()),
op::Tuple(op::Broadcast(), op::Copy(op::Broadcast()))));
}
EXPECT_EQ(CountCopies(*while_hlo->while_body()), 1);
if (while_hlo->while_body()
->root_instruction()
->operand(1)
->operand(0)
->opcode() == HloOpcode::kCopy) {
EXPECT_THAT(
while_hlo->while_body()->root_instruction(),
op::Tuple(op::Add(), op::Tuple(op::Copy(op::Add()), op::Add())));
} else {
EXPECT_THAT(
while_hlo->while_body()->root_instruction(),
op::Tuple(op::Add(), op::Tuple(op::Add(), op::Copy(op::Add()))));
}
}
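
// The init broadcast has an interfering later use (it also feeds an add that
// is live across the while); copying the init elements suffices, and the
// body stays copy-free.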
TEST_F(WhileCopyInsertionTest, InitPointsToInterfering) {
auto while_hlo = BuildWhileInstruction_InitPointsToInterfering();
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*module_), 2);
EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
EXPECT_THAT(while_hlo->operand(0),
op::Tuple(op::Copy(op::Constant()), op::Copy(op::Broadcast())));
}
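
// A non-distinct init shared by two while loops: only the written induction
// element is copied per loop; the pass-through data parameters are not.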
TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinctUsedByTwoWhileLoops) {
  const Shape loop_state_shape = ShapeUtil::MakeTupleShape(
{induction_variable_shape_, data_shape_, data_shape_});
auto condition1 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape));
auto condition2 = module_->AddEmbeddedComputation(
BuildConditionComputation(loop_state_shape));
auto body1 =
module_->AddEmbeddedComputation(BuildDependentBodyComputation2());
auto body2 =
module_->AddEmbeddedComputation(BuildDependentBodyComputation2());
auto builder = HloComputation::Builder(TestName() + ".While");
auto iter_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"));
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, data_shape_, "data"));
auto loop_init = builder.AddInstruction(
HloInstruction::CreateTuple({iter_param, data_param, data_param}));
auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, condition1, body1, loop_init));
auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, condition2, body2, loop_init));
auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo2, 0));
builder.AddInstruction(
HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
module_->AddEntryComputation(builder.Build());
InsertCopies(module_.get());
EXPECT_EQ(CountCopies(*body1), 0);
EXPECT_EQ(CountCopies(*body2), 0);
EXPECT_EQ(CountCopies(*module_->entry_computation()), 2);
EXPECT_THAT(while_hlo1->operand(0),
op::Tuple(op::Copy(), op::Parameter(), op::Parameter()));
EXPECT_THAT(while_hlo2->operand(0),
op::Tuple(op::Copy(), op::Parameter(), op::Parameter()));
}
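
// A body that swaps its two elements: each element is double-copied and
// control edges sequence the copies so the swap is safe.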
TEST_F(CopyInsertionTest, SwizzlingWhile) {
auto module = CreateNewVerifiedModule();
const Shape loop_state_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_1, body_element_0}));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 6);
EXPECT_EQ(CountCopies(*body), 4);
EXPECT_EQ(CountControlEdges(*body), 2);
EXPECT_THAT(body->root_instruction(),
op::Tuple(op::Copy(op::Copy()), op::Copy(op::Copy())));
EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
EXPECT_THAT(xla_while->operand(0), op::Tuple(op::Copy(), op::Copy()));
}
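
// Input/output aliasing on both elements while the root swaps them: the
// aliasing cannot be honored in place, so four copies are still required.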
TEST_F(CopyInsertionTest, CrossingParameters) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
builder.AddInstruction(HloInstruction::CreateTuple({gte1, gte0}));
module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4);
}
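
// Matching aliasing with a pass-through root needs no copies at all.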
TEST_F(CopyInsertionTest, ParametersAliasing) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{1}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
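
// Without aliasing, both pass-through elements are copied.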
TEST_F(CopyInsertionTest, ParameterWithNoAliasing) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(op::GetTupleElement(param, 0)),
op::Copy(op::GetTupleElement(param, 1))));
EXPECT_EQ(CountCopies(*module), 2);
}
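
// Aliasing only element 0 leaves a single copy, for element 1.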
TEST_F(CopyInsertionTest, ParameterWithPartialAliasing) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
InsertCopies(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(param, 0),
op::Copy(op::GetTupleElement(param, 1))));
EXPECT_EQ(CountCopies(*module), 1);
}
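
// Elements consumed by negates produce fresh values that are not
// pass-throughs, so no copies are needed even with partial aliasing.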
TEST_F(CopyInsertionTest, ParameterAndParallelOpsWithPartialAliasing) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));
builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
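
// Same with a dependent add in the mix: still no copies required.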
TEST_F(CopyInsertionTest, ParameterAndOpsWithPartialAliasing) {
auto module = CreateNewVerifiedModule();
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, negate0, negate1));
builder.AddInstruction(HloInstruction::CreateTuple({add, negate1}));
module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{0}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
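
// Swapping body with an op on one path: the full set of copies and control
// edges is still required.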
TEST_F(CopyInsertionTest, SwizzlingWhileWithOneOp) {
auto module = CreateNewVerifiedModule();
const Shape loop_state_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, body_element_1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({negate, body_element_0}));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 6);
EXPECT_EQ(CountCopies(*body), 4);
EXPECT_EQ(CountControlEdges(*body), 2);
EXPECT_THAT(
body->root_instruction(),
op::Tuple(op::Copy(op::Negate(op::Copy())), op::Copy(op::Copy())));
EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
EXPECT_THAT(xla_while->operand(0), op::Tuple(op::Copy(), op::Copy()));
}
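
// Swapping body whose init shares one constant: copying the init elements is
// enough, and the body needs no copies.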
TEST_F(CopyInsertionTest, SwizzlingWhileSharedInput) {
auto module = CreateNewVerifiedModule();
const Shape loop_state_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_1, body_element_0}));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
EXPECT_EQ(CountCopies(*body), 0);
EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
  EXPECT_THAT(xla_while->operand(0), op::Tuple(op::Copy(), op::Copy()));
}
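
// A chain of whiles: each body copies only the reversed element; the entry
// computation contributes four more copies (two at the first init, two at
// the root for the pass-through parameter elements).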
TEST_F(CopyInsertionTest, SequentialWhiles) {
const Shape element_shape = ShapeUtil::MakeShape(F32, {42});
const Shape loop_state_shape = ShapeUtil::MakeTupleShape(
{element_shape, element_shape, element_shape, element_shape});
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param_0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, element_shape, "param_0"));
auto param_1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, element_shape, "param_1"));
auto param_2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, element_shape, "param_2"));
auto param_3 = builder.AddInstruction(
HloInstruction::CreateParameter(3, element_shape, "param_3"));
const int kNumWhiles = 3;
HloInstruction* prev_element_1 = param_1;
HloInstruction* prev_element_2 = param_2;
HloInstruction* prev_element_3 = param_3;
std::vector<const HloInstruction*> whiles;
for (int i = 0; i < kNumWhiles; ++i) {
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, body_param, 1));
auto body_element_2 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, body_param, 2));
auto body_element_3 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, body_param, 3));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
element_shape, HloOpcode::kNegate, body_element_2));
auto reverse = body_builder.AddInstruction(
HloInstruction::CreateReverse(element_shape, body_element_3, {0}));
body_builder.AddInstruction(HloInstruction::CreateTuple(
{body_element_0, body_element_1, negate, reverse}));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
cond_builder.AddInstruction(HloInstruction::CreateUnary(
cond_constant->shape(), HloOpcode::kNot, cond_constant));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto while_init = builder.AddInstruction(HloInstruction::CreateTuple(
{param_0, prev_element_1, prev_element_2, prev_element_3}));
auto xla_while = builder.AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, condition, body, while_init));
whiles.push_back(xla_while);
if (i != kNumWhiles - 1) {
prev_element_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, xla_while, 1));
prev_element_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, xla_while, 2));
prev_element_3 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, xla_while, 3));
}
}
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4 + kNumWhiles);
for (const HloInstruction* xla_while : whiles) {
EXPECT_EQ(CountCopies(*xla_while->while_body()), 1);
}
EXPECT_THAT(whiles[0]->operand(0), op::Tuple(op::Parameter(), op::Parameter(),
op::Copy(), op::Copy()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Copy(), op::Copy(), op::GetTupleElement(),
op::GetTupleElement()));
}
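
// A constant body root and a parameter init each require a copy.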
TEST_F(CopyInsertionTest, WhileBodyWithConstantRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param_0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param_0"));
auto body_builder = HloComputation::Builder("body");
body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0)));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape_, condition, body, param_0));
module->AddEntryComputation(builder.Build());
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
EXPECT_THAT(xla_while->operand(0), op::Copy(op::Parameter()));
EXPECT_THAT(body->root_instruction(), op::Copy(op::Constant()));
EXPECT_THAT(condition->root_instruction(), op::Constant());
}
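
// Token-typed values must never be copied.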
TEST_F(CopyInsertionTest, TokensShouldNotBeCopied) {
std::string module_string = R"(
HloModule TokensShouldNotBeCopied
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %TokensShouldNotBeCopied () -> s32[] {
%one = s32[] constant(1)
%negative_one = s32[] negate(%one)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %negative_one, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
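
// Benchmark helper: a trivial while condition (the benchmark modules are
// only compiled, never executed).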
std::unique_ptr<HloComputation> MakeTrivialCondition(const Shape& shape) {
auto builder = HloComputation::Builder("trivial_condition");
builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "loop_state"));
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNot, constant));
return builder.Build();
}
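
// Benchmark helper: passes element 0 through, reverses element 1, and
// replaces element 2 with the sum of elements 1 and 2.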
std::unique_ptr<HloComputation> MakeBenchmarkWhileBody() {
auto builder = HloComputation::Builder("benchmark_loop_body");
const Shape element_shape = ShapeUtil::MakeShape(F32, {42});
const Shape loop_state_shape =
ShapeUtil::MakeTupleShape({element_shape, element_shape, element_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
HloInstruction* element_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, param, 0));
HloInstruction* element_1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, param, 1));
HloInstruction* element_2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, param, 2));
HloInstruction* rev_1 = builder.AddInstruction(
HloInstruction::CreateReverse(element_shape, element_1, {0}));
HloInstruction* add_1_2 = builder.AddInstruction(HloInstruction::CreateBinary(
element_shape, HloOpcode::kAdd, element_1, element_2));
builder.AddInstruction(
HloInstruction::CreateTuple({element_0, rev_1, add_1_2}));
return builder.Build();
}
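// Benchmarks copy insertion on a chain of dependent while loops. The
// assertion encodes the expected cost: three copies for the entry
// parameters plus one copy per while.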
void BM_SequentialWhiles(::testing::benchmark::State& state) {
const int num_whiles = state.range(0);
for (auto s : state) {
state.PauseTiming();
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
HloModule module("BM_SequentialWhiles", config);
auto builder = HloComputation::Builder("BM_SequentialWhiles");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {42}), "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {42}), "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {42}), "z"));
HloInstruction* init =
builder.AddInstruction(HloInstruction::CreateTuple({x, y, z}));
HloInstruction* prev_loop_state = init;
for (int w = 0; w < num_whiles; ++w) {
HloComputation* condition =
module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
HloComputation* body =
module.AddEmbeddedComputation(MakeBenchmarkWhileBody());
prev_loop_state = builder.AddInstruction(HloInstruction::CreateWhile(
init->shape(), condition, body, prev_loop_state));
}
module.AddEntryComputation(builder.Build());
CopyInsertion copy_insertion;
state.ResumeTiming();
ASSERT_IS_OK(copy_insertion.Run(&module).status());
state.PauseTiming();
ASSERT_EQ(CountCopies(module), 3 + num_whiles);
state.ResumeTiming();
}
}
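// Benchmarks copy insertion on independent while loops sharing one init
// tuple; each while is expected to need copies of all three loop-state
// elements, hence 3 * num_whiles.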
void BM_ParallelWhiles(::testing::benchmark::State& state) {
const int num_whiles = state.range(0);
for (auto s : state) {
state.PauseTiming();
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
HloModule module("BM_SequentialWhiles", config);
auto builder = HloComputation::Builder("BM_ParallelWhiles");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {42}), "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {42}), "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {42}), "z"));
HloInstruction* init =
builder.AddInstruction(HloInstruction::CreateTuple({x, y, z}));
HloInstruction* sum = nullptr;
for (int w = 0; w < num_whiles; ++w) {
HloComputation* condition =
module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
HloComputation* body =
module.AddEmbeddedComputation(MakeBenchmarkWhileBody());
HloInstruction* xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(init->shape(), condition, body, init));
if (sum == nullptr) {
sum = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(x->shape(), xla_while, 0));
} else {
HloInstruction* element_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(x->shape(), xla_while, 0));
sum = builder.AddInstruction(HloInstruction::CreateBinary(
x->shape(), HloOpcode::kAdd, sum, element_0));
}
}
module.AddEntryComputation(builder.Build());
CopyInsertion copy_insertion;
state.ResumeTiming();
ASSERT_IS_OK(copy_insertion.Run(&module).status());
    state.PauseTiming();
    ASSERT_EQ(CountCopies(module), 3 * num_whiles);
    state.ResumeTiming();
  }
}
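// Overload used by BM_ManyElementTuple: a pass-through loop body over a
// tuple of num_tuple_inputs scalar elements.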
std::unique_ptr<HloComputation> MakeBenchmarkWhileBody(
const int num_tuple_inputs) {
auto builder = HloComputation::Builder("benchmark_loop_body");
const Shape element_shape = ShapeUtil::MakeShape(F32, {});
std::vector<Shape> input_shape(num_tuple_inputs, element_shape);
const Shape loop_state_shape = ShapeUtil::MakeTupleShape(input_shape);
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
std::vector<HloInstruction*> gte_nodes(num_tuple_inputs);
for (int i = 0; i < num_tuple_inputs; ++i) {
gte_nodes[i] = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(element_shape, param, i));
}
builder.AddInstruction(HloInstruction::CreateTuple(gte_nodes));
return builder.Build();
}
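// Benchmarks copy insertion on a while loop whose state is a very wide
// tuple; no copy count is asserted, only that the pass succeeds.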
void BM_ManyElementTuple(::testing::benchmark::State& state) {
const int num_tuple_inputs = state.range(0);
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
CopyInsertion copy_insertion;
const Shape element_shape = ShapeUtil::MakeShape(F32, {});
std::vector<HloInstruction*> tuple_params(num_tuple_inputs);
for (auto s : state) {
state.PauseTiming();
auto builder = HloComputation::Builder("BM_ParallelWhiles");
HloModule module("BM_ManyElementTuple", config);
for (int j = 0; j < num_tuple_inputs; ++j) {
tuple_params[j] = builder.AddInstruction(
HloInstruction::CreateParameter(j, element_shape, ""));
}
HloInstruction* init =
builder.AddInstruction(HloInstruction::CreateTuple(tuple_params));
HloComputation* condition =
module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
HloComputation* body =
module.AddEmbeddedComputation(MakeBenchmarkWhileBody(num_tuple_inputs));
HloInstruction* xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(init->shape(), condition, body, init));
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeShape(F32, {}), xla_while, 0));
module.AddEntryComputation(builder.Build());
state.ResumeTiming();
ASSERT_IS_OK(copy_insertion.Run(&module).status());
}
}
BENCHMARK(BM_SequentialWhiles)->Arg(512)->Arg(1024)->Arg(2048)->Arg(4096);
BENCHMARK(BM_ParallelWhiles)->Arg(512)->Arg(1024)->Arg(2048)->Arg(4096);
BENCHMARK(BM_ManyElementTuple)->Arg(1024)->Arg(12288);
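// Smoke test: copy insertion must run to completion on this
// control-flow-heavy module; only successful execution is checked.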
TEST_F(CopyInsertionTest, SimpleControlFlowTest) {
const std::string& hlo_string = R"(
HloModule TestModule
if-body.v5 {
constant.3 = s32[] constant(-1)
p.1 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.18 = (s32[], s32[], s32[]) get-tuple-element(p.1), index=1
get-tuple-element.65 = s32[] get-tuple-element(get-tuple-element.18), index=0
get-tuple-element.66 = s32[] get-tuple-element(get-tuple-element.18), index=1
add.3 = s32[] add(get-tuple-element.65, get-tuple-element.66)
tuple.33 = (s32[]) tuple(add.3)
ROOT tuple.34 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.3, get-tuple-element.18, tuple.33)
}
if-condition.v4 {
p.2 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(p.2), index=0
constant.4 = s32[] constant(0)
ROOT equal-to = pred[] compare(get-tuple-element.67, constant.4), direction=EQ
}
_functionalize_body_1__.v28 {
arg_tuple.4 = (s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.68 = s32[] get-tuple-element(arg_tuple.4), index=0
constant.7 = s32[] constant(1)
add.4 = s32[] add(get-tuple-element.68, constant.7)
get-tuple-element.69 = s32[] get-tuple-element(arg_tuple.4), index=1
get-tuple-element.70 = s32[] get-tuple-element(arg_tuple.4), index=2
less-than-or-equal-to = pred[] compare(get-tuple-element.69, get-tuple-element.70), direction=LE
constant.8 = s32[] constant(0)
select = s32[] select(less-than-or-equal-to, constant.8, constant.7)
get-tuple-element.71 = s32[] get-tuple-element(arg_tuple.4), index=3
tuple.35 = (s32[], s32[], s32[]) tuple(get-tuple-element.69, get-tuple-element.71, get-tuple-element.70)
tuple.36 = (s32[]) tuple(constant.8)
tuple.37 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(select, tuple.35, tuple.36)
while = (s32[], (s32[], s32[], s32[]), (s32[])) while(tuple.37), condition=if-condition.v4, body=if-body.v5
get-tuple-element.72 = (s32[]) get-tuple-element(while), index=2
get-tuple-element.73 = s32[] get-tuple-element(get-tuple-element.72), index=0
ROOT tuple.38 = (s32[], s32[], s32[], s32[]) tuple(add.4, get-tuple-element.69, get-tuple-element.70, get-tuple-element.73)
}
cond_wrapper.v3.1 {
inputs.1 = (s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.75 = s32[] get-tuple-element(inputs.1), index=0
constant.11 = s32[] constant(7)
ROOT less-than.2 = pred[] compare(get-tuple-element.75, constant.11), direction=LT
}
_functionalize_body_2__.v25 {
arg_tuple.5 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.76 = s32[] get-tuple-element(arg_tuple.5), index=0
get-tuple-element.77 = s32[] get-tuple-element(arg_tuple.5), index=2
get-tuple-element.78 = s32[] get-tuple-element(arg_tuple.5), index=3
get-tuple-element.79 = s32[] get-tuple-element(arg_tuple.5), index=4
tuple.39 = (s32[], s32[], s32[], s32[]) tuple(get-tuple-element.76, get-tuple-element.77, get-tuple-element.78, get-tuple-element.79)
while.2 = (s32[], s32[], s32[], s32[]) while(tuple.39), condition=cond_wrapper.v3.1, body=_functionalize_body_1__.v28
get-tuple-element.80 = s32[] get-tuple-element(while.2), index=0
get-tuple-element.81 = s32[] get-tuple-element(arg_tuple.5), index=1
constant.12 = s32[] constant(1)
add.5 = s32[] add(get-tuple-element.81, constant.12)
get-tuple-element.82 = s32[] get-tuple-element(while.2), index=3
ROOT tuple.40 = (s32[], s32[], s32[], s32[], s32[]) tuple(get-tuple-element.80, add.5, get-tuple-element.77, get-tuple-element.78, get-tuple-element.82)
}
cond_wrapper.v3.2 {
inputs.2 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.83 = s32[] get-tuple-element(inputs.2), index=1
constant.13 = s32[] constant(5)
ROOT less-than.3 = pred[] compare(get-tuple-element.83, constant.13), direction=LT
}
ENTRY TestComputation {
arg_tuple.6 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
ROOT while.3 = (s32[], s32[], s32[], s32[], s32[]) while(arg_tuple.6), condition=cond_wrapper.v3.2, body=_functionalize_body_2__.v25
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
InsertCopies(module.get());
}
TEST_F(CopyInsertionTest, ControlFlowTest) {
const std::string& hlo_string = R"(
HloModule TestModule
if-body.v5 {
constant.3 = s32[] constant(-1)
p.1 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.18 = (s32[], s32[], s32[]) get-tuple-element(p.1), index=1
get-tuple-element.65 = s32[] get-tuple-element(get-tuple-element.18), index=0
get-tuple-element.66 = s32[] get-tuple-element(get-tuple-element.18), index=1
add.3 = s32[] add(get-tuple-element.65, get-tuple-element.66)
tuple.33 = (s32[]) tuple(add.3)
ROOT tuple.34 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.3, get-tuple-element.18, tuple.33)
}
if-condition.v4 {
p.2 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(p.2), index=0
constant.4 = s32[] constant(0)
ROOT equal-to = pred[] compare(get-tuple-element.67, constant.4), direction=EQ
}
if-body.v5.1 {
constant.5 = s32[] constant(-1)
p.3 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.68 = (s32[], s32[], s32[]) get-tuple-element(p.3), index=1
get-tuple-element.70 = s32[] get-tuple-element(get-tuple-element.68), index=2
multiply.1 = s32[] multiply(get-tuple-element.70, get-tuple-element.70)
tuple.35 = (s32[]) tuple(multiply.1)
ROOT tuple.36 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.5, get-tuple-element.68, tuple.35)
}
if-condition.v4.1 {
p.4 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
get-tuple-element.71 = s32[] get-tuple-element(p.4), index=0
constant.6 = s32[] constant(1)
ROOT equal-to.1 = pred[] compare(get-tuple-element.71, constant.6), direction=EQ
}
_functionalize_body_1__.v28 {
arg_tuple.4 = (s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.72 = s32[] get-tuple-element(arg_tuple.4), index=0
constant.7 = s32[] constant(1)
add.4 = s32[] add(get-tuple-element.72, constant.7)
get-tuple-element.73 = s32[] get-tuple-element(arg_tuple.4), index=1
get-tuple-element.74 = s32[] get-tuple-element(arg_tuple.4), index=2
less-than-or-equal-to = pred[] compare(get-tuple-element.73, get-tuple-element.74), direction=LE
constant.8 = s32[] constant(0)
select = s32[] select(less-than-or-equal-to, constant.8, constant.7)
get-tuple-element.75 = s32[] get-tuple-element(arg_tuple.4), index=3
tuple.37 = (s32[], s32[], s32[]) tuple(get-tuple-element.73, get-tuple-element.75, get-tuple-element.74)
tuple.38 = (s32[]) tuple(constant.8)
tuple.39 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(select, tuple.37, tuple.38)
while = (s32[], (s32[], s32[], s32[]), (s32[])) while(tuple.39), condition=if-condition.v4, body=if-body.v5
while.1 = (s32[], (s32[], s32[], s32[]), (s32[])) while(while), condition=if-condition.v4.1, body=if-body.v5.1
get-tuple-element.76 = (s32[]) get-tuple-element(while.1), index=2
get-tuple-element.77 = s32[] get-tuple-element(get-tuple-element.76), index=0
ROOT tuple.40 = (s32[], s32[], s32[], s32[]) tuple(add.4, get-tuple-element.73, get-tuple-element.74, get-tuple-element.77)
}
cond_wrapper.v3.1 {
inputs.1 = (s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.78 = s32[] get-tuple-element(inputs.1), index=0
constant.11 = s32[] constant(7)
ROOT less-than.2 = pred[] compare(get-tuple-element.78, constant.11), direction=LT
}
_functionalize_body_2__.v25 {
arg_tuple.5 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.79 = s32[] get-tuple-element(arg_tuple.5), index=0
get-tuple-element.80 = s32[] get-tuple-element(arg_tuple.5), index=2
get-tuple-element.81 = s32[] get-tuple-element(arg_tuple.5), index=3
get-tuple-element.82 = s32[] get-tuple-element(arg_tuple.5), index=4
tuple.41 = (s32[], s32[], s32[], s32[]) tuple(get-tuple-element.79, get-tuple-element.80, get-tuple-element.81, get-tuple-element.82)
while.2 = (s32[], s32[], s32[], s32[]) while(tuple.41), condition=cond_wrapper.v3.1, body=_functionalize_body_1__.v28
get-tuple-element.83 = s32[] get-tuple-element(while.2), index=0
get-tuple-element.84 = s32[] get-tuple-element(arg_tuple.5), index=1
constant.12 = s32[] constant(1)
add.5 = s32[] add(get-tuple-element.84, constant.12)
get-tuple-element.85 = s32[] get-tuple-element(while.2), index=3
ROOT tuple.42 = (s32[], s32[], s32[], s32[], s32[]) tuple(get-tuple-element.83, add.5, get-tuple-element.80, get-tuple-element.81, get-tuple-element.85)
}
cond_wrapper.v3.2 {
inputs.2 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
get-tuple-element.86 = s32[] get-tuple-element(inputs.2), index=1
constant.13 = s32[] constant(5)
ROOT less-than.3 = pred[] compare(get-tuple-element.86, constant.13), direction=LT
}
ENTRY TestComputation {
arg_tuple.6 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
ROOT while.3 = (s32[], s32[], s32[], s32[], s32[]) while(arg_tuple.6), condition=cond_wrapper.v3.2, body=_functionalize_body_2__.v25
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
InsertCopies(module.get());
}
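// A while nested inside another while's body: one copy of the entry
// parameter should suffice.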
TEST_F(CopyInsertionTest, NestedWhiles) {
const std::string& hlo_string = R"(
HloModule TestModule
cond.inner {
ROOT param.cond.inner = pred[] parameter(0)
}
body.inner {
param.body.inner = pred[] parameter(0)
ROOT not = pred[] not(param.body.inner)
}
cond.outer {
ROOT param.cond.outer = pred[] parameter(0)
}
body.outer {
param.cond.outer = pred[] parameter(0)
ROOT while = pred[] while(param.cond.outer), condition=cond.inner, body=body.inner
}
ENTRY TestComputation {
entry_param = pred[] parameter(0)
ROOT while = pred[] while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::While(op::Copy(op::Parameter())));
}
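// Like NestedWhiles, but the outer body's root is its parameter and the
// inner while feeds an outfeed, so the single required copy lands on the
// inner while's operand rather than on the entry parameter.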
TEST_F(CopyInsertionTest, NestedWhilesWithParamRoot) {
const std::string& hlo_string = R"(
HloModule TestModule
cond.inner {
ROOT param.cond.inner = pred[] parameter(0)
}
body.inner {
param.body.inner = pred[] parameter(0)
ROOT not = pred[] not(param.body.inner)
}
cond.outer {
ROOT param.cond.outer = pred[] parameter(0)
}
body.outer {
ROOT param.cond.outer = pred[] parameter(0)
while = pred[] while(param.cond.outer), condition=cond.inner, body=body.inner
after-all = token[] after-all()
outfeed = token[] outfeed(while, after-all)
}
ENTRY TestComputation {
entry_param = pred[] parameter(0)
while = pred[] while(entry_param), condition=cond.outer, body=body.outer
ROOT not = pred[] not(while)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Not(op::While(op::Parameter())));
HloInstruction* outfeed = FindInstruction(module.get(), "outfeed");
EXPECT_THAT(outfeed, op::Outfeed(op::While(op::Copy(op::Parameter(0))),
op::AfterAll()));
}
TEST_F(CopyInsertionTest, NestedWhilesWithParamRoot2) {
const std::string& hlo_string = R"(
HloModule TestModule
cond.inner {
param.cond.inner = (pred[], pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param.cond.inner), index=0
}
body.inner {
param.body.inner = (pred[], pred[]) parameter(0)
gte.0 = pred[] get-tuple-element(param.body.inner), index=0
gte.1 = pred[] get-tuple-element(param.body.inner), index=1
and = pred[] and(gte.0, gte.1)
not = pred[] not(gte.1)
ROOT root = (pred[], pred[]) tuple(and, not)
}
cond.outer {
param.cond.outer = (pred[], pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param.cond.outer), index=0
}
body.outer {
param.body.outer = (pred[], pred[]) parameter(0)
gte.0 = pred[] get-tuple-element(param.body.outer), index=0
gte.1 = pred[] get-tuple-element(param.body.outer), index=1
while.inner = (pred[], pred[]) while(param.body.outer), condition=cond.inner, body=body.inner
gte.2 = pred[] get-tuple-element(while.inner), index=0
after-all = token[] after-all()
outfeed = token[] outfeed(gte.2, after-all)
ROOT root = (pred[], pred[]) tuple(gte.0, gte.1)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
entry_param.2 = pred[] parameter(1)
tuple = (pred[], pred[]) tuple(entry_param.1, entry_param.2)
while.outer = (pred[], pred[]) while(tuple), condition=cond.outer, body=body.outer
gte = pred[] get-tuple-element(while.outer), index=0
ROOT not = pred[] not(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
HloInstruction* while_inner = FindInstruction(module.get(), "while.inner");
EXPECT_THAT(
while_inner,
op::While(op::Tuple(op::Copy(op::GetTupleElement(op::Parameter(0))),
op::Copy(op::GetTupleElement(op::Parameter(0))))));
}
TEST_F(CopyInsertionTest, NestedWhileAndConditional2) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true
{
v1 = f32[2] parameter(0)
v2 = f32[2] add(v1,v1)
ROOT t1 = (f32[2], f32[2]) tuple(v1,v2)
}
on_false
{
v1 = f32[2] parameter(0)
v2 = f32[2] multiply(v1,v1)
ROOT t2 = (f32[2], f32[2]) tuple(v1,v2)
}
cond.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = (f32[2], f32[2]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
e1 = f32[2] get-tuple-element(if), index=0
e2 = f32[2] get-tuple-element(if), index=1
ROOT res = (pred[], f32[2], f32[2]) tuple(pred.1,e1, e2)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2], f32[2]) tuple(entry_param.1, float_param, float_param)
ROOT while = (pred[], f32[2], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 3);
}
TEST_F(CopyInsertionTest, NestedWhileAndConditional) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] add(v1,v1)
}
on_false
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] multiply(v1,v1)
}
cond.outer {
param.1 = (pred[], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = f32[2] conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
ROOT res = (pred[], f32[2]) tuple(pred.1,if)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2]) tuple(entry_param.1, float_param)
ROOT while = (pred[], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
VLOG(2) << module->ToString() << "\n";
EXPECT_EQ(CountCopies(*module), 2);
}
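// With both input/output alias pairs set up, the pass must iterate to a
// fixpoint proving that no copies are needed.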
TEST_F(CopyInsertionTest, FixpointComputationRequired) {
const std::string& hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[3,3,96,1] parameter(0)
param1 = f32[] parameter(1)
broadcast = f32[3,3,96,1] broadcast(f32[] param1), dimensions={}
ROOT %add.0 = f32[3,3,96,1] add(f32[3,3,96,1] param0, f32[3,3,96,1] broadcast)
}
ENTRY entry_computation {
arg0 = f32[3,3,96,1] parameter(0)
arg1 = f32[] parameter(1)
fusion = f32[3,3,96,1] fusion(f32[3,3,96,1] arg0, f32[] arg1),
kind=kLoop, calls=fused_computation
negate = f32[] negate(f32[] arg1)
ROOT tuple = (f32[3,3,96,1], f32[3,3,96,1], f32[], f32[]) tuple(
f32[3,3,96,1] fusion,
f32[3,3,96,1] arg0,
f32[] negate,
f32[] arg1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{3}, /*param_number=*/1, /*param_index=*/{}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
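// A parameter that is bitcast for one output and aliased for another still
// needs exactly one copy.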
TEST_F(CopyInsertionTest, NoAliasCheckViolation) {
const std::string& hlo_string = R"(
HloModule cluster
ENTRY Entry {
%arg = f32[8,28,28,1] parameter(0)
%bitcast.2 = f32[8,1,28,28] bitcast(f32[8,28,28,1] %arg)
ROOT %tuple.1 = (f32[8,1,28,28], f32[8,28,28,1]) tuple(f32[8,1,28,28] %bitcast.2, f32[8,28,28,1] %arg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0, /*param_index=*/{}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
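// The following tests cover dynamic-update-slice (plain, fused, and
// multi-output fused): when it may update its operand in place and when a
// copy of the operand is required.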
TEST_F(CopyInsertionTest, DynamicUpdateSliceNoCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(negate, broadcast.6, constant.3, constant.3, constant.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceNoCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
ROOT fusion = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
TEST_F(CopyInsertionTest, DynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add = f32[1280,1,128] add(negate, negate)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(negate, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128]) tuple(add, dynamic-update-slice.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
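// An entry parameter may not be updated in place, so the parameter feeding
// the dynamic-update-slice must be copied.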
TEST_F(CopyInsertionTest, DynamicUpdateSliceParameterShareCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param, broadcast.6, constant.3, constant.3, constant.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
add = f32[1280,1,128] add(negate, negate)
fusion = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation
ROOT tuple = (f32[1280,1,128], f32[1280,1,128]) tuple(negate, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest, ChainDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceCopy2) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation.1 {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
fused_computation.2 {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
slice = f32[128,1,128] slice(param1), slice={[0:128], [0:1], [0:128]}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, slice, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
add = f32[1280,1,128] add(negate, negate)
fusion1 = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation.1
ROOT fusion2 = f32[1280,1,128] fusion(fusion1, negate), kind=kLoop, calls=fused_computation.2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest, MultiOutputFusedDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
param2 = f32[1280,1,128] parameter(2)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add.1 = f32[1280,1,128] add(param0, param0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
negate1 = f32[1280,1,128] negate(param)
negate2 = f32[1280,1,128] negate(param)
fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
gte0 = f32[1280,1,128] get-tuple-element(fusion), index=0
gte1 = f32[1280,1,128] get-tuple-element(fusion), index=1
gte2 = f32[1280,1,128] get-tuple-element(fusion), index=2
add0 = f32[1280,1,128] add(negate0, gte0)
add1 = f32[1280,1,128] add(negate1, gte1)
add2 = f32[1280,1,128] add(negate2, gte2)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
}
TEST_F(CopyInsertionTest, MultiOutputFusedDynamicUpdateSliceNoCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
param2 = f32[1280,1,128] parameter(2)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add.1 = f32[1280,1,128] add(param0, param0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
negate1 = f32[1280,1,128] negate(param)
negate2 = f32[1280,1,128] negate(param)
fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
gte0 = f32[1280,1,128] get-tuple-element(fusion), index=0
gte1 = f32[1280,1,128] get-tuple-element(fusion), index=1
gte2 = f32[1280,1,128] get-tuple-element(fusion), index=2
add0 = f32[1280,1,128] add(negate0, gte0)
add1 = f32[1280,1,128] add(gte1, gte1)
add2 = f32[1280,1,128] add(negate2, gte2)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
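// Scatter updates its first operand in place; since the same iota feeds
// both the operand and the updates, the operand must be copied.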
TEST_F(CopyInsertionTest, ScatterSharedOperand) {
absl::string_view hlo_string = R"(
HloModule Module
update_s32 {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
fused_computation {
iota.1 = s32[73729]{0} iota(), iota_dimension=0
ROOT indices.1 = s32[73729]{0} reverse(iota.1), dimensions={0}
}
ENTRY main {
iota.2 = s32[73729]{0} iota(), iota_dimension=0
fusion = s32[73729]{0} fusion(), kind=kLoop, calls=fused_computation
ROOT scatter = s32[73729]{0} scatter(iota.2, fusion, iota.2), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=update_s32
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Scatter(op::Copy(op::Iota()), op::Fusion(), op::Iota()));
}
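// A horizontally fused computation whose outputs alias distinct inputs via
// input_output_alias should require no copies.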
TEST_F(CopyInsertionTest, HorizontalLoopFusionNoCopy) {
const std::string& hlo_string = R"(
HloModule test
fused_computation {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
add0 = f32[10, 20] add(p0, p1)
sub0 = f32[10, 10] subtract(p2, p3)
reshape0 = f32[200] reshape(add0)
reshape1 = f32[100] reshape(sub0)
concat0 = f32[300] concatenate(reshape0, reshape1), dimensions={0}
slice0 = f32[200] slice(concat0), slice={[0:200]}
slice1 = f32[100] slice(concat0), slice={[200:300]}
ROOT tuple = (f32[200], f32[100]) tuple(slice0, slice1)
}
ENTRY test {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
fusion = (f32[200], f32[100]) fusion(p0, p1, p2, p3), kind=kInput, calls=fused_computation
gte0 = f32[200] get-tuple-element(fusion), index=0
gte1 = f32[100] get-tuple-element(fusion), index=1
bitcast0 = f32[10,20] bitcast(gte0)
bitcast1 = f32[10,10] bitcast(gte1)
ROOT tuple = (f32[10,20], f32[10,10]) tuple(bitcast0, bitcast1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0, /*param_index=*/{}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/3, /*param_index=*/{}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
TEST_F(CopyInsertionTest, NestedWhileAndConditional3) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true.1
{
ROOT v1 = f32[2] parameter(0)
}
on_false.1
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] multiply(v1,v1)
}
on_true
{
v1 = f32[2] parameter(0)
v2 = f32[2] add(v1,v1)
v3 = (f32[2],f32[2]) tuple(v1,v2)
v4 = f32[2] get-tuple-element(v3), index=1
v5 = f32[2] multiply(v4,v2)
ROOT t1 = (f32[2], f32[2]) tuple(v5,v2)
}
on_false
{
v1 = f32[2] parameter(0)
v2 = f32[2] multiply(v1,v1)
pred.1 = pred[] constant(true)
v4 = f32[2] conditional(pred.1, v1, v2), true_computation=on_true.1, false_computation=on_false.1
v5 = f32[2] multiply(v4,v2)
ROOT t2 = (f32[2], f32[2]) tuple(v2,v5)
}
cond.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = (f32[2], f32[2]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
e1 = f32[2] get-tuple-element(if), index=0
e2 = f32[2] get-tuple-element(if), index=1
ROOT res = (pred[], f32[2], f32[2]) tuple(pred.1,e1, e2)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2], f32[2]) tuple(entry_param.1, float_param, float_param)
ROOT while = (pred[], f32[2], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4);
}
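// The ConditionalBranchMustCopy/DoNotCopy tests check that a branch
// computation copies the values it returns whenever the conditional's
// result may alias a value that is still live elsewhere, and that the copy
// is elided otherwise.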
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy1) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%copy.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(parameter.2, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto copy1 = tuple6->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy2) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
%constant.1 = s32[] constant(0)
%broadcast.6 = s32[2] broadcast(constant.1), dimensions={}
dynamic-update-slice.5 = s32[2]{0:T(128)} dynamic-update-slice(%copy.1, %broadcast.6, %constant.1)
%add.1 = s32[2]{0:T(128)} add(dynamic-update-slice.5, %copy.1)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%add.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(parameter.2, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto add1 = tuple6->operand(0);
CHECK_EQ(add1->opcode(), HloOpcode::kAdd);
auto dus = add1->operand(0);
auto copy1 = dus->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy3) {
const std::string& hlo_string = R"(
HloModule primitive_computation_cond.19
%branch_0_comp.5.clone (parameter.0: (s32[2])) -> (s32[2]) {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT %tuple.5 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %copy)
}
%branch_1_comp.12.clone (parameter.4: (s32[2])) -> (s32[2]) {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT %tuple.6 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %copy.1)
}
ENTRY %primitive_computation_cond.19 (parameter.1: s32[], parameter.2: s32[2], parameter.3: s32[2]) -> (s32[2]) {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
ROOT %conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto copy1 = tuple6->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
TEST_F(CopyInsertionTest, ConditionalBranchDoNotCopy1) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%copy.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(gte.1, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString() << "\n";
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kParameter);
}
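// branch_0 returns the same negate twice, so one tuple element must be
// copied; the multi-output fusion in branch_1 yields distinct buffers and
// needs no copies.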
TEST_F(CopyInsertionTest, ConditionalWithMultiOutputFusion) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0 {
param_0 = f64[] parameter(0)
negate.2 = f64[] negate(f64[] param_0)
ROOT tuple = (f64[], f64[]) tuple(f64[] negate.2, f64[] negate.2)
}
fused_computation {
param_0.1 = f64[] parameter(0)
abs.2 = f64[] abs(f64[] param_0.1)
negate.1 = f64[] negate(f64[] param_0.1)
ROOT %tuple.2 = (f64[], f64[]) tuple(f64[] negate.1, f64[] abs.2)
}
branch_1 {
param_0.2 = f64[] parameter(0)
ROOT fusion = (f64[], f64[]) fusion(f64[] param_0.2), kind=kLoop, calls=%fused_computation
}
ENTRY main {
pred.0 = s32[] parameter(0)
param_1 = f64[] parameter(1)
param_2 = f64[] parameter(2)
ROOT conditional.0 = (f64[], f64[]) conditional(s32[] pred.0, f64[] param_1, f64[] param_2), branch_computations={%branch_0, %branch_1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_0")), 1);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_1")), 0);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("main")), 0);
}
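// A variadic reduce does not alias its inputs, so only branch_0's
// constant-rooted tuple needs copies (one per element).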
TEST_F(CopyInsertionTest, ConditionalWithVariadicReduce) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0 {
empty_tuple.0 = () parameter(0)
c_0 = f64[] constant(0)
ROOT tuple.3 = (f64[], f64[]) tuple(c_0, c_0)
}
fused_computation {
param_0.1 = f64[] parameter(0)
abs.2 = f64[] abs(f64[] param_0.1)
negate.1 = f64[] negate(f64[] param_0.1)
ROOT %tuple.2 = (f64[], f64[]) tuple(f64[] negate.1, f64[] abs.2)
}
reduce_region {
param_0.0 = f64[] parameter(0)
param_2.0 = f64[] parameter(2)
add.1.0 = f64[] add(param_0.0, param_2.0)
param_1.0 = f64[] parameter(1)
param_3.0 = f64[] parameter(3)
multiply.1.0 = f64[] multiply(param_1.0, param_3.0)
ROOT tuple.0.0 = (f64[], f64[]) tuple(add.1.0, multiply.1.0)
}
branch_1 {
c_0 = f64[] constant(0)
param_0.1 = f64[128]{0} parameter(0)
ROOT reduce = (f64[], f64[]) reduce(param_0.1, param_0.1, c_0, c_0), dimensions={0}, to_apply=reduce_region
}
ENTRY main {
pred.0 = s32[] parameter(0)
empty_tuple = () tuple()
param_2 = f64[128] parameter(1), sharding={replicated}
ROOT conditional.0 = (f64[], f64[]) conditional(s32[] pred.0, () empty_tuple, f64[128] param_2), branch_computations={%branch_0, %branch_1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_0")), 2);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_1")), 0);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("main")), 0);
}
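// In this scheduled module the root instruction is not the last one in the
// body computation; RemoveUnnecessaryCopies must keep the copy feeding
// while.1.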
TEST_F(CopyInsertionTest, RootInstructionNotLast) {
const std::string& hlo_string = R"(
HloModule module, is_scheduled=true
body2 {
p_body2 = (f32[2]{0}) parameter(0)
p_body2.1 = f32[2]{0} get-tuple-element(p_body2), index=0
add.3 = f32[2]{0} add(p_body2.1, p_body2.1)
ROOT root2 = (f32[2]{0}) tuple(add.3)
}
condition2 {
p_cond2 = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
ROOT root = (f32[2]{0}) tuple(p_body.1)
copy = f32[2]{0} copy(p_body.1)
tuple = (f32[2]{0}) tuple(copy)
while.1 = (f32[2]{0}) while(tuple), condition=condition2, body=body2
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const0)
ROOT while.0 = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.RemoveUnnecessaryCopies(module.get()));
auto while_1 = FindInstruction(module.get(), "while.1");
EXPECT_THAT(while_1, op::While(op::Tuple(op::Copy())));
}
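// Two in-place collective-permutes share the same input and output tuples,
// so the shared buffers must be copied (four copies in total).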
TEST_F(CopyInsertionTest, InPlaceCollectivePermuteCopy) {
absl::string_view hlo_string = R"(
HloModule hlo_runner_test_0.1
ENTRY hlo_runner_test_0.1 {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.7 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.2)
tuple.8 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
tuple.10 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
collective-permute.0 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
collective-permute.1 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.10), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
ROOT tuple = ((u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}), (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)})) tuple(collective-permute.0, collective-permute.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4);
}
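// Two dynamic-update-slices write into the same broadcast; each needs its
// own copy of the broadcast so the updates do not interfere.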
TEST_F(CopyInsertionTest, KeepCopyOfBroadcast) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[128,1,128] parameter(0)
negate = f32[128,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
broadcast.7 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[128,1,128] dynamic-update-slice(broadcast.6, broadcast.7, constant.3, constant.3, constant.3)
add1 = f32[128,1,128] add(dynamic-update-slice.5, dynamic-update-slice.5)
dynamic-update-slice.4 = f32[128,1,128] dynamic-update-slice(broadcast.6, broadcast.7, constant.3, constant.3, constant.3)
add2 = f32[128,1,128] add(dynamic-update-slice.4, dynamic-update-slice.4)
tuple = (f32[128,1,128], f32[128,1,128]) tuple(add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
                               /*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
EXPECT_EQ(CountCopies(*module), 2);
}
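// The CustomCallAliasing tests cover output_to_operand_aliasing: the
// aliased operand is copied when it is a parameter or still has other
// uses, and no copy is needed when the custom call is its only user.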
TEST_F(CopyInsertionTest, CustomCallAliasingCopyInsertedAliasedParam) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
ROOT custom-call = f32[2,3,4,5] custom-call(parameter.1, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_THAT(custom_call->operand(0), op::Copy(op::Parameter(0)));
}
TEST_F(CopyInsertionTest, CustomCallAliasingCopyInsertedAliasedReuse) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add.1 = f32[2,3,4,5] add(parameter.1, parameter.2)
custom-call = f32[2,3,4,5] custom-call(add.1, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
ROOT add.2 = f32[2,3,4,5] add(custom-call, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
CHECK_NE(custom_call, nullptr);
EXPECT_THAT(custom_call->operand(0), op::Copy(op::Add()));
}
TEST_F(CopyInsertionTest, CustomCallAliasingCopyRemoved) {
const char* const kModuleString = R"(
HloModule xla_computation_f__1
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add = f32[2,3,4,5] add(parameter.1, parameter.2)
ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_THAT(custom_call->operand(0), op::Add());
}
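// The reverse inside the conditional branch may share a buffer with the
// branch parameter, so copy insertion must copy its operand.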
TEST_F(CopyInsertionTest, ReverseInConditional) {
const char* const kModuleString = R"(
HloModule jit_f.0
%region_0.4 (Arg_.5: u8[300,451,3]) -> (u8[300,451,3]) {
%Arg_.5 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(0)
ROOT %tuple = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) tuple(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_.5)
}
%region_1.9 (Arg_.10: u8[300,451,3]) -> (u8[300,451,3]) {
%Arg_.10 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(0)
%reverse = u8[300,451,3]{1,0,2:T(8,128)(4,1)} reverse(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_.10), dimensions={0}
ROOT %tuple.1 = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) tuple(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %reverse)
}
ENTRY %main.13 (Arg_0.1: pred[], Arg_1.2: u8[300,451,3]) -> u8[300,451,3] {
%Arg_0.1 = pred[]{:T(1024)} parameter(0)
%convert.3 = s32[]{:T(256)} convert(pred[]{:T(1024)} %Arg_0.1)
%Arg_1.2 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(1)
%conditional.12.clone = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) conditional(s32[]{:T(256)} %convert.3, u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_1.2, u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_1.2), branch_computations={%region_0.4, %region_1.9}
ROOT %get-tuple-element = u8[300,451,3]{1,0,2:T(8,128)(4,1)} get-tuple-element((u8[300,451,3]{1,0,2:T(8,128)(4,1)}) %conditional.12.clone), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
HloInstruction* reverse = FindInstruction(module.get(), "reverse");
EXPECT_THAT(reverse->operand(0), op::Copy());
}
TEST_F(CopyInsertionTest, InputOutputAliasCopy) {
const char* const kModuleString = R"(
HloModule main_tf2xla.11, input_output_alias={ {0}: (0, {1}, may-alias) }
ENTRY %main_tf2xla.11 (arg_tuple.1: (f32[], f32[])) -> (f32[], f32[]) {
ROOT %arg_tuple.1 = (f32[]{:T(256)}, f32[]{:T(256)}) parameter(0), parameter_replication={false,false}, sharding={{replicated}, {replicated}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
}
TEST_F(CopyInsertionTest, AddControlDependencyForInputOutputAlias) {
const char* const kModuleString = R"(
HloModule test, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }
ENTRY test {
x = f32[3] parameter(0)
y = f32[3] parameter(1)
add = f32[3] add(x, y)
mul = f32[3] multiply(x, y)
ROOT result = (f32[3], f32[3]) tuple(add, mul)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_EQ(CountControlEdges(*module), 2);
HloInstruction* add_instr = FindInstruction(module.get(), HloOpcode::kAdd);
HloInstruction* mul_instr =
FindInstruction(module.get(), HloOpcode::kMultiply);
HloInstruction* copy_instr = FindInstruction(module.get(), HloOpcode::kCopy);
EXPECT_TRUE(add_instr->control_predecessors()[0] == mul_instr);
EXPECT_TRUE(copy_instr->control_predecessors()[0] == add_instr);
}
TEST_F(CopyInsertionTest, AsyncCallDUSNoCopy) {
const char* const kModuleString = R"(
HloModule async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%buf = s32[1024]{0} custom-call(), custom_call_target="AllocateBuffer"
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %buf), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get(), {"foobar"}).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 0);
}
TEST_F(CopyInsertionTest, AsyncCallDUSCopy) {
const char* const kModuleString = R"(
HloModule async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%input.2 = s32[1024]{0} parameter(1)
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %input.2), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get(), {"foobar"}).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest,
RegionAnalysisDoesNotAddUnnecessaryCopyOfInputTupleElements) {
const char* const kModuleString = R"(
HloModule while_aliasing, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias) }
add {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
ROOT add = f32[1,128] add(param_0, param_1)
}
condition {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[1,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
add = f32[1,128] add(param_0, param_1)
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
ROOT output_tuple = (f32[1,128], f32[1,128], pred[]) tuple(add, splat_c0, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[1,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto add = FindInstruction(module.get(), "add.1");
EXPECT_NE(add, nullptr);
EXPECT_EQ(add->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(add->operand(1)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest,
RegionAnalysisDoesNotAddCopyForNonUpdateParameterOfDynamicSliceUpdate) {
const char* const kModuleString = R"(
HloModule while_aliasing, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias) }
fused_computation {
param_0 = f32[4,2,128,512]{3,2,1,0} parameter(0)
param_1 = f32[2,128,512]{2,1,0} parameter(1)
bitcast.1 = f32[1,2,128,512]{3,2,1,0} bitcast(param_1)
param_2 = s32[] parameter(2)
constant.1 = s32[] constant(0)
compare.1 = pred[] compare(param_2, constant.1), direction=LT
constant.2 = s32[] constant(4)
add.1 = s32[] add(param_2, constant.2)
select.1 = s32[] select(compare.1, add.1, param_2)
ROOT dynamic-update-slice.73 = f32[4,2,128,512]{3,2,1,0} dynamic-update-slice(param_0, bitcast.1, select.1, constant.1, constant.1, constant.1)
}
condition {
input_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=3
}
body {
input_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) parameter(0)
get-tuple-element.0 = s32[] get-tuple-element(input_tuple), index=0
get-tuple-element.1 = f32[4,2,128,512]{3,2,1,0} get-tuple-element(input_tuple), index=2
get-tuple-element.2 = f32[2,128,512]{2,1,0} get-tuple-element(input_tuple), index=1
fusion = f32[4,2,128,512]{3,2,1,0} fusion(get-tuple-element.1, get-tuple-element.2, get-tuple-element.0), kind=kLoop, calls=fused_computation
cond = pred[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
fusion.1 = f32[2,128,512]{2,1,0} broadcast(c0), dimensions={}
ROOT output_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) tuple(get-tuple-element.0, fusion.1, fusion, cond)
}
ENTRY main {
param_0 = f32[2,128,512] parameter(0)
param_1 = f32[4,2,128,512] parameter(1)
param_2 = pred[] parameter(2)
param_3 = s32[] parameter(3)
tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) tuple(param_3, param_0, param_1, param_2)
ROOT while = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto fusion = FindInstruction(module.get(), "fusion");
EXPECT_NE(fusion, nullptr);
EXPECT_EQ(fusion->operand(1)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest, RegionAnalysisNoCopyOfAddOutputInsideWhileBody) {
const char* const kModuleString = R"(
HloModule while_aliasing
condition {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[1,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_1)
add_1 = f32[1,128] add(splat_c0, splat_c0)
ROOT output_tuple = (f32[1,128], f32[1,128], pred[]) tuple(add, add_1, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[1,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto root = FindInstruction(module.get(), "tuple.3");
EXPECT_NE(root, nullptr);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kAdd);
EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kAdd);
EXPECT_EQ(root->operand(2)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest, DontInsertCopiesInAsyncComputation) {
constexpr absl::string_view kModuleString = R"(
HloModule test
%async_computation {
%param_0 = f32[10,32,512]{2,1,0:T(8,128)S(5)} parameter(0)
%param_1 = f32[1,32,512]{2,1,0:T(8,128)} parameter(1)
%param_2 = s32[]{:T(128)} parameter(2)
%param_3 = s32[]{:T(128)} parameter(3)
%param_4 = s32[]{:T(128)} parameter(4)
ROOT %dynamic-update-slice.1 = f32[10,32,512]{2,1,0:T(8,128)S(5)}
dynamic-update-slice(%param_0, %param_1, %param_2, %param_3, %param_4)
}
ENTRY %main {
%param.1 = (s32[]{:T(128)}, f32[32,512]{1,0:T(8,128)},
f32[10,32,512]{2,1,0:T(8,128)S(5)}) parameter(0)
%get-tuple-element.132 = f32[10,32,512]{2,1,0:T(8,128)S(5)} get-tuple-element(
%param.1), index=2
%get-tuple-element.131 = f32[32,512]{1,0:T(8,128)} get-tuple-element(
%param.1), index=1
%cosine.0 = f32[32,512]{1,0:T(8,128)} cosine(%get-tuple-element.131)
%reshape.6 = f32[1,32,512]{2,1,0:T(8,128)} reshape(%cosine.0)
%get-tuple-element.130 = s32[]{:T(128)} get-tuple-element(%param.1), index=0
%constant.49 = s32[]{:T(128)} constant(0)
%compare.13 = pred[]{:T(512)} compare(
%get-tuple-element.130, %constant.49), direction=LT
%constant.50 = s32[]{:T(128)} constant(10)
%add.22 = s32[]{:T(128)} add(%get-tuple-element.130, %constant.50)
%select.6 = s32[]{:T(128)} select(
%compare.13, %add.22, %get-tuple-element.130)
%dynamic-update-slice-start = (
(f32[10,32,512]{2,1,0:T(8,128)S(5)}, f32[1,32,512]{2,1,0:T(8,128)},
s32[]{:T(128)}, s32[]{:T(128)}, s32[]{:T(128)}),
f32[10,32,512]{2,1,0:T(8,128)S(5)}, u32[]) async-start(
%get-tuple-element.132, %reshape.6, %select.6,
%constant.49, %constant.49), calls=%async_computation
ROOT %dynamic-update-slice-done = f32[10,32,512]{2,1,0:T(8,128)S(5)}
async-done(%dynamic-update-slice-start), calls=%async_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
LOG(INFO) << module->ToString();
auto* async_computation = module->GetComputationWithName("async_computation");
ASSERT_THAT(async_computation, NotNull());
EXPECT_EQ(CountCopies(*async_computation), 0);
auto* main_computation = module->GetComputationWithName("main");
ASSERT_THAT(main_computation, NotNull());
EXPECT_EQ(CountCopies(*main_computation), 1);
}
TEST_F(CopyInsertionTest, AsyncDUSInLoop) {
constexpr absl::string_view kModuleString = R"(
HloModule module
async_wrapped {
async_param.1 = s32[1024]{0} parameter(0)
async_param.2 = s32[256]{0} parameter(1)
async_param.3 = s32[] parameter(2)
ROOT dus = s32[1024]{0} dynamic-update-slice(async_param.1, async_param.2, async_param.3)
}
condition {
input_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=3
}
body {
input_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) parameter(0)
input.1 = s32[1024]{0} get-tuple-element(input_tuple), index=0
input.2 = s32[256]{0} get-tuple-element(input_tuple), index=1
input.3 = s32[] get-tuple-element(input_tuple), index=2
input.4 = pred[] get-tuple-element(input_tuple), index=3
async-start = ((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) async-start(input.1, input.2, input.3), calls=%async_wrapped
async-done = s32[1024]{0} async-done(async-start), calls=async_wrapped
ROOT tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) tuple(async-done, input.2, input.3, input.4)
}
ENTRY main {
input.1 = s32[256]{0} parameter(0)
input.2 = s32[] parameter(1)
input.3 = pred[] parameter(2)
broadcast = s32[1024]{0} broadcast(input.2), dimensions={}
while_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) tuple(broadcast, input.1, input.2, input.3)
while = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) while(while_tuple), condition=condition, body=body
ROOT gte = s32[1024]{0} get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(/*can_share_buffer=*/nullptr,
/*use_region_based_live_range_analysis=*/-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/copy_insertion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/copy_insertion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa647017-9bed-4edd-a8b2-56279563cc52 | cpp | tensorflow/tensorflow | host_offload_legalize | third_party/xla/xla/service/host_offload_legalize.cc | third_party/xla/xla/service/host_offload_legalize_test.cc | #include "xla/service/host_offload_legalize.h"
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsEntryComputationParameter(HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kParameter &&
instruction->parent()->IsEntryComputation();
}
constexpr std::array<HloOpcode, 2> kUsersOpcodes = {HloOpcode::kSlice,
HloOpcode::kDynamicSlice};
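// Walks up from `instr` through bitcast/copy/reshape ops whose operands have
// a single user and returns the MoveToHost custom-call feeding the chain, or
// nullptr if the chain is interrupted by any other instruction.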
HloInstruction* FindToHostAnnotationToUpdate(HloInstruction* instr) {
while (!instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
if ((instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kCopy &&
instr->opcode() != HloOpcode::kReshape) ||
instr->mutable_operand(0)->user_count() != 1) {
return nullptr;
}
instr = instr->mutable_operand(0);
}
return instr;
}
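// Mirror of the walk above: follows single-user chains of
// bitcast/reshape/copy/slice/dynamic-slice ops downward and returns the
// MoveToDevice custom-call terminating the chain, or nullptr.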
HloInstruction* FindToDeviceAnnotationToUpdate(HloInstruction* instr) {
while (!instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
if (instr->user_count() != 1 ||
(instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kCopy &&
!absl::c_linear_search(kUsersOpcodes, instr->opcode()))) {
return nullptr;
}
instr = instr->users()[0];
}
return instr;
}
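// Follows single-user bitcast/reshape/copy chains downward looking for a
// dynamic-update-slice. Stops (rather than failing) at the first other
// instruction, so callers must check the opcode of the returned instruction.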
HloInstruction* FindDUSFromAnnotation(HloInstruction* instr) {
while (instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
if (instr->user_count() != 1 || (instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kCopy)) {
break;
}
instr = instr->users()[0];
}
return instr;
}
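// Clones a broadcast-of-constant once per additional use so that every user
// (e.g. each dynamic-update-slice seeded from the same splat) gets its own
// broadcast instruction. Returns true if at least one broadcast was split.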
absl::StatusOr<bool> DuplicateBroadcastForEachUse(HloModule* module) {
bool split_at_least_one = false;
for (HloComputation* computation : module->computations()) {
std::vector<HloInstruction*> broadcasts;
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kBroadcast ||
!instruction->HasConstantOperand()) {
continue;
}
broadcasts.push_back(instruction);
}
for (HloInstruction* instruction : broadcasts) {
if (instruction->opcode() != HloOpcode::kBroadcast ||
!instruction->HasConstantOperand()) {
continue;
}
absl::InlinedVector<HloUse, 8> uses;
for (HloInstruction* user : instruction->users()) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) != instruction) {
continue;
}
uses.push_back(HloUse{user, i, {}});
}
}
if (uses.size() <= 1) {
VLOG(5) << "Skipping broadcast " << instruction->ToString()
<< " which has " << uses.size() << " uses";
continue;
}
VLOG(5) << "Splitting broadcast " << instruction->ToString()
<< " which has " << uses.size() << " uses";
split_at_least_one = true;
for (int i = 1; i < uses.size(); ++i) {
const HloUse& use = uses[i];
HloInstruction* new_broadcast =
instruction->parent()->AddInstruction(instruction->Clone());
VLOG(5) << "New broadcast " << new_broadcast->ToString();
TF_RETURN_IF_ERROR(use.instruction->ReplaceOperandWith(
use.operand_number, new_broadcast));
}
}
}
return split_at_least_one;
}
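// A position in the walked dataflow: an instruction plus the tuple index
// being tracked through it, with -1 meaning the value is not tuple-nested.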
struct InstructionAndIndex {
HloInstruction* instruction;
int index;
InstructionAndIndex(HloInstruction* instruction, int index)
: instruction(instruction), index(index) {}
bool operator==(const InstructionAndIndex& other) const {
return instruction == other.instruction && index == other.index;
}
};
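// Takes one step from `current_value` toward the producer of the offloaded
// buffer, threading the tuple index through tuples, gtes, while loops, and
// formatting ops. Returns its input unchanged on reaching an AllocateBuffer
// or MoveToHost custom-call or a scalar-constant broadcast, and an error for
// any opcode it cannot walk through.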
absl::StatusOr<InstructionAndIndex> WalkUpMemoryOffload(
InstructionAndIndex current_value, const CallGraph& call_graph) {
auto& [instruction, index] = current_value;
switch (instruction->opcode()) {
case HloOpcode::kGetTupleElement: {
CHECK_EQ(index, -1);
return InstructionAndIndex(instruction->mutable_operand(0),
instruction->tuple_index());
}
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kCopy: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kTuple: {
return InstructionAndIndex(instruction->mutable_operand(index), -1);
}
case HloOpcode::kOptimizationBarrier: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kWhile: {
HloComputation* while_body = instruction->while_body();
HloInstruction* root = while_body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
return InstructionAndIndex(root, index);
}
case HloOpcode::kParameter: {
CHECK_NE(instruction->parent(),
instruction->GetModule()->entry_computation());
std::vector<HloInstruction*> callers =
call_graph.GetComputationCallers(instruction->parent());
if (callers.size() != 1) {
return absl::InvalidArgumentError(
"Expected to be called only by one caller");
}
HloInstruction* caller = callers[0];
if (caller->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(
"Expected to be called by a while loop");
}
return InstructionAndIndex(caller->mutable_operand(0), index);
}
case HloOpcode::kDynamicUpdateSlice: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kCustomCall: {
if (!instruction->IsCustomCall("AllocateBuffer") &&
!instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
return absl::InvalidArgumentError(
"Expected AllocateBuffer or MoveToHost custom-call");
}
return InstructionAndIndex(instruction, index);
}
case HloOpcode::kBroadcast: {
HloInstruction* broadcast_operand = instruction->mutable_operand(0);
if (broadcast_operand->opcode() != HloOpcode::kConstant) {
return absl::InvalidArgumentError("Expected a constant as operand");
}
if (!ShapeUtil::IsEffectiveScalar(broadcast_operand->shape())) {
return absl::InvalidArgumentError("Expected a scalar broadcast");
}
return InstructionAndIndex(instruction, index);
}
default: {
return absl::InvalidArgumentError(
absl::StrFormat("Invalid opcode %s", instruction->ToString()));
}
}
}
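// Takes one step away from the producer, returning every user position the
// offloaded value flows into: tuples, gtes, opt-barriers, while loops (into
// the body parameter), dynamic-update-slice (operand 0 only), formatting ops,
// and MoveToDevice custom-calls. `for_move_copy_phase` is set while copies
// are being relocated; in that phase a host-thread async-start user trips a
// CHECK because transposing copies into host calls is not supported yet.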
absl::StatusOr<std::vector<InstructionAndIndex>> WalkDownMemoryOffload(
const InstructionAndIndex& current_value, const CallGraph& call_graph,
bool for_move_copy_phase) {
VLOG(6) << "Getting users of: \"" << current_value.instruction->ToString()
<< "\" at index " << current_value.index;
std::vector<InstructionAndIndex> results;
auto add_gte_for_idx = [&results](HloInstruction* instr,
int idx) -> absl::Status {
HloInstruction* gte = nullptr;
for (HloInstruction* user : instr->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return absl::InvalidArgumentError(
"Expected users to be only get-tuple-elements");
}
if (user->tuple_index() != idx) {
continue;
}
if (gte != nullptr) {
return absl::InvalidArgumentError(
"Expected to find only one gte per index.");
}
// Remember the matching gte so a second one for the same index is rejected.
gte = user;
results.emplace_back(user, -1);
}
return absl::OkStatus();
};
if (current_value.instruction->user_count() == 0) {
if (current_value.instruction->IsRoot() &&
!current_value.instruction->parent()->IsEntryComputation()) {
std::vector<HloInstruction*> callers =
call_graph.GetComputationCallers(current_value.instruction->parent());
if (callers.size() != 1 || callers[0]->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(absl::StrFormat(
"Expected computation \"%s\" to be called only by one caller "
"and that caller to be a While. There are %d caller(s): [%s]",
current_value.instruction->parent()->name(), callers.size(),
absl::StrJoin(callers, ", ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
})));
}
TF_RETURN_IF_ERROR(add_gte_for_idx(callers[0], current_value.index));
return results;
}
}
if (current_value.instruction->opcode() == HloOpcode::kParameter &&
current_value.instruction->shape().IsTuple()) {
TF_RETURN_IF_ERROR(
add_gte_for_idx(current_value.instruction, current_value.index));
return results;
}
for (HloInstruction* user : current_value.instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kGetTupleElement: {
CHECK_NE(user->tuple_index(), -1);
if (user->tuple_index() != current_value.index) {
continue;
}
results.emplace_back(user, -1);
break;
}
case HloOpcode::kTuple: {
auto output_indices = user->OperandIndices(current_value.instruction);
if (output_indices.size() != 1) {
return absl::InvalidArgumentError(
"Expected operand to be used only once in the tuple.");
}
results.emplace_back(user, output_indices[0]);
break;
}
case HloOpcode::kOptimizationBarrier: {
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kWhile: {
HloComputation* while_body = user->while_body();
HloInstruction* parameter = while_body->parameter_instruction(0);
results.emplace_back(parameter, current_value.index);
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (user->OperandIndices(current_value.instruction)[0] != 0) {
return absl::InvalidArgumentError(
"Expected to be used by first operand of dynamic-update-slice");
}
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kCustomCall: {
if (user->IsCustomCall(host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget)) {
results.emplace_back(user, current_value.index);
break;
}
return absl::InvalidArgumentError("Invalid custom-call found.");
}
case HloOpcode::kBitcast:
case HloOpcode::kCopy:
case HloOpcode::kDynamicSlice:
case HloOpcode::kReshape:
case HloOpcode::kSlice: {
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kAsyncStart: {
if (user->async_execution_thread() == HloInstruction::kHostThread) {
CHECK(!for_move_copy_phase)
<< "Transpose copy going into host call is not supported yet.";
break;
}
[[fallthrough]];
}
default: {
return absl::InvalidArgumentError(
absl::StrFormat("Unrecognized user name: %s", user->name()));
}
}
}
return results;
}
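// Overwrites the layout at the tracked (instruction, index) position; for a
// while loop, the body root and condition parameter are updated in lockstep.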
void UpdateInstructionLayout(const InstructionAndIndex& instruction_and_index,
const Layout& new_layout) {
HloInstruction* instruction = instruction_and_index.instruction;
const int index = instruction_and_index.index;
VLOG(2) << " Updating " << instruction->name() << "'s layout "
<< instruction->shape().ToString(true) << " at index " << index
<< " to " << new_layout.ToString();
if (index != -1) {
*instruction->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
} else {
VLOG(5) << " Instruction: " << instruction->ToString();
VLOG(5) << " New layout: " << new_layout.ToString();
*instruction->mutable_shape()->mutable_layout() = new_layout;
}
VLOG(3) << " Shape is now: " << instruction->shape().ToString(true);
if (instruction->opcode() == HloOpcode::kWhile) {
*instruction->while_body()
->root_instruction()
->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
*instruction->while_condition()
->parameter_instruction(0)
->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
}
}
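// Drops the majormost (slowest-varying) dimension of `shape`; used below when
// hoisting a copy across a bitcast that removes a degenerate leading
// dimension of size 1.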
Shape RemoveMajormostDimension(const Shape& shape) {
CHECK(shape.has_layout()) << "Shape must have layout.";
const int size = shape.layout().minor_to_major_size();
const int64_t majormost_dim = shape.layout().minor_to_major(size - 1);
return ShapeUtil::DeleteDimension(majormost_dim, shape);
}
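// Sinks `copy_to_move` down the host-side dataflow toward the matching
// MoveToDevice annotations: every instruction passed along the way takes the
// pre-copy layout, a fresh copy with the post-copy layout is re-created on
// the device side of each annotation, and the original copy is then bypassed
// and deleted. Annotations made redundant in the process are queued in
// `to_remove`.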
absl::Status MoveCopy(
const InstructionAndIndex& copy_to_move_instruction_and_index,
const CallGraph* call_graph,
absl::flat_hash_set<HloInstruction*>& processed_annotations,
absl::flat_hash_set<HloInstruction*>& to_remove) {
HloInstruction* copy_to_move = copy_to_move_instruction_and_index.instruction;
VLOG(5) << "Moving copy: " << copy_to_move->ToString();
struct InstructionAndShapes {
InstructionAndShapes(InstructionAndIndex idx, Shape s_before, Shape s_after)
: instruction_and_index(idx),
shape_before_copy(s_before),
shape_after_copy(s_after) {}
InstructionAndIndex instruction_and_index;
Shape shape_before_copy;
Shape shape_after_copy;
};
std::vector<InstructionAndShapes> stack = {InstructionAndShapes(
copy_to_move_instruction_and_index, copy_to_move->operand(0)->shape(),
copy_to_move->shape())};
while (!stack.empty()) {
InstructionAndShapes current_instruction_and_shapes = stack.back();
InstructionAndIndex current_instruction_and_index =
current_instruction_and_shapes.instruction_and_index;
stack.pop_back();
VLOG(5) << "Current top of stack: "
<< current_instruction_and_index.instruction->ToString()
<< ", index: " << current_instruction_and_index.index;
absl::StatusOr<std::vector<InstructionAndIndex>> current_value_down =
WalkDownMemoryOffload(current_instruction_and_index, *call_graph,
/*for_move_copy_phase=*/true);
if (!current_value_down.ok()) {
VLOG(5) << "WalkDownMemoryOffload failed: "
<< current_value_down.status();
break;
}
for (InstructionAndIndex& instruction_and_index :
current_value_down.value()) {
HloInstruction* instruction = instruction_and_index.instruction;
Shape shape_before_copy =
current_instruction_and_shapes.shape_before_copy;
Shape shape_after_copy = current_instruction_and_shapes.shape_after_copy;
VLOG(5) << "Evaluating successor: " << instruction->ToString();
const int index = instruction_and_index.index;
if (instruction->opcode() == HloOpcode::kBitcast) {
const Shape& before_bitcast_shape = instruction->operand(0)->shape();
const Shape& after_bitcast_shape = instruction->shape();
if (!Shape::Equal().IgnoreLayout()(copy_to_move->operand(0)->shape(),
copy_to_move->shape())) {
return absl::InternalError(absl::StrFormat(
"Expecting copy to only change instructions layout. Copy: %s",
copy_to_move->ToString()));
}
if (after_bitcast_shape.rank() != before_bitcast_shape.rank() - 1) {
return absl::InternalError(
absl::StrFormat("Only handling bitcasts which remove 0'th "
"dimension. This bitcast is \"%s\"",
instruction->ToString()));
}
if (!(ShapeUtil::IsEffectivelyMostMajorDimension(before_bitcast_shape,
0) &&
before_bitcast_shape.dimensions(0) == 1)) {
return absl::InternalError(
absl::StrFormat("Only handling bitcasts with majormost dimension "
"of size 1. This bitcast is \"%s\"",
instruction->ToString()));
}
const Shape new_bitcast_shape =
RemoveMajormostDimension(shape_before_copy);
VLOG(2) << absl::StreamFormat(
" Encountered bitcast \"%s\", updating current shape from %s to %s",
instruction->name(), shape_before_copy.ToString(true),
new_bitcast_shape.ToString(true));
shape_before_copy = new_bitcast_shape;
const Shape new_copy_shape = RemoveMajormostDimension(shape_after_copy);
VLOG(2) << absl::StreamFormat(
" Also updating shape after copy from %s to %s",
shape_after_copy.ToString(true), new_copy_shape.ToString(true));
shape_after_copy = new_copy_shape;
} else if (instruction->opcode() == HloOpcode::kSlice ||
instruction->opcode() == HloOpcode::kDynamicSlice) {
Shape new_copy_shape = instruction->shape();
*new_copy_shape.mutable_layout() = shape_after_copy.layout();
VLOG(2) << absl::StreamFormat(
" Encountered %s \"%s\", updating shape after copy from "
"%s to %s",
HloOpcodeString(instruction->opcode()), instruction->name(),
shape_after_copy.ToString(true), new_copy_shape.ToString(true));
shape_after_copy = new_copy_shape;
}
UpdateInstructionLayout(instruction_and_index,
shape_before_copy.layout());
if (instruction->opcode() == HloOpcode::kParameter) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
if (callers.size() != 1) {
return absl::InvalidArgumentError(
"Expected to be called only by one caller");
}
HloInstruction* caller = callers[0];
UpdateInstructionLayout(InstructionAndIndex(caller, index),
shape_before_copy.layout());
}
CHECK_NE(instruction->opcode(), HloOpcode::kCopy)
<< "Copies should be processed in reverse order so this never "
"happens";
if (absl::c_linear_search(kUsersOpcodes, instruction->opcode()) ||
instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
HloInstruction* annotation =
FindToDeviceAnnotationToUpdate(instruction);
CHECK_NE(annotation, nullptr)
<< "We already verified we could find an annotation here. "
"Something went wrong.";
HloInstruction* new_annotation = nullptr;
if (instruction->opcode() == HloOpcode::kCustomCall) {
new_annotation = annotation;
} else {
new_annotation =
instruction->AddInstruction(annotation->CloneWithNewOperands(
instruction->shape(), {instruction}));
}
UpdateInstructionLayout(InstructionAndIndex(new_annotation, -1),
shape_before_copy.layout());
VLOG(3) << absl::StreamFormat("Creating copy with shape %s",
shape_after_copy.ToString(true));
HloInstruction* new_copy =
instruction->AddInstruction(copy_to_move->CloneWithNewOperands(
shape_after_copy, {new_annotation}));
VLOG(2) << absl::StreamFormat("Inserting copy \"%s\" after \"%s\"",
new_copy->name(), instruction->name());
std::vector<HloInstruction*> users = instruction->users();
for (HloInstruction* use : users) {
if (use == new_copy || use == new_annotation) {
continue;
}
TF_RETURN_IF_ERROR(
instruction->ReplaceUseWithDifferentShape(use, new_copy));
}
if (new_annotation != annotation) {
TF_RETURN_IF_ERROR(annotation->ReplaceAllUsesWithDifferentShape(
annotation->mutable_operand(0)));
to_remove.insert(annotation);
}
continue;
}
if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
HloInstruction* annotation =
FindToHostAnnotationToUpdate(instruction->mutable_operand(1));
if (annotation == nullptr) {
return absl::InternalError("Annotation not found.");
}
CHECK(annotation->opcode() == HloOpcode::kCustomCall);
HloInstruction* new_annotation =
instruction->AddInstruction(annotation->CloneWithNewOperands(
instruction->operand(1)->shape(),
{instruction->mutable_operand(1)}));
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(1, new_annotation));
TF_RETURN_IF_ERROR(
annotation->ReplaceAllUsesWith(annotation->mutable_operand(0)));
processed_annotations.insert(annotation);
processed_annotations.insert(new_annotation);
to_remove.insert(annotation);
if (instruction->shape().layout().minor_to_major() !=
instruction->operand(1)->shape().layout().minor_to_major()) {
HloInstruction* update_slice = instruction->mutable_operand(1);
CHECK(update_slice->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget));
*update_slice->mutable_shape()->mutable_layout() =
instruction->shape().layout();
HloInstruction* new_copy =
update_slice->AddInstruction(HloInstruction::CreateUnary(
update_slice->shape(), HloOpcode::kCopy,
update_slice->mutable_operand(0)));
TF_RETURN_IF_ERROR(update_slice->ReplaceOperandWith(0, new_copy));
}
}
stack.emplace_back(instruction_and_index, shape_before_copy,
shape_after_copy);
}
}
VLOG(2) << absl::StreamFormat("Removing copy \"%s\"",
copy_to_move->ToString());
TF_RETURN_IF_ERROR(copy_to_move->ReplaceAllUsesWithDifferentShape(
copy_to_move->mutable_operand(0)));
TF_RETURN_IF_ERROR(copy_to_move->parent()->RemoveInstruction(copy_to_move));
return absl::OkStatus();
}
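// Handles one offloading start point (a MoveToHost annotation, an offloaded
// entry parameter, or the dynamic-update-slice it feeds): walks up to the
// origin of the host buffer, walks down collecting layout-changing copies
// that sit on the host side, and moves each of them (in reverse discovery
// order) with MoveCopy. Returns true if any copy was moved.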
absl::StatusOr<bool> ProcessAnnotationForCopyMovement(
HloInstruction* instruction, const CallGraph* call_graph,
absl::flat_hash_set<HloInstruction*>& processed_annotations,
absl::flat_hash_set<HloInstruction*>& to_remove) {
VLOG(2) << "Walking down graph starting at instruction "
<< instruction->name();
if (instruction->IsRoot()) {
return false;
}
if (instruction->user_count() == 0) {
return false;
}
HloInstruction* starting_instr =
FindDUSFromAnnotation(instruction->users().at(0));
if (starting_instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
starting_instr = instruction;
}
if (!(starting_instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
IsEntryComputationParameter(starting_instr) ||
starting_instr->opcode() == HloOpcode::kDynamicUpdateSlice)) {
return absl::InternalError(
"Starting instruction must be a move-to-host annotation, entry "
"computation parameter, or dynamic-update-slice.");
}
VLOG(2) << "Effective starting instruction: " << starting_instr->name();
InstructionAndIndex current_value(starting_instr, -1);
processed_annotations.insert(current_value.instruction);
if (current_value.instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
while (true) {
VLOG(10) << "Current value before: "
<< current_value.instruction->ToString();
absl::StatusOr<InstructionAndIndex> current_value_up =
WalkUpMemoryOffload(current_value, *call_graph);
if (!current_value_up.ok()) {
return false;
}
if (current_value_up.value() == current_value) {
break;
}
current_value = current_value_up.value();
VLOG(10) << "Current value after: "
<< current_value.instruction->ToString();
HloInstruction* annotation = current_value.instruction;
if (annotation->opcode() == HloOpcode::kDynamicUpdateSlice) {
HloInstruction* real_annotation =
FindToHostAnnotationToUpdate(annotation->mutable_operand(1));
if (!real_annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
return false;
}
}
}
}
std::vector<InstructionAndIndex> copies_to_move;
std::vector<InstructionAndIndex> stack = {current_value};
while (!stack.empty()) {
VLOG(5) << "Current value before down: "
<< stack.back().instruction->ToString() << " "
<< stack.back().index;
if (absl::c_linear_search(kUsersOpcodes,
stack.back().instruction->opcode()) ||
stack.back().instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
HloInstruction* annotation =
FindToDeviceAnnotationToUpdate(stack.back().instruction);
if (!annotation ||
!annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
VLOG(5) << "Couldn't find annotation for consumer instruction in chain";
return false;
}
if (annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
for (HloInstruction* user : annotation->users()) {
HloInstruction* root_instruction =
annotation->parent()->root_instruction();
if (root_instruction == user &&
root_instruction->opcode() == HloOpcode::kTuple &&
!root_instruction->parent()->IsEntryComputation()) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(annotation->parent());
if (callers.size() != 1 ||
callers[0]->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(absl::StrFormat(
"Expected computation \"%s\" to be called only by one caller "
"and that caller to be a While. There are %d caller(s): [%s]",
current_value.instruction->parent()->name(), callers.size(),
absl::StrJoin(
callers, ", ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
})));
}
for (int i = 0; i < user->operands().size(); i++) {
if (user->operands()[i] == annotation &&
annotation->operand(0)->opcode() ==
HloOpcode::kGetTupleElement &&
annotation->operand(0)->operand(0)->opcode() ==
HloOpcode::kParameter &&
annotation->operand(0)->tuple_index() == i) {
user->ReplaceOperandWith(i, annotation->mutable_operand(0))
.IgnoreError();
}
}
}
}
}
stack.pop_back();
continue;
}
absl::StatusOr<std::vector<InstructionAndIndex>> current_value_down =
WalkDownMemoryOffload(stack.back(), *call_graph,
/*for_move_copy_phase=*/false);
if (!current_value_down.ok()) {
VLOG(5) << "Current value down failed: " << current_value_down.status();
break;
}
stack.pop_back();
stack.insert(stack.end(), current_value_down.value().begin(),
current_value_down.value().end());
for (InstructionAndIndex& instruction_and_index :
current_value_down.value()) {
VLOG(5) << "Current value last down: "
<< stack.back().instruction->ToString();
if (instruction_and_index.instruction->opcode() == HloOpcode::kCopy) {
VLOG(1) << absl::StreamFormat(
" Found a copy to move: \"%s\"",
instruction_and_index.instruction->name());
copies_to_move.push_back(instruction_and_index);
}
}
}
if (copies_to_move.empty()) {
return false;
}
for (auto it = copies_to_move.rbegin(); it != copies_to_move.rend(); ++it) {
TF_RETURN_IF_ERROR(
MoveCopy(*it, call_graph, processed_annotations, to_remove));
}
return true;
}
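// Runs copy movement once per starting instruction, skipping annotations an
// earlier walk already processed, then erases the annotations that became
// redundant along the way.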
absl::StatusOr<bool> FixupInterveningCopies(
const std::vector<HloInstruction*>& starting_instructions,
const CallGraph* call_graph) {
absl::flat_hash_set<HloInstruction*> processed_annotations;
absl::flat_hash_set<HloInstruction*> annotations_to_remove;
bool changed = false;
for (HloInstruction* instruction : starting_instructions) {
if (processed_annotations.contains(instruction)) {
continue;
}
TF_ASSIGN_OR_RETURN(bool changed_annotation_for_copy_movement,
ProcessAnnotationForCopyMovement(
instruction, call_graph, processed_annotations,
annotations_to_remove));
changed |= changed_annotation_for_copy_movement;
}
for (HloInstruction* instruction : annotations_to_remove) {
TF_RETURN_IF_ERROR(instruction->parent()->RemoveInstruction(instruction));
}
return changed;
}
}
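// Collects the places where host offloading begins: entry parameters whose
// assigned layout already lives in the host memory space, plus every
// MoveToHost custom-call in the examined computations.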
std::vector<HloInstruction*>
HostOffloadLegalize::FindStartingInstructionsOfHostMemoryOffload(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<HloInstruction*> starting_instructions;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsEntryComputationParameter(instruction)) {
Shape param_shape =
module->entry_computation_layout()
.parameter_layout(instruction->parameter_number())
.shape();
if (param_shape.has_layout() &&
param_shape.layout().memory_space() == kHostMemorySpaceColor) {
starting_instructions.push_back(instruction);
continue;
}
}
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
starting_instructions.push_back(instruction);
}
}
}
return starting_instructions;
}
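// Pass entry point. Broadcast splitting always runs; copy movement only runs
// once layouts have been assigned (`after_layout_`), since it reasons about
// concrete layouts. A typical invocation is sketched below (assumed pipeline
// wiring; exact placement varies by backend):
//
//   HloPassPipeline pipeline("host-offload-legalize");
//   pipeline.AddPass<HostOffloadLegalize>(/*host_memory_space_color=*/5,
//                                         /*after_layout=*/true);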
absl::StatusOr<bool> HostOffloadLegalize::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(bool duplicated_at_least_one_broadcast,
DuplicateBroadcastForEachUse(module));
if (duplicated_at_least_one_broadcast) {
changed = true;
}
if (!after_layout_) {
return changed;
}
std::vector<HloInstruction*> starting_instructions =
FindStartingInstructionsOfHostMemoryOffload(module, execution_threads);
VLOG(1) << absl::StreamFormat(
"Starting instructions for host memory offload: [%s]",
absl::StrJoin(starting_instructions, ", ",
[](std::string* out, HloInstruction* instruction) {
return absl::StrAppend(out, instruction->name());
}));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
TF_ASSIGN_OR_RETURN(
bool changed_intervening_copies,
FixupInterveningCopies(starting_instructions, call_graph.get()));
changed |= changed_intervening_copies;
return changed;
}
} | #include "xla/service/host_offload_legalize.h"
#include <cstdint>
#include <stack>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
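// Fixture that runs the pass in after-layout mode with host memory space
// color 5, plus helpers for checking memory spaces and for detecting offload
// annotations left behind after a pass run.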
class HostOffloadLegalizeTest : public HloTestBase {
protected:
static constexpr int64_t kHostMemorySpaceColor{5};
absl::StatusOr<bool> RunHostOffloadLegalize(HloModule* module) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostOffloadLegalize host_offload_legalize(kHostMemorySpaceColor,
/*after_layout=*/true);
return host_offload_legalize.Run(module);
}
void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
ASSERT_TRUE(shape.has_layout());
EXPECT_EQ(shape.layout().memory_space(), memory_space);
}
bool HaveRemainingOffloadAnnotations(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget,
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget})) {
return true;
}
}
}
return false;
}
};
TEST_F(HostOffloadLegalizeTest, TestWithAsyncCall) {
const std::string& hlo_string = R"(
HloModule jit_update, entry_computation_layout={(f32[20,3,256,133]{2,3,1,0:T(8,128)S(5)})->(f32[20,3,256,133]{2,1,0,3:T(4,128)}, f32[4096]{0:T(1024)})}
%async_computation {
%param_0 = f32[20,3,256,133] parameter(0)
ROOT %offloaded-custom-call = f32[4096] custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY main {
%param.246 = f32[20,3,256,133] parameter(0)
%async-start = ((f32[20,3,256,133]), f32[4096], u32[]) async-start(%param.246), async_execution_thread="host", calls=%async_computation
%async-done = f32[4096] custom-call-done(%async-start)
copy.16744 = f32[20,3,256,133]{2,1,0,3:T(4,128)} copy(param.246)
custom-call.7832 = f32[20,3,256,133]{2,1,0,3:T(4,128)} custom-call(copy.16744), custom_call_target="MoveToDevice"
ROOT tuple.16745 = (f32[20,3,256,133]{2,1,0,3:T(4,128)}, f32[4096]{0:T(1024)}) tuple(custom-call.7832, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* custom_call =
FindInstruction(module.get(), "custom-call.7832");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
XLA_VLOG_LINES(1, module->ToString());
}
TEST_F(HostOffloadLegalizeTest, NoCopyWithOptBarrierMoreElaborate) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
sine.6 = f32[16,256]{0,1} sine(sine.3)
cosine.9 = f32[16,256]{0,1} cosine(sine.6)
custom-call.10 = f32[16,256]{0,1} custom-call(cosine.9), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) tuple(custom-call.5, cp, custom-call.10, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{0,1} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
}
TEST_F(HostOffloadLegalizeTest, XposeCopyOnParameterStreaming) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1},f32[16,256]{0,1:T(8,128)S(5)})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
Arg_0.2 = f32[16,256]{0,1:T(8,128)} parameter(1)
cp0 = f32[16,256]{1,0} copy(Arg_0.2)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp1 = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) tuple(custom-call.5, cp1, cp0, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{1,0} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
custom_call = FindInstruction(module.get(), "custom-call.19");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({0, 1}, {}, {}, {}, {Tile{{8, 128}}}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
}
TEST_F(HostOffloadLegalizeTest, DUSSameLayoutForOperandAndUpdate_1) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[16,512,532]{1,2,0})->bf16[1,16,512,532]{2,3,1,0}}
ENTRY main.24 {
constant_0 = s32[] constant(0)
cs0 = bf16[] constant(0)
broadcast = bf16[20,16,512,532]{3,2,1,0} broadcast(cs0), dimensions={}
cp = bf16[20,16,512,532]{3,2,1,0} copy(broadcast)
custom-call.8 = bf16[20,16,512,532]{3,2,1,0} custom-call(cp), custom_call_target="MoveToHost"
copy = bf16[20,16,512,532]{2,3,1,0} copy(custom-call.8)
arg1 = bf16[16,512,532]{1,2,0} parameter(0)
copy.17302 = bf16[16,512,532]{2,1,0} copy(arg1)
bitcast.6100 = bf16[1,16,512,532]{3,2,1,0} bitcast(copy.17302)
copy.20241 = bf16[1,16,512,532]{2,3,1,0} copy(bitcast.6100)
custom-call.6720 = bf16[1,16,512,532]{2,3,1,0} custom-call(copy.20241), custom_call_target="MoveToHost"
dynamic-update-slice.6830 = bf16[20,16,512,532]{2,3,1,0} dynamic-update-slice(copy, custom-call.6720, constant_0, constant_0, constant_0, constant_0)
dynamic_slice_0 = bf16[1,16,512,532]{2,3,1,0} dynamic-slice(dynamic-update-slice.6830, constant_0, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,16,512,532}
ROOT custom_call_0.1 = bf16[1,16,512,532]{2,3,1,0} custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* dus =
FindInstruction(module.get(), "dynamic-update-slice.6830");
ASSERT_NE(dus, nullptr);
EXPECT_EQ(dus->operand(0)->shape().layout(),
dus->operand(1)->shape().layout());
EXPECT_EQ(dus->shape().layout(), dus->operand(1)->shape().layout());
const HloInstruction* custom_call =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_TRUE(custom_call->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({2, 3, 1, 0}));
}
TEST_F(HostOffloadLegalizeTest, DUSSameLayoutForOperandAndUpdate_2) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[16,512,532]{1,2,0})->bf16[1,16,512,532]{2,3,1,0}}
ENTRY main.24 {
constant_0 = s32[] constant(0)
cs0 = bf16[] constant(0)
broadcast = bf16[20,16,512,532]{3,2,1,0} broadcast(cs0), dimensions={}
cp = bf16[20,16,512,532]{3,2,1,0} copy(broadcast)
custom-call.8 = bf16[20,16,512,532]{3,2,1,0} custom-call(cp), custom_call_target="MoveToHost"
copy = bf16[20,16,512,532]{2,3,1,0} copy(custom-call.8)
arg1 = bf16[16,512,532]{1,2,0} parameter(0)
copy.17302 = bf16[16,512,532]{2,1,0} copy(arg1)
custom-call.6720 = bf16[16,512,532]{2,1,0} custom-call(copy.17302), custom_call_target="MoveToHost"
bitcast.6100 = bf16[1,16,512,532]{3,2,1,0} bitcast(custom-call.6720)
copy.20241 = bf16[1,16,512,532]{2,3,1,0} copy(bitcast.6100)
dynamic-update-slice.6830 = bf16[20,16,512,532]{2,3,1,0} dynamic-update-slice(copy, copy.20241, constant_0, constant_0, constant_0, constant_0)
dynamic_slice_0 = bf16[1,16,512,532]{2,3,1,0} dynamic-slice(dynamic-update-slice.6830, constant_0, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,16,512,532}
ROOT custom_call_0.1 = bf16[1,16,512,532]{2,3,1,0} custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* dus =
FindInstruction(module.get(), "dynamic-update-slice.6830");
ASSERT_NE(dus, nullptr);
EXPECT_EQ(dus->operand(0)->shape().layout(),
dus->operand(1)->shape().layout());
EXPECT_EQ(dus->shape().layout(), dus->operand(1)->shape().layout());
const HloInstruction* custom_call =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_TRUE(custom_call->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({2, 3, 1, 0}));
}
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleConsumers) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy = FindInstruction(module.get(), HloOpcode::kCopy);
HloInstruction* consuming_while =
FindInstruction(module.get(), "consuming_while");
  ASSERT_NE(copy, nullptr);
  ASSERT_NE(consuming_while, nullptr);
EXPECT_EQ(copy->parent(), consuming_while->while_body());
XLA_VLOG_LINES(1, module->ToString());
}
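// Same as above, but with two stacked layout copies between the producing and
// consuming loops; after legalization the first copy stays in the entry
// computation and still feeds the second.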
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleCopies) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
cp1 = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(cp)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp1)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_0 = FindInstruction(module.get(), "cp.2");
HloInstruction* copy_1 = FindInstruction(module.get(), "cp1.2");
HloInstruction* consuming_while =
FindInstruction(module.get(), "consuming_while");
  ASSERT_NE(copy_0, nullptr);
  ASSERT_NE(copy_1, nullptr);
  ASSERT_NE(consuming_while, nullptr);
EXPECT_EQ(copy_0->parent(), module->entry_computation());
EXPECT_EQ(copy_1->operand(0), copy_0);
XLA_VLOG_LINES(1, module->ToString());
}
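// The layout-changing copy sits before a bitcast feeding a MoveToDevice custom
// call; the pass should move the copy past the bitcast so the custom call
// consumes host-layout data and the copy runs on device.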
TEST_F(HostOffloadLegalizeTest, MoveCopyOverBitcast) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[1,1,16384,4,256]{4,3,2,1,0:T(4,128)(2,1)S(5)})->bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)}}
ENTRY main {
param = bf16[1,1,16384,4,256]{4,3,2,1,0:T(4,128)(2,1)} parameter(0)
copy = bf16[1,1,16384,4,256]{4,2,3,1,0:T(8,128)(2,1)} copy(param)
bitcast = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} bitcast(copy)
custom-call = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} custom-call(bitcast), custom_call_target="MoveToDevice"
ROOT add = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} add(custom-call, custom-call)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}, {}, {}, {},
{Tile{{4, 128}}, Tile{{2, 1}}}));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({3, 1, 2, 0}, {}, {}, {},
{Tile{{8, 128}}, Tile{{2, 1}}}));
}
}
}
// Source: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_legalize.cc
// Tests:  https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_legalize_test.cc
// Commit: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

// File: third_party/xla/xla/service/collective_pipeliner.cc
// (tests: third_party/xla/xla/service/collective_pipeliner_test.cc)
#include "xla/service/collective_pipeliner.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/constant_value.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/service/value_range.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
const char* const CollectivePipeliner::kInsertedByPreviousStep =
"InsertedByPreviousStep";
const char* const CollectivePipeliner::kSunkByPreviousStep =
"SunkByPreviousStep";
namespace {
using InstructionMap =
absl::flat_hash_map<const HloInstruction*, HloInstruction*>;
using LoopVariantParameterInfo =
std::vector<std::pair<int64_t, HloInstruction*>>;
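// Copies the control predecessors of `original` onto `new_instr`, remapped
// through `cloned_map`; predecessors that were not cloned are skipped.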
absl::Status UpdateControlDependencies(HloInstruction* original,
HloInstruction* new_instr,
const InstructionMap& cloned_map) {
for (auto* pred : original->control_predecessors()) {
auto it = cloned_map.find(pred);
if (it == cloned_map.end()) {
continue;
}
TF_RETURN_IF_ERROR(it->second->AddControlDependencyTo(new_instr));
}
return absl::OkStatus();
}
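// Returns true if the index operand at position `index` is the only
// non-constant index of the dynamic-update-slice.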
bool AllIndicesConstantsExceptOne(
const HloDynamicUpdateSliceInstruction* dyn_update, int64_t index) {
if (dyn_update->operand(index)->IsConstant()) {
return false;
}
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
if (i == index) {
continue;
}
if (!dyn_update->operand(i)->IsConstant()) {
return false;
}
}
return true;
}
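// Finds the single dimension of the dynamic-update-slice addressed by a
// non-constant index; all other indices must be constant zero.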
std::optional<int> GetSlicedDimension(
const HloDynamicUpdateSliceInstruction* dyn_update) {
std::optional<int> sliced_dim;
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
const HloInstruction* idx = dyn_update->operand(i);
if (!idx->IsConstant()) {
if (sliced_dim.has_value()) {
return std::nullopt;
}
sliced_dim = i - dyn_update->first_index_operand_number();
continue;
}
if (Cast<HloConstantInstruction>(idx)->literal().GetFirstInteger() != 0) {
return std::nullopt;
}
}
return sliced_dim;
}
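// The update index must have a non-empty linear range with respect to the
// induction variable, i.e. move monotonically across iterations.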
bool CheckIndexIsMonotonic(
const HloInstruction* index,
const absl::flat_hash_map<const HloInstruction*, Range>& induction_map) {
Range range = RecursivelyIdentifyRange(index, induction_map);
VLOG(5) << "Range for: " << index->ToString() << " " << range.ToString();
return !range.IsEmpty() && range.IsLinear();
}
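// Checks that the loop-parameter GTE is used only by the given
// dynamic-update-slice, and with the same index on the sliced dimension.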
bool CheckParameterUsageIsCompatible(const HloInstruction* gte,
const HloInstruction* dus,
const HloInstruction* dus_idx,
int64_t sliced_index) {
for (auto* user : gte->users()) {
if (dus != user) {
VLOG(5) << "CheckParameterUsageIsCompatible(): User not a dynamic slice "
"or the dynamic-update-slice for the output."
<< user->ToString();
return false;
}
if (user->operand(static_cast<HloDynamicSliceInstruction*>(user)
->first_index_operand_number() +
sliced_index) != dus_idx) {
VLOG(5) << "CheckParameterUsageIsCompatible(): Idx is not the same as "
"dynamic-update-slice() "
<< user->ToString();
return false;
}
}
return true;
}
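// Extracts the pipelining level stored as the constant second operand of an
// "InsertedByPreviousStep" custom call, if `instr` is one.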
std::optional<int64_t> GetLevelFromCustomCall(const HloInstruction* instr) {
if (!instr->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep)) {
return std::nullopt;
}
return Cast<HloConstantInstruction>(instr->operand(1))
->literal()
.GetFirstInteger();
}
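// Collects the index-computation subgraphs of a dynamic-slice, provided they
// contain no parameters or side-effecting ops; returns std::nullopt otherwise.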
std::optional<std::vector<HloInstruction*>>
CollectDynamicSliceIndicesIfConstant(HloInstruction* instr) {
CHECK_EQ(instr->opcode(), HloOpcode::kDynamicSlice);
std::vector<HloInstruction*> indices;
HloDynamicSliceInstruction* dyn_slice =
Cast<HloDynamicSliceInstruction>(instr);
for (int64_t i = dyn_slice->first_index_operand_number();
i < instr->operand_count(); ++i) {
HloInstruction* operand = dyn_slice->mutable_operand(i);
CHECK_EQ(operand->shape().dimensions_size(), 0);
std::vector<std::pair<HloInstruction*, int>> stack(
1, std::make_pair(operand, 0));
absl::flat_hash_set<HloInstruction*> visited;
while (!stack.empty()) {
auto& [curr_instr, operand_idx] = stack.back();
if (operand_idx == curr_instr->operand_count()) {
indices.push_back(curr_instr);
stack.pop_back();
continue;
}
HloInstruction* next_operand = curr_instr->mutable_operand(operand_idx++);
if (next_operand->opcode() == HloOpcode::kParameter ||
next_operand->HasSideEffect()) {
return std::nullopt;
}
if (visited.insert(next_operand).second) {
stack.push_back(std::make_pair(next_operand, 0));
}
}
}
return indices;
}
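// Pipelining only supports integral loop induction variables.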
bool IsSupportedLoopIndexType(PrimitiveType type) {
switch (type) {
case PrimitiveType::S32:
case PrimitiveType::S64:
case PrimitiveType::S16:
case PrimitiveType::S8:
case PrimitiveType::U32:
case PrimitiveType::U64:
case PrimitiveType::U16:
case PrimitiveType::U8:
return true;
default:
return false;
}
}
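// Builds an R0 literal of `shape`'s (integral) element type holding `value`.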
std::optional<Literal> CreateLiteralOfShape(const Shape& shape, int64_t value) {
return primitive_util::PrimitiveTypeSwitch<std::optional<Literal>>(
[&](auto kType) -> std::optional<Literal> {
if constexpr (primitive_util::IsIntegralType(kType)) {
using NativeT = typename primitive_util::NativeTypeOf<kType>;
CHECK_LE(value, static_cast<absl::int128>(
std::numeric_limits<NativeT>::max()));
CHECK_GE(value, static_cast<absl::int128>(
std::numeric_limits<NativeT>::min()));
return LiteralUtil::CreateR0(static_cast<NativeT>(value));
}
return std::nullopt;
},
shape.element_type());
}
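// Gathers trivially movable dependencies of `i` (constant dynamic-slice
// indices, broadcasts of constants) into the formatting-op set.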
bool CollectSimpleDependencies(HloInstruction* i,
std::vector<HloInstruction*>& deps_vector,
absl::flat_hash_set<HloInstruction*>& deps_set) {
if (i->opcode() == HloOpcode::kDynamicSlice) {
auto indices = CollectDynamicSliceIndicesIfConstant(i);
if (!indices.has_value()) {
return false;
}
deps_vector.insert(deps_vector.end(), indices->begin(), indices->end());
deps_set.insert(indices->begin(), indices->end());
return true;
}
for (HloInstruction* op : i->mutable_operands()) {
absl::InlinedVector<HloInstruction*, 4> to_add;
if (op->opcode() == HloOpcode::kBroadcast) {
if (deps_set.insert(op).second) {
to_add.push_back(op);
op = op->mutable_operand(0);
if (op->opcode() == HloOpcode::kConstant) {
if (deps_set.insert(op).second) {
to_add.push_back(op);
}
}
}
}
deps_vector.insert(deps_vector.end(), to_add.rbegin(), to_add.rend());
}
return true;
}
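// Traverses the users of `instr` to verify that every use chain terminates in
// a dynamic-update-slice feeding the loop output, passing only through
// acceptable "formatting" ops. Returns the terminating slice insertions and
// the formatting ops to clone; empty vectors mean the pattern is unsupported.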
std::pair<std::vector<HloDynamicUpdateSliceInstruction*>,
std::vector<HloInstruction*>>
CheckStoreIntoSliceIsCompatible(HloInstruction* instr,
const HloComputation* while_body,
int64_t level_to_operate_on,
bool multi_uses_pipelining,
HloPredicate acceptable_formatting,
bool multi_dyn_updates = false) {
std::pair<std::vector<HloDynamicUpdateSliceInstruction*>,
std::vector<HloInstruction*>>
empty_pair{{}, {}};
if ((!multi_uses_pipelining && instr->user_count() != 1) ||
instr->operand_count() != 1 || instr->HasControlDependencies()) {
return empty_pair;
}
absl::flat_hash_set<HloInstruction*> added_instructions;
HloInstruction* folded_instr = instr;
std::vector<HloInstruction*> formatting_ops;
absl::flat_hash_set<HloInstruction*> formatting_set;
auto is_acceptable_user = [&](HloInstruction* i) {
if (i->HasControlDependencies() || !acceptable_formatting(i)) {
return false;
}
if (i->opcode() == HloOpcode::kReduce &&
(ShapeUtil::ElementsIn(i->shape()) ==
ShapeUtil::ElementsIn(instr->operand(0)->shape()) ||
ShapeUtil::ElementsIn(instr->operand(0)->shape()) < 1024)) {
return true;
}
return HloPredicateIsOp<HloOpcode::kSlice, HloOpcode::kDynamicSlice,
HloOpcode::kPad, HloOpcode::kCollectivePermute,
HloOpcode::kConvert, HloOpcode::kReshape,
HloOpcode::kAllReduce, HloOpcode::kTranspose,
HloOpcode::kBroadcast, HloOpcode::kBitcast>(i) ||
(multi_uses_pipelining && i->IsElementwise()) ||
i->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep) ||
i->IsCustomCall(CollectivePipeliner::kSunkByPreviousStep);
};
auto is_final_slice_insertion = [&](HloInstruction* i) {
HloDynamicUpdateSliceInstruction* dyn_update =
DynCast<HloDynamicUpdateSliceInstruction>(i);
if (dyn_update == nullptr || dyn_update->user_count() != 1) {
return false;
}
if (level_to_operate_on == 0) {
if (dyn_update->users()[0] == while_body->root_instruction()) {
return true;
}
return false;
}
for (int64_t i = dyn_update->first_index_operand_number();
i < dyn_update->operand_count(); ++i) {
if (auto level = GetLevelFromCustomCall(dyn_update->operand(i))) {
if (*level == level_to_operate_on) {
return true;
}
return false;
}
}
return false;
};
absl::flat_hash_set<HloInstruction*> final_slice_set;
std::vector<HloDynamicUpdateSliceInstruction*> final_slice_insertions;
std::vector<std::pair<HloInstruction*, int>> stack;
stack.push_back(std::make_pair(folded_instr, 0));
while (!stack.empty()) {
auto& data = stack.back();
HloInstruction* inst = data.first;
if (data.second == inst->user_count()) {
stack.pop_back();
continue;
}
HloInstruction* next_user = inst->users()[data.second++];
if (is_final_slice_insertion(next_user)) {
if (next_user->user_count() != 1 || next_user->operand(1) != inst) {
return empty_pair;
}
if (final_slice_set.contains(next_user)) {
continue;
}
if (!multi_dyn_updates && !final_slice_insertions.empty()) {
return empty_pair;
}
final_slice_insertions.push_back(
Cast<HloDynamicUpdateSliceInstruction>(next_user));
final_slice_set.insert(next_user);
continue;
}
if (!is_acceptable_user(next_user)) {
return empty_pair;
}
if (added_instructions.insert(next_user).second) {
stack.push_back(std::make_pair(next_user, 0));
}
}
if (final_slice_insertions.empty()) {
return empty_pair;
}
stack.push_back(std::make_pair(folded_instr, 0));
added_instructions.clear();
while (!stack.empty()) {
auto& data = stack.back();
HloInstruction* instr = data.first;
if (data.second == 0 && instr != folded_instr) {
if (!CollectSimpleDependencies(instr, formatting_ops, formatting_set)) {
return empty_pair;
}
if (formatting_set.insert(instr).second) {
formatting_ops.push_back(instr);
}
}
if (data.second == instr->user_count()) {
stack.pop_back();
continue;
}
HloInstruction* next_user = instr->users()[data.second++];
if (is_final_slice_insertion(next_user)) {
continue;
}
if (added_instructions.insert(next_user).second) {
stack.push_back(std::make_pair(next_user, 0));
}
}
return std::make_pair(final_slice_insertions, formatting_ops);
}
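// True if `instr` is the GTE that extracts the induction variable from the
// while-loop parameter tuple.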
bool IsLoopIterator(const HloInstruction* instr,
int64_t loop_iteration_tuple_idx) {
if (instr->opcode() != HloOpcode::kGetTupleElement ||
instr->operand(0)->opcode() != HloOpcode::kParameter) {
return false;
}
return instr->tuple_index() == loop_iteration_tuple_idx;
}
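// Returns the operands of `ops` that come from outside `source_ops` and `ops`
// themselves; these extra values need to be threaded into the pipelined
// computation.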
std::vector<HloInstruction*> CollectDependenciesToPipeline(
absl::Span<const HloInstruction* const> source_ops,
absl::Span<HloInstruction* const> ops) {
absl::flat_hash_set<const HloInstruction*> formatting_set(ops.begin(),
ops.end());
formatting_set.insert(source_ops.begin(), source_ops.end());
std::vector<HloInstruction*> to_return;
for (const HloInstruction* op : ops) {
for (HloInstruction* operand : op->operands()) {
if (!formatting_set.count(operand)) {
formatting_set.insert(operand);
to_return.push_back(operand);
}
}
}
return to_return;
}
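// Collects the transitive operand chain of `instr` that can be hoisted out of
// the loop. The chain must avoid loop-variant parameters (unless explicitly
// allowed), and any value escaping the chain must be scalar or loop invariant.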
std::optional<std::vector<HloInstruction*>> CollectIndependentOperandChain(
HloInstruction* instr, int64_t loop_iter,
const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
HloPredicate should_allow_loop_variant_parameter_in_chain,
const absl::flat_hash_set<const HloInstruction*>&
loop_invariant_instructions,
bool should_add_loop_invariant_op_in_chain) {
std::vector<HloInstruction*> chain;
absl::flat_hash_set<const HloInstruction*> visited_set({instr});
std::vector<std::pair<HloInstruction*, int>> stack(1, {instr, 0});
auto is_loop_variant_parameter_input =
[&loop_invariant_params, loop_iter](const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kGetTupleElement ||
instr->operand(0)->opcode() != HloOpcode::kParameter) {
return false;
}
return !IsLoopIterator(instr, loop_iter) &&
!loop_invariant_params.count(instr);
};
while (!stack.empty()) {
auto& curr = stack.back();
if (curr.second == curr.first->operand_count()) {
if (curr.first != instr) {
chain.push_back(curr.first);
}
stack.pop_back();
continue;
}
HloInstruction* curr_operand = curr.first->mutable_operand(curr.second++);
if (curr_operand->opcode() == HloOpcode::kParameter) {
continue;
}
if (is_loop_variant_parameter_input(curr_operand) &&
!should_allow_loop_variant_parameter_in_chain(curr_operand)) {
return std::nullopt;
}
if (visited_set.insert(curr_operand).second) {
stack.emplace_back(curr_operand, 0);
}
}
for (auto* chain_instr : chain) {
if (chain_instr->opcode() == HloOpcode::kAfterAll) {
continue;
}
if (chain_instr->opcode() == HloOpcode::kRecvDone) {
return std::nullopt;
}
const bool all_users_in_chain = absl::c_all_of(
chain_instr->users(), [&visited_set](const HloInstruction* u) {
return visited_set.contains(u);
});
const bool is_scalar_shaped =
ShapeUtil::IsEffectiveScalar(chain_instr->shape());
if (!all_users_in_chain) {
bool allow_loop_variant_parameter_in_chain =
(chain_instr->opcode() != HloOpcode::kGetTupleElement ||
chain_instr->operand(0)->opcode() != HloOpcode::kParameter ||
!should_allow_loop_variant_parameter_in_chain(chain_instr));
bool add_loop_invariant_op_in_chain =
(should_add_loop_invariant_op_in_chain &&
loop_invariant_instructions.contains(chain_instr));
if ((!loop_invariant_params.contains(chain_instr) && !is_scalar_shaped &&
allow_loop_variant_parameter_in_chain) &&
!add_loop_invariant_op_in_chain) {
return std::nullopt;
}
}
}
return std::move(chain);
}
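// Backward-pipelining entry point: rejects control dependencies unless
// allowed, then delegates to CollectIndependentOperandChain().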
std::optional<std::vector<HloInstruction*>> CollectChainsToPushBackwards(
HloInstruction* instr, int64_t loop_iter, const HloComputation* while_body,
int64_t level_to_operate_on,
const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
HloPredicate should_allow_loop_variant_parameter_in_chain,
bool should_allow_control_dependencies,
const absl::flat_hash_set<const HloInstruction*>&
loop_invariant_instructions,
bool should_add_loop_invariant_op_in_chain) {
if (instr->HasControlDependencies() && !should_allow_control_dependencies) {
return std::nullopt;
}
return CollectIndependentOperandChain(
instr, loop_iter, loop_invariant_params,
should_allow_loop_variant_parameter_in_chain, loop_invariant_instructions,
should_add_loop_invariant_op_in_chain);
}
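// Follows the single-user chain of dynamic-update-slices down to the loop
// root and returns the unique root operand index it feeds, if any.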
std::optional<int64_t> FindOutputIndexForDynamicUpdateSlice(
const HloInstruction* dus, const HloInstruction* root_instr) {
std::optional<int64_t> output_idx;
while (dus->opcode() == HloOpcode::kDynamicUpdateSlice) {
if (dus->user_count() != 1) {
output_idx = std::nullopt;
break;
}
if (dus->users()[0] == root_instr) {
auto indices = root_instr->OperandIndices(dus);
if (indices.size() != 1) {
output_idx = std::nullopt;
break;
}
output_idx = indices[0];
break;
}
dus = Cast<HloDynamicUpdateSliceInstruction>(dus->users()[0]);
}
return output_idx;
}
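// Remaps `operands` through `clone_map`; unmapped operands are kept as-is
// when `allow_unmapped` is set (and CHECK-fail otherwise).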
std::vector<HloInstruction*> MapNewOperands(
absl::Span<HloInstruction* const> operands, const InstructionMap& clone_map,
bool allow_unmapped = false) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(operands.size());
for (HloInstruction* operand : operands) {
auto it = clone_map.find(operand);
HloInstruction* mapped_operand = operand;
CHECK(it != clone_map.end() || allow_unmapped)
<< operand->ToString() << " not present in map";
if (it != clone_map.end()) {
mapped_operand = it->second;
}
new_operands.push_back(mapped_operand);
}
return new_operands;
}
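// One pipelining opportunity: the collectives to move, the
// dynamic-update-slices storing their results, the formatting ops in between,
// the sliced dimension, and the loop-output indices being written.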
struct WhileMoveInfo {
std::vector<HloInstruction*> collectives_to_move;
std::vector<HloDynamicUpdateSliceInstruction*> dynamic_update_slices;
std::vector<HloInstruction*> formatting_ops;
int64_t sliced_idx;
std::vector<int64_t> output_indices;
};
std::string ToString(const WhileMoveInfo& move_info) {
CHECK_EQ(move_info.dynamic_update_slices.size(),
move_info.output_indices.size());
std::vector<std::pair<decltype(move_info.dynamic_update_slices)::value_type,
decltype(move_info.output_indices)::value_type>>
zip_result;
zip_result.reserve(move_info.dynamic_update_slices.size());
for (int64_t i = 0; i < move_info.dynamic_update_slices.size(); ++i) {
zip_result.push_back(std::make_pair(move_info.dynamic_update_slices[i],
move_info.output_indices[i]));
}
return absl::StrFormat(
"\tCollectives:\n\t\t%s\n\tDynamicUpdateSlices:\n\t\t%s\n\tFormatting "
"ops:\n\t\t%s\n\tSliced index: %d",
absl::StrJoin(move_info.collectives_to_move, ",\n\t\t",
[](std::string* out, HloInstruction* instr) {
absl::StrAppend(out, instr->name());
}),
absl::StrJoin(zip_result, ",\n\t\t",
[](std::string* out, const auto& item) {
absl::StrAppend(
out, absl::StrFormat("%s (%d)", item.first->name(),
item.second));
}),
absl::StrJoin(move_info.formatting_ops, ",\n\t\t",
[](std::string* out, HloInstruction* instr) {
absl::StrAppend(out, instr->name());
}),
move_info.sliced_idx);
}
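// Gives cloned channel instructions fresh channel ids. Non-host-transfer
// send/recv keep their ids; send-done/recv-done inherit the id of the
// send/recv they consume.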
void UpdateInstructionChannelId(HloInstruction* cloned_instr,
int64_t& next_channel_id) {
if (const auto* send_recv_instr =
DynCast<HloSendRecvInstruction>(cloned_instr)) {
if (!send_recv_instr->is_host_transfer()) {
return;
}
}
if (auto* channel_instr = DynCast<HloChannelInstruction>(cloned_instr)) {
if (channel_instr->opcode() == HloOpcode::kSendDone ||
channel_instr->opcode() == HloOpcode::kRecvDone) {
auto* operand = channel_instr->operand(0);
CHECK(operand->opcode() == HloOpcode::kSend ||
operand->opcode() == HloOpcode::kRecv);
channel_instr->set_channel_id(
Cast<HloChannelInstruction>(operand)->channel_id());
return;
}
if (channel_instr->channel_id()) {
channel_instr->set_channel_id(next_channel_id++);
}
}
}
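// Clones the backward chain (formatting ops, then the collective) into
// `target_computation`, remapping operands and channel ids, and recording any
// loop-variant parameter GTEs encountered for later fixup.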
template <typename Comp>
absl::StatusOr<HloInstruction*> CloneBackwardChain(
Comp& target_computation, const WhileMoveInfo& move_info,
InstructionMap& clone_map, int64_t loop_iter_idx, int64_t& next_channel_id,
LoopVariantParameterInfo* loop_variant_parameter_info = nullptr) {
std::vector<HloInstruction*> to_clone(move_info.formatting_ops.begin(),
move_info.formatting_ops.end());
to_clone.push_back(move_info.collectives_to_move[0]);
HloInstruction* last_cloned = nullptr;
for (auto* chain_op : to_clone) {
if (IsLoopIterator(chain_op, loop_iter_idx) ||
clone_map.contains(chain_op)) {
continue;
}
auto new_operands = MapNewOperands(chain_op->operands(), clone_map);
HloInstruction* cloned = target_computation.AddInstruction(
chain_op->CloneWithNewOperands(chain_op->shape(), new_operands));
TF_RETURN_IF_ERROR(UpdateControlDependencies(chain_op, cloned, clone_map));
UpdateInstructionChannelId(cloned, next_channel_id);
clone_map[chain_op] = cloned;
last_cloned = cloned;
if (loop_variant_parameter_info != nullptr &&
chain_op->opcode() == HloOpcode::kGetTupleElement &&
chain_op->operand(0)->opcode() == HloOpcode::kParameter &&
chain_op->tuple_index() != loop_iter_idx) {
loop_variant_parameter_info->push_back(
std::make_pair(chain_op->tuple_index(), cloned));
}
}
CHECK_NE(last_cloned, nullptr);
return last_cloned;
}
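// Analyzes one while loop: computes its static trip count and collects the
// collectives that can be pipelined in the requested direction.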
class WhileLoopAnalysis {
public:
explicit WhileLoopAnalysis(
HloInstruction* while_instr, int64_t max_pipelining_per_loop,
bool pipeline_use_tree, bool process_different_sized_options,
TuplePointsToAnalysis* tuple_points_to_analysis, CallGraph* call_graph,
std::optional<ConstantValue> known_start = std::nullopt)
: while_(while_instr),
loop_start_(known_start),
max_pipelining_per_loop_(max_pipelining_per_loop),
tuple_points_to_analysis_(tuple_points_to_analysis),
call_graph_(call_graph),
pipeline_use_tree_(pipeline_use_tree),
process_different_sized_options_(process_different_sized_options) {}
std::optional<ConstantValue> GetLoopIterationCount() const;
std::optional<ConstantValue> GetLoopStart() const;
std::optional<ConstantValue> GetLoopIncrement() const;
const std::vector<WhileMoveInfo>& GetMoveInfos() const;
std::optional<int64_t> GetLoopIterationIdx() const {
return loop_iteration_idx_;
}
int64_t GetDUSIndex(const HloInstruction* dus) const;
const absl::flat_hash_map<HloInstruction*, int64_t>& GetDUSIndices() const {
return dus_index_map_;
}
int64_t GetUniqueDUSIndices() const { return dus_index_map_.size(); }
int64_t GetMaxPipeliningPerLoop() const { return max_pipelining_per_loop_; }
bool ComputeLoopStatistics();
std::optional<std::pair<int64_t, int64_t>> IsSupportedDynamicUpdateSlice(
const HloDynamicUpdateSliceInstruction* dyn_update,
const HloInstruction* instr,
const std::vector<HloInstruction*>& formatting_ops,
CollectivePipeliner::PipeliningDirection direction,
int64_t level_to_operate_on,
const absl::flat_hash_map<int64_t, int64_t>& parameter_gtes_count,
const absl::flat_hash_map<const HloInstruction*, Range>& index_ranges)
const;
void MergeIntoExistingCollectivesForward(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order);
void MergeIntoExistingCollectivesForwardSink(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
int64_t sliced_idx, std::vector<int64_t> output_indices,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t>
index_per_dyn_update_slice,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order);
void MergeIntoExistingCollectives(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
int64_t sliced_idx, std::vector<int64_t> output_indices,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t>
index_per_dyn_update_slice,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order,
CollectivePipeliner::PipeliningDirection direction);
void CollectCollectivesToMove(
int64_t level_to_operate_on,
CollectivePipeliner::PipeliningDirection direction,
HloPredicate should_process, HloPredicate acceptable_formatting,
HloPredicate should_allow_loop_variant_parameter_in_chain =
HloPredicateFalse,
bool should_allow_control_dependencies = false,
bool should_add_loop_invariant_op_in_chain = false);
HloInstruction* while_loop_instruction() const { return while_; }
void ExtractLoopInvariantOps();
private:
HloInstruction* while_;
std::optional<ConstantValue> loop_iteration_count_;
std::optional<ConstantValue> loop_increment_;
std::optional<ConstantValue> loop_start_;
std::optional<ConstantValue> loop_bound_;
std::optional<int64_t> loop_iteration_idx_;
std::vector<WhileMoveInfo> move_infos_;
absl::flat_hash_map<HloInstruction*, int64_t> dus_index_map_;
absl::flat_hash_set<const HloInstruction*> invariant_loop_parameters_;
absl::flat_hash_set<const HloInstruction*> invariant_loop_instructions_;
int64_t max_pipelining_per_loop_;
TuplePointsToAnalysis* tuple_points_to_analysis_;
CallGraph* call_graph_;
bool pipeline_use_tree_;
bool process_different_sized_options_;
};
int64_t WhileLoopAnalysis::GetDUSIndex(const HloInstruction* dus) const {
auto it = dus_index_map_.find(dus);
CHECK(it != dus_index_map_.end());
return it->second;
}
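// Marks as loop invariant all constants and every instruction whose operands
// are all invariant, in post order.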
void WhileLoopAnalysis::ExtractLoopInvariantOps() {
for (HloInstruction* inst :
while_->while_body()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kConstant) {
invariant_loop_instructions_.insert(inst);
continue;
}
if (invariant_loop_instructions_.contains(inst)) {
continue;
}
bool should_add = true;
for (const HloInstruction* operand : inst->operands()) {
should_add &= (invariant_loop_instructions_.contains(operand) ||
invariant_loop_parameters_.contains(operand));
}
if (should_add) {
invariant_loop_instructions_.insert(inst);
}
}
}
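// Pattern-matches the loop to recover start, increment, bound, trip count and
// the induction-variable tuple index; fails on dynamic or unsupported loops.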
bool WhileLoopAnalysis::ComputeLoopStatistics() {
if (loop_iteration_count_) {
return true;
}
std::optional<ParsedWhileLoop> parsed_loop = PatternMatchParseWhileLoop(
while_, {tuple_points_to_analysis_, call_graph_});
if (!parsed_loop || !parsed_loop->static_while_loop) {
return false;
}
if (!IsSupportedLoopIndexType(
while_->shape()
.tuple_shapes(parsed_loop->static_while_loop->induction_var_index)
.element_type())) {
return false;
}
const HloInstruction* loop_root = while_->while_body()->root_instruction();
const int64_t bitwidth = primitive_util::BitWidth(
loop_root->operand(parsed_loop->static_while_loop->induction_var_index)
->shape()
.element_type());
const bool is_signed = primitive_util::IsSignedIntegralType(
loop_root->operand(parsed_loop->static_while_loop->induction_var_index)
->shape()
.element_type());
const ConstantValue bound =
is_signed ? ConstantValue::GetSigned(
parsed_loop->static_while_loop->loop_bound, bitwidth)
: ConstantValue::GetUnsigned(
parsed_loop->static_while_loop->loop_bound, bitwidth);
const ConstantValue increment =
is_signed ? ConstantValue::GetSigned(
parsed_loop->static_while_loop->step_size, bitwidth)
: ConstantValue::GetUnsigned(
parsed_loop->static_while_loop->step_size, bitwidth);
loop_start_ =
is_signed ? ConstantValue::GetSigned(
parsed_loop->static_while_loop->induction_var_init_value,
bitwidth)
: ConstantValue::GetUnsigned(
parsed_loop->static_while_loop->induction_var_init_value,
bitwidth);
auto iteration_range = bound.sub(*loop_start_);
auto iter_count = iteration_range.div(increment);
loop_iteration_count_ =
iteration_range.mod(increment).gt(
ConstantValue::GetZero(increment.GetBitwidth(), increment.IsSigned()))
? iter_count.add(ConstantValue::GetOne(increment.GetBitwidth(),
increment.IsSigned()))
: iter_count;
if (loop_iteration_count_->lt(iter_count)) {
return false;
}
loop_bound_ = bound;
loop_increment_ = increment;
loop_iteration_idx_ = parsed_loop->static_while_loop->induction_var_index;
VLOG(1) << "Bound: " << loop_bound_->ToString()
<< " Start: " << loop_start_->ToString()
<< " Increment: " << loop_increment_->ToString();
if (loop_root->opcode() == HloOpcode::kTuple) {
for (int i = 0; i < loop_root->operand_count(); ++i) {
if (loop_root->operand(i)->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
if (i != loop_root->operand(i)->tuple_index()) {
continue;
}
invariant_loop_parameters_.insert(loop_root->operand(i));
}
}
ExtractLoopInvariantOps();
return true;
}
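// Validates one dynamic-update-slice fed by the collective: the sliced
// dimension must be unique, indexed monotonically by the induction variable,
// and write into a unique loop output.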
std::optional<std::pair<int64_t, int64_t>>
WhileLoopAnalysis::IsSupportedDynamicUpdateSlice(
const HloDynamicUpdateSliceInstruction* dyn_update,
const HloInstruction* instr,
const std::vector<HloInstruction*>& formatting_ops,
CollectivePipeliner::PipeliningDirection direction,
int64_t level_to_operate_on,
const absl::flat_hash_map<int64_t, int64_t>& parameter_gtes_count,
const absl::flat_hash_map<const HloInstruction*, Range>& index_ranges)
const {
HloComputation* while_body = while_->while_body();
const HloInstruction* loop_parameter =
while_body->parameter_instructions()[0];
std::optional<int64_t> sliced_dim = GetSlicedDimension(dyn_update);
if (!sliced_dim.has_value()) {
VLOG(5) << "Skipping " << instr->name()
<< " because couldn't find sliced dimension";
return std::nullopt;
}
if (direction == CollectivePipeliner::PipeliningDirection::kForwardSink &&
(*sliced_dim != 0 || dyn_update->shape().dimensions(0) !=
loop_iteration_count_->GetUnsignedValue())) {
VLOG(5) << "Skipping " << instr->name()
<< " because number of iteration of the loop doesn't match "
"slices being inserted or slice dim is not 0. slice_dim = "
<< *sliced_dim
<< " loop count = " << loop_iteration_count_->GetUnsignedValue();
return std::nullopt;
}
if (!process_different_sized_options_) {
if (!formatting_ops.empty()) {
if (instr->operand(0)->shape() != formatting_ops.back()->shape()) {
VLOG(5) << "Skipping " << instr->name()
<< " because operand and last formatting op don't have the "
"same shape";
return std::nullopt;
}
auto dependencies_to_pipeline = CollectDependenciesToPipeline(
absl::MakeConstSpan({instr}), absl::MakeConstSpan(formatting_ops));
bool skip_because_not_same_size = false;
for (auto* dependency : dependencies_to_pipeline) {
if (ShapeUtil::IsEffectiveScalar(dependency->shape())) {
skip_because_not_same_size = true;
break;
}
}
if (skip_because_not_same_size) {
VLOG(5)
<< "Skipping " << instr->name()
<< " because formatting ops do not have the expected shapes/sizes";
return std::nullopt;
}
} else if (instr->operand(0)->shape() != instr->shape()) {
VLOG(5) << "Skipping " << instr->name()
<< " because instr does not have the same shape as its operand";
return std::nullopt;
}
}
const HloInstruction* to_insert_into = dyn_update->operand(0);
if (level_to_operate_on == 0 &&
(to_insert_into->opcode() != HloOpcode::kGetTupleElement ||
to_insert_into->operand(0) != loop_parameter)) {
VLOG(5) << "Skipping " << instr->name()
<< " because slice to insert into is not a GTE from input "
"parameter "
<< to_insert_into->ToString();
return std::nullopt;
}
if (level_to_operate_on == 0) {
if (to_insert_into->opcode() == HloOpcode::kGetTupleElement) {
if (parameter_gtes_count.at(to_insert_into->tuple_index()) != 1) {
VLOG(5) << "Skipping " << instr->name()
<< " because there are multiple parameter GTEs for this slice";
return std::nullopt;
}
}
const HloInstruction* dyn_update_idx = dyn_update->operand(
dyn_update->first_index_operand_number() + *sliced_dim);
if (level_to_operate_on == 0 &&
!CheckParameterUsageIsCompatible(to_insert_into, dyn_update,
dyn_update_idx, *sliced_dim)) {
VLOG(5) << "Skipping " << instr->name()
<< " because parameter usage doesn't follow the expected pattern";
return std::nullopt;
}
if (!AllIndicesConstantsExceptOne(
dyn_update,
dyn_update->first_index_operand_number() + *sliced_dim)) {
VLOG(5) << "Skipping " << instr->name()
<< " because update slicing doesn't match expectation";
return std::nullopt;
}
if (!CheckIndexIsMonotonic(dyn_update_idx, index_ranges)) {
VLOG(5) << "Skipping " << instr->name()
<< " because update index is not monotonic";
return std::nullopt;
}
}
std::optional<int64_t> output_idx = FindOutputIndexForDynamicUpdateSlice(
dyn_update, while_body->root_instruction());
if (!output_idx.has_value()) {
VLOG(5) << "Skipping " << instr->name()
<< " because couldn't find unique output index for insertion";
return std::nullopt;
}
return std::make_pair(*sliced_dim, *output_idx);
}
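// Forward pipelining permits a single dynamic-update-slice per entry, so
// merging only unions the new instruction's formatting ops into the existing
// entry, keeping them in instruction order.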
void WhileLoopAnalysis::MergeIntoExistingCollectivesForward(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order) {
CHECK_EQ(indices_to_merge.size(), 1);
CHECK_EQ(dyn_updates.size(), 1);
int64_t target_idx = indices_to_merge[0];
CHECK_EQ(move_infos_[target_idx].dynamic_update_slices.size(), 1);
CHECK_EQ(move_infos_[target_idx].collectives_to_move.size(), 1);
HloDynamicUpdateSliceInstruction* dyn_update = dyn_updates[0];
CHECK_EQ(move_infos_[target_idx].dynamic_update_slices[0], dyn_update)
<< "Not the same dynamic-update-slice for converging entry";
absl::flat_hash_set<const HloInstruction*> existing_entry_instrs(
move_infos_[target_idx].formatting_ops.begin(),
move_infos_[target_idx].formatting_ops.end());
existing_entry_instrs.insert(move_infos_[target_idx].collectives_to_move[0]);
if (existing_entry_instrs.count(instr)) {
return;
}
move_infos_[target_idx].formatting_ops.push_back(instr);
for (auto* op : formatting_ops) {
if (!existing_entry_instrs.count(op)) {
move_infos_[target_idx].formatting_ops.push_back(op);
}
}
absl::c_sort(move_infos_[target_idx].formatting_ops,
[&](const HloInstruction* a, const HloInstruction* b) {
return instruction_order[a] < instruction_order[b];
});
}
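// Forward-sink merging: folds one or more existing entries (plus the new
// instruction) into the lowest-index entry, deduplicating collectives,
// formatting ops, and dynamic-update-slices, then re-sorts the formatting ops.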
void WhileLoopAnalysis::MergeIntoExistingCollectivesForwardSink(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
int64_t sliced_idx, std::vector<int64_t> output_indices,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t>
index_per_dyn_update_slice,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order) {
CHECK(!indices_to_merge.empty());
const int64_t target_idx = *absl::c_min_element(indices_to_merge);
absl::flat_hash_set<const HloInstruction*> existing_formatting_ops(
move_infos_[target_idx].formatting_ops.begin(),
move_infos_[target_idx].formatting_ops.end());
absl::flat_hash_set<const HloInstruction*> existing_collectives_to_move(
move_infos_[target_idx].collectives_to_move.begin(),
move_infos_[target_idx].collectives_to_move.end());
absl::flat_hash_set<const HloInstruction*> existing_dyn_updates(
move_infos_[target_idx].dynamic_update_slices.begin(),
move_infos_[target_idx].dynamic_update_slices.end());
auto merge_entry_to_target =
[&](std::vector<HloInstruction*> collectives_to_merge,
std::vector<HloInstruction*>& formatting_ops_to_merge,
std::vector<HloDynamicUpdateSliceInstruction*>& dyn_updates_to_merge,
int64_t sliced_idx_to_merge,
std::vector<int64_t>& output_indices_to_merge) {
for (HloInstruction* op : collectives_to_merge) {
if (!existing_collectives_to_move.count(op)) {
move_infos_[target_idx].collectives_to_move.push_back(op);
}
}
for (HloInstruction* op : formatting_ops_to_merge) {
if (!existing_formatting_ops.count(op)) {
move_infos_[target_idx].formatting_ops.push_back(op);
}
}
for (int64_t i = 0; i < dyn_updates_to_merge.size(); ++i) {
HloDynamicUpdateSliceInstruction* dyn_update =
dyn_updates_to_merge[i];
index_per_dyn_update_slice[dyn_update] = target_idx;
if (!existing_dyn_updates.count(dyn_update)) {
move_infos_[target_idx].dynamic_update_slices.push_back(dyn_update);
CHECK_EQ(sliced_idx_to_merge, move_infos_[target_idx].sliced_idx);
move_infos_[target_idx].output_indices.push_back(
output_indices_to_merge[i]);
}
}
};
for (int64_t idx : indices_to_merge) {
if (idx == target_idx) {
continue;
}
merge_entry_to_target(
move_infos_[idx].collectives_to_move, move_infos_[idx].formatting_ops,
move_infos_[idx].dynamic_update_slices, move_infos_[idx].sliced_idx,
move_infos_[idx].output_indices);
move_infos_.erase(move_infos_.begin() + idx);
}
merge_entry_to_target({instr}, formatting_ops, dyn_updates, sliced_idx,
output_indices);
absl::c_sort(move_infos_[target_idx].formatting_ops,
[&](const HloInstruction* a, const HloInstruction* b) {
return instruction_order[a] < instruction_order[b];
});
}
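// Dispatches to the forward or forward-sink merge strategy; backward
// pipelining never merges entries.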
void WhileLoopAnalysis::MergeIntoExistingCollectives(
HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
int64_t sliced_idx, std::vector<int64_t> output_indices,
std::vector<int64_t> indices_to_merge,
absl::flat_hash_map<const HloInstruction*, int64_t>
index_per_dyn_update_slice,
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order,
CollectivePipeliner::PipeliningDirection direction) {
if (direction == CollectivePipeliner::PipeliningDirection::kForwardSink) {
MergeIntoExistingCollectivesForwardSink(
instr, formatting_ops, dyn_updates, sliced_idx, output_indices,
indices_to_merge, index_per_dyn_update_slice, instruction_order);
return;
}
if (direction == CollectivePipeliner::PipeliningDirection::kForward) {
MergeIntoExistingCollectivesForward(instr, formatting_ops, dyn_updates,
indices_to_merge, instruction_order);
return;
}
CHECK(false) << "Backward pipelining is not supported in "
"MergeIntoExistingCollectives ";
}
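// Populates move_infos_: walks the loop body in post order, checks each
// instruction matching `should_process` against the constraints of the chosen
// direction, and merges entries that share dynamic-update-slices. Stops once
// max_pipelining_per_loop_ entries are collected.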
void WhileLoopAnalysis::CollectCollectivesToMove(
int64_t level_to_operate_on,
CollectivePipeliner::PipeliningDirection direction,
HloPredicate should_process, HloPredicate acceptable_formatting,
HloPredicate should_allow_loop_variant_parameter_in_chain,
bool should_allow_control_dependencies,
bool should_add_loop_invariant_op_in_chain) {
move_infos_.clear();
HloComputation* while_body = while_->while_body();
const HloInstruction* loop_parameter =
while_body->parameter_instructions()[0];
if (absl::c_any_of(loop_parameter->users(), [](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kGetTupleElement;
})) {
return;
}
if (absl::c_any_of(while_->users(), [](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kGetTupleElement;
})) {
return;
}
absl::flat_hash_map<int64_t, int64_t> parameter_gtes_count;
for (auto* user : loop_parameter->users()) {
CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement);
++parameter_gtes_count[user->tuple_index()];
}
absl::flat_hash_map<const HloInstruction*, Range> index_ranges;
absl::flat_hash_map<const HloInstruction*, int64_t>
index_per_dyn_update_slice;
std::optional<Range> index_range;
if (loop_bound_) {
index_range = Range{*loop_start_,
loop_start_->add(loop_iteration_count_
->sub(ConstantValue::GetOne(
loop_start_->GetBitwidth(),
loop_start_->IsSigned()))
.mul(*loop_increment_)),
true};
}
int64_t count = 0;
absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order;
std::vector<HloInstruction*> instructions_post_order =
while_body->MakeInstructionPostOrder();
for (auto* instr : instructions_post_order) {
if (instr->opcode() == HloOpcode::kGetTupleElement) {
if (index_range && instr->tuple_index() == 0) {
index_ranges.insert({instr, *index_range});
}
}
instruction_order[instr] = count++;
}
for (auto* instr : instructions_post_order) {
if (direction == CollectivePipeliner::PipeliningDirection::kForward &&
(instr->operand_count() != 1 ||
instr->shape().dimensions_size() !=
instr->operand(0)->shape().dimensions_size())) {
continue;
}
if (!should_process(instr)) {
continue;
}
if (direction == CollectivePipeliner::PipeliningDirection::kForward ||
direction == CollectivePipeliner::PipeliningDirection::kForwardSink) {
auto [dyn_updates, formatting_ops] = CheckStoreIntoSliceIsCompatible(
instr, while_body, level_to_operate_on, pipeline_use_tree_,
acceptable_formatting,
direction ==
CollectivePipeliner::PipeliningDirection::kForwardSink);
if (dyn_updates.empty()) {
VLOG(5)
<< "Skipping " << instr->name()
<< " because storing into slice is not compatible with pipelining";
continue;
}
CHECK(direction != CollectivePipeliner::PipeliningDirection::kForward ||
dyn_updates.size() == 1);
int64_t sliced_idx = -1;
std::vector<int64_t> output_indices;
bool skip_instr = false;
bool not_first_dyn_update = false;
for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
std::optional<std::pair<int64_t, int64_t>> maybe_dus_info =
IsSupportedDynamicUpdateSlice(dyn_update, instr, formatting_ops,
direction, level_to_operate_on,
parameter_gtes_count, index_ranges);
if (!maybe_dus_info.has_value()) {
VLOG(5) << "Skipping " << instr->name() << " because "
<< dyn_update->name() << " is not supported";
skip_instr = true;
break;
}
output_indices.push_back(maybe_dus_info->second);
if (not_first_dyn_update) {
CHECK_NE(dyn_update->operand(0), dyn_updates[0]->operand(0));
CHECK_EQ(sliced_idx, maybe_dus_info->first);
} else {
sliced_idx = maybe_dus_info->first;
}
not_first_dyn_update = true;
}
if (skip_instr) {
continue;
}
CHECK_NE(sliced_idx, -1);
std::vector<int64_t> indices_to_merge;
for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
if (index_per_dyn_update_slice.find(dyn_update) !=
index_per_dyn_update_slice.end()) {
int64_t index = index_per_dyn_update_slice[dyn_update];
if (!absl::c_linear_search(indices_to_merge, index)) {
indices_to_merge.push_back(index);
}
}
}
if (!indices_to_merge.empty()) {
MergeIntoExistingCollectives(
instr, formatting_ops, dyn_updates, sliced_idx, output_indices,
indices_to_merge, index_per_dyn_update_slice, instruction_order,
direction);
} else {
absl::c_sort(formatting_ops,
[&](const HloInstruction* a, const HloInstruction* b) {
return instruction_order[a] < instruction_order[b];
});
for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
index_per_dyn_update_slice[dyn_update] = move_infos_.size();
}
move_infos_.push_back({{instr},
dyn_updates,
std::move(formatting_ops),
sliced_idx,
std::move(output_indices)});
}
} else {
CHECK_EQ(direction, CollectivePipeliner::PipeliningDirection::kBackward);
auto chain_collected = CollectChainsToPushBackwards(
instr, *loop_iteration_idx_, while_body, level_to_operate_on,
invariant_loop_parameters_,
should_allow_loop_variant_parameter_in_chain,
should_allow_control_dependencies, invariant_loop_instructions_,
should_add_loop_invariant_op_in_chain);
if (!chain_collected.has_value()) {
VLOG(5) << "Skipping " << instr->name()
<< " because didn't find compatible slice of parameter";
continue;
}
move_infos_.push_back(
WhileMoveInfo{{instr}, {}, std::move(*chain_collected), {}, {}});
}
if (move_infos_.size() >= max_pipelining_per_loop_) {
break;
}
}
if (direction != CollectivePipeliner::PipeliningDirection::kForward) {
return;
}
dus_index_map_.clear();
for (auto& to_move : move_infos_) {
CHECK_EQ(to_move.dynamic_update_slices.size(), 1);
HloInstruction* dus_index =
to_move.dynamic_update_slices[0]->mutable_operand(
to_move.dynamic_update_slices[0]->first_index_operand_number() +
to_move.sliced_idx);
auto it = dus_index_map_.find(dus_index);
int64_t dus_index_tuple_position = dus_index_map_.size();
if (it != dus_index_map_.end()) {
dus_index_tuple_position = it->second;
} else {
dus_index_map_[dus_index] = dus_index_tuple_position;
}
}
}
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopIterationCount() const {
return loop_iteration_count_;
}
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopStart() const {
return loop_start_;
}
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopIncrement() const {
return loop_increment_;
}
const std::vector<WhileMoveInfo>& WhileLoopAnalysis::GetMoveInfos() const {
return move_infos_;
}
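// Iterative post-order check (memoized in `invariant_cache`) that `instr` has
// no loop-carried dependencies: no parameters and no side effects anywhere in
// its transitive operands.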
bool IsLoopInvariant(
const HloInstruction* instr,
absl::flat_hash_map<const HloInstruction*, bool>& invariant_cache) {
auto it = invariant_cache.find(instr);
if (it != invariant_cache.end()) {
return it->second;
}
std::vector<std::pair<const HloInstruction*, int>> stack(
1, std::make_pair(instr, 0));
while (!stack.empty()) {
auto& current = stack.back();
invariant_cache[std::get<0>(current)] = true;
if (std::get<0>(current)->HasSideEffect() ||
std::get<0>(current)->opcode() == HloOpcode::kParameter) {
invariant_cache[std::get<0>(current)] = false;
stack.pop_back();
continue;
}
if (std::get<0>(current)->operands().empty()) {
invariant_cache[std::get<0>(current)] = true;
stack.pop_back();
continue;
}
if (std::get<1>(current) > 0) {
auto* current_operand =
std::get<0>(current)->operand(std::get<1>(current) - 1);
auto cop_it = invariant_cache.find(current_operand);
CHECK(cop_it != invariant_cache.end())
<< "Entry expected to be populated";
if (!cop_it->second) {
invariant_cache[std::get<0>(current)] = false;
stack.pop_back();
continue;
}
}
if (std::get<0>(current)->operand_count() == std::get<1>(current)) {
stack.pop_back();
continue;
}
auto* next_operand = std::get<0>(current)->operand(std::get<1>(current)++);
auto op_it = invariant_cache.find(next_operand);
if (op_it == invariant_cache.end()) {
stack.push_back(std::make_pair(next_operand, 0));
    } else if (!op_it->second) {
      // A known non-invariant operand makes the current instruction
      // non-invariant as well.
      invariant_cache[std::get<0>(current)] &= op_it->second;
    }
}
it = invariant_cache.find(instr);
CHECK(it != invariant_cache.end())
<< "We should have computed \"instr\" value";
return it->second;
}
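// Prepends the full extent of the sliced dimension as a new major dimension
// of `base_shape`, yielding the shape of the fully accumulated value.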
Shape ComputeFullOutputShape(const WhileMoveInfo& move_info,
const Shape& base_shape) {
HloDynamicUpdateSliceInstruction* dus = move_info.dynamic_update_slices[0];
return ShapeUtil::PrependMajorDimension(
dus->operand(0)->shape().dimensions()[move_info.sliced_idx], base_shape);
}
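// Materializes a zero of `ptype`, broadcast to `shape` when non-scalar.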
HloInstruction* CreateZero(HloComputation* comp, const Shape& shape,
PrimitiveType ptype) {
if (shape.dimensions_size() == 0) {
return comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));
}
HloInstruction* zero_constant =
comp->AddInstruction(HloInstruction::CreateBroadcast(
shape,
comp->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(ptype))),
{}));
return zero_constant;
}
}
using Interval = std::pair<int64_t, int64_t>;
using Intervals = std::vector<Interval>;
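// Parses a replica-group-style string such as "{{0,1},{2,3}}" into a vector
// of (first, second) pairs; every group must contain exactly two ids.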
absl::StatusOr<std::vector<Interval>> ParseVectorOfPairs(
absl::string_view str) {
TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> replica_groups,
ParseReplicaGroupsOnly(str));
std::vector<Interval> res;
res.reserve(replica_groups.size());
for (const ReplicaGroup& replica_group : replica_groups) {
TF_RET_CHECK(replica_group.replica_ids_size() == 2);
int64_t a = replica_group.replica_ids(0);
int64_t b = replica_group.replica_ids(1);
res.emplace_back(a, b);
}
return res;
}
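// Rewrites the _xla_send_recv_validation frontend attribute of a
// collective-permute after pipelining. A peeled instance executes for a
// single boundary iteration (the first for forward pipelining, the last for
// backward), so its interval collapses to {0,0} when that iteration was valid
// and to the empty interval {1,0} otherwise; in-loop instances have their
// iteration bounds shifted or clamped to account for the peeled iteration.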
absl::Status UpdateSendRecvValidation(
HloInstruction* instruction, bool is_peeled,
CollectivePipeliner::PipeliningDirection direction,
const WhileLoopAnalysis& loop_analysis) {
if (instruction->opcode() != HloOpcode::kCollectivePermute) {
return absl::OkStatus();
}
const auto& frontend_attributes = instruction->frontend_attributes().map();
if (!frontend_attributes.contains(kSendRecvValidationAttr)) {
return absl::OkStatus();
}
VLOG(3) << "Trip count = "
<< loop_analysis.GetLoopIterationCount()->GetSignedValue();
VLOG(3) << "Collective permute with _xla_send_recv_validation: "
<< instruction->ToString();
TF_ASSIGN_OR_RETURN(
Intervals old_intervals,
ParseVectorOfPairs(frontend_attributes.at(kSendRecvValidationAttr)));
Intervals intervals;
if (direction == CollectivePipeliner::kForward) {
for (auto [a, b] : old_intervals) {
if (is_peeled) {
if (a <= 0 && 0 <= b) {
intervals.push_back({0, 0});
} else {
intervals.push_back({1, 0});
}
} else {
intervals.push_back(
{std::max(int64_t{0}, a - 1), std::max(int64_t{0}, b - 1)});
}
}
} else if (direction == CollectivePipeliner::kBackward) {
auto trip_count_value = loop_analysis.GetLoopIterationCount();
if (!trip_count_value) {
return absl::InternalError(
"Unable to deduce loop trip count in collective pipeliner. This is "
"required for backward pipelining while fixing the "
"_xla_send_recv_validation attribute");
}
int64_t trip_count = trip_count_value->GetSignedValue();
int64_t last_iteration = trip_count - 1;
for (auto [a, b] : old_intervals) {
if (is_peeled) {
if (a <= last_iteration && last_iteration <= b) {
intervals.push_back({0, 0});
} else {
intervals.push_back({1, 0});
}
} else {
intervals.push_back({a, std::min(last_iteration - 1, b)});
}
}
}
hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
instruction, kSendRecvValidationAttr, intervals);
VLOG(3) << "Updated collective_permute with _xla_send_recv_validation: "
<< instruction->ToString();
return absl::OkStatus();
}
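// Forward pipelining. In outline: the first loop iteration is peeled into the
// parent computation with the pipelined collective (and its formatting ops)
// turned into passthroughs, so every collective is deferred by one iteration.
// Inside the new loop the collective operates on the slice written by the
// previous iteration, threaded through extra tuple elements holding the DUS
// indices and, when the output buffer cannot be reused, the raw inputs; the
// final iteration's slice is processed after the loop on the loop output.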
absl::Status TransformLoopForward(
const WhileLoopAnalysis& loop_analysis, bool insert_non_alias_custom_call,
int64_t level_to_operate_on, bool pipeline_use_tree,
bool process_different_sized_ops, HloPredicate should_process,
HloPredicate acceptable_formatting, HloPredicate reuse_output_buffer,
int64_t& next_channel_id) {
InstructionMap while_body_to_peeled;
absl::flat_hash_set<HloInstruction*> to_skip_set;
absl::flat_hash_map<HloInstruction*, HloInstruction*> formatting_map;
absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
std::vector<int64_t> moves_requiring_special_output;
int64_t count = 0;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
CHECK_EQ(to_move.dynamic_update_slices.size(), 1);
to_skip_set.insert(to_move.collectives_to_move.front());
if (!to_move.formatting_ops.empty()) {
formatting_map[to_move.formatting_ops.back()] =
to_move.collectives_to_move.front();
}
const Shape& output_shape =
to_move.formatting_ops.empty()
? to_move.collectives_to_move.front()->shape()
: to_move.formatting_ops.back()->shape();
if (!reuse_output_buffer(to_move.collectives_to_move.front()) ||
output_shape !=
to_move.collectives_to_move.front()->operand(0)->shape()) {
moves_requiring_special_output.push_back(count);
to_skip_set.insert(to_move.dynamic_update_slices.front());
}
++count;
}
HloInstruction* while_loop = loop_analysis.while_loop_instruction();
HloComputation* while_body = while_loop->while_body();
CHECK_EQ(while_body->parameter_instructions().size(), 1)
<< "Expected only one parameter";
HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
HloInstruction* loop_init = while_loop->mutable_operand(0);
const int64_t initial_inputs = loop_init->operand_count();
while_body_to_peeled[loop_parameter] = loop_init;
for (auto* user : loop_parameter->users()) {
CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement)
<< "Expected only get-tuple-elements as users";
while_body_to_peeled[user] =
loop_init->mutable_operand(user->tuple_index());
}
CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
i;
}
HloComputation* loop_computation = while_loop->parent();
std::vector<HloInstruction*> new_init_operands;
std::vector<Shape> new_parameter_shapes;
std::vector<HloInstruction*> new_root_operands;
const int64_t operands_indices_count =
loop_init->operand_count() + loop_analysis.GetUniqueDUSIndices();
const int64_t new_loop_tuple_operand_count =
operands_indices_count + moves_requiring_special_output.size();
new_parameter_shapes.resize(new_loop_tuple_operand_count);
new_root_operands.resize(new_loop_tuple_operand_count);
new_init_operands.resize(new_loop_tuple_operand_count);
for (int i = 0; i < loop_parameter->shape().tuple_shapes().size(); ++i) {
new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
new_init_operands[i] = loop_init->mutable_operand(i);
}
for (auto* instr : while_body->MakeInstructionPostOrder()) {
if (instr == loop_parameter) {
continue;
}
if (ContainsKey(to_skip_set, instr)) {
auto it = while_body_to_peeled.find(instr->operand(0));
CHECK(it != while_body_to_peeled.end());
HloInstruction* passthrough_operand = it->second;
while_body_to_peeled[instr] = passthrough_operand;
continue;
}
auto formatting_it = formatting_map.find(instr);
if (formatting_it != formatting_map.end()) {
auto it = while_body_to_peeled.find(formatting_it->second);
CHECK(it != while_body_to_peeled.end());
HloInstruction* passthrough_operand = it->second;
while_body_to_peeled[instr] = passthrough_operand;
continue;
}
std::vector<HloInstruction*> new_operands =
MapNewOperands(instr->operands(), while_body_to_peeled);
HloInstruction* cloned_instr = loop_computation->AddInstruction(
instr->CloneWithNewOperands(instr->shape(), new_operands));
TF_RETURN_IF_ERROR(
UpdateControlDependencies(instr, cloned_instr, while_body_to_peeled));
UpdateInstructionChannelId(cloned_instr, next_channel_id);
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        cloned_instr, /*is_peeled=*/true,
        CollectivePipeliner::PipeliningDirection::kForward, loop_analysis));
while_body_to_peeled[instr] = cloned_instr;
auto output_it = is_output_instruction.find(instr);
if (output_it != is_output_instruction.end()) {
new_init_operands[output_it->second] = cloned_instr;
}
}
for (auto& dus : loop_analysis.GetDUSIndices()) {
new_parameter_shapes[dus.second + initial_inputs] = dus.first->shape();
new_root_operands[dus.second + initial_inputs] = dus.first;
new_init_operands[dus.second + initial_inputs] =
while_body_to_peeled[dus.first];
}
absl::flat_hash_map<int64_t, int64_t> moves_requiring_special_output_to_idx;
for (int i = 0; i < moves_requiring_special_output.size(); ++i) {
HloInstruction* collective =
loop_analysis.GetMoveInfos()[moves_requiring_special_output[i]]
.collectives_to_move.front();
moves_requiring_special_output_to_idx[moves_requiring_special_output[i]] =
operands_indices_count + i;
new_parameter_shapes[operands_indices_count + i] =
collective->operand(0)->shape();
new_root_operands[operands_indices_count + i] =
collective->mutable_operand(0);
new_init_operands[operands_indices_count + i] =
while_body_to_peeled[collective->mutable_operand(0)];
}
for (auto& move_info : loop_analysis.GetMoveInfos()) {
auto pipelined_instrs = CollectDependenciesToPipeline(
absl::MakeConstSpan(move_info.collectives_to_move),
absl::MakeSpan(move_info.formatting_ops));
for (auto* pipelined : pipelined_instrs) {
is_output_instruction[pipelined] = new_init_operands.size();
new_parameter_shapes.push_back(pipelined->shape());
new_root_operands.push_back(pipelined);
new_init_operands.push_back(while_body_to_peeled[pipelined]);
}
}
Shape loop_state_shape = ShapeUtil::MakeTupleShape(new_parameter_shapes);
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
InstructionMap pipelined_values_map_inloop;
InstructionMap pipelined_values_map_outloop;
replacements[loop_parameter] = HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "loop_peel_param");
replacements[while_loop->while_condition()->parameter_instructions()[0]] =
HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
"loop_peel_cond_param");
replacements[while_body->root_instruction()] =
HloInstruction::CreateTuple(new_root_operands);
HloComputation* new_while_condition =
loop_computation->parent()->AddEmbeddedComputation(
while_loop->while_condition()->CloneWithReplacements(&replacements));
HloComputation* new_while_body =
loop_computation->parent()->AddEmbeddedComputation(
while_body->CloneWithReplacements(&replacements));
for (HloInstruction* instruction : new_while_body->instructions()) {
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        instruction, /*is_peeled=*/false,
        CollectivePipeliner::PipeliningDirection::kForward, loop_analysis));
}
HloInstruction* new_init = loop_computation->AddInstruction(
HloInstruction::CreateTuple(new_init_operands));
while_body_to_peeled[while_body->root_instruction()] = new_init;
TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
new_init, while_body_to_peeled));
HloInstruction* new_while_loop =
loop_computation->AddInstruction(HloInstruction::CreateWhile(
loop_state_shape, new_while_condition, new_while_body, new_init));
TF_RETURN_IF_ERROR(
while_loop->ReplaceAllUsesWithDifferentShape(new_while_loop));
TF_RETURN_IF_ERROR(
loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
WhileLoopAnalysis new_loop_analysis(
new_while_loop, loop_analysis.GetMaxPipeliningPerLoop(),
pipeline_use_tree, process_different_sized_ops,
      /*tuple_points_to_analysis=*/nullptr,
      /*call_graph=*/nullptr,
loop_analysis.GetLoopStart()->add(*loop_analysis.GetLoopIncrement()));
new_loop_analysis.ComputeLoopStatistics();
new_loop_analysis.CollectCollectivesToMove(
level_to_operate_on, CollectivePipeliner::PipeliningDirection::kForward,
should_process, acceptable_formatting);
CHECK_EQ(new_loop_analysis.GetMoveInfos().size(),
loop_analysis.GetMoveInfos().size());
for (int64_t i = new_loop_tuple_operand_count;
i < new_parameter_shapes.size(); ++i) {
HloInstruction* pipelined_value_load_inloop =
new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
new_while_body->parameter_instruction(0), i));
HloInstruction* pipelined_value_load_outloop =
loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_loop, i));
pipelined_values_map_inloop[new_while_body->root_instruction()->operand(
i)] = pipelined_value_load_inloop;
pipelined_values_map_outloop[new_while_body->root_instruction()->operand(
i)] = pipelined_value_load_outloop;
}
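  // Writes `to_insert` into `base` at `dus_index` along dimension
  // `index_position` (zero for all other indices) via a dynamic-update-slice.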
auto insert_slice = [](HloInstruction* to_insert, int64_t index_position,
int64_t num_indices, HloInstruction* dus_index,
HloInstruction* base) {
HloComputation* computation = to_insert->parent();
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(dus_index->shape().element_type())));
std::vector<HloInstruction*> indices(num_indices, zero);
indices[index_position] = dus_index;
return computation->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
base->shape(), base, to_insert, indices));
};
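  // Applies the pipelined collective, and then its formatting ops, to one
  // extracted slice. When this is not the last run, the result is tagged with
  // a kInsertedByPreviousStep custom-call recording the pipelining level, so
  // later pipelining steps can recognize values produced here.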
auto process_slice =
[&next_channel_id, insert_non_alias_custom_call, level_to_operate_on](
HloInstruction* stacked_data,
const InstructionMap& pipelined_values_map,
const WhileMoveInfo& move_info) -> absl::StatusOr<HloInstruction*> {
HloInstruction* processed = stacked_data->parent()->AddInstruction(
move_info.collectives_to_move.front()->CloneWithNewOperands(
move_info.collectives_to_move.front()->shape(), {stacked_data}));
UpdateInstructionChannelId(processed, next_channel_id);
if (insert_non_alias_custom_call) {
HloInstruction* level =
stacked_data->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0(level_to_operate_on + 1)));
processed = stacked_data->parent()->AddInstruction(
HloInstruction::CreateCustomCall(
processed->shape(), {processed, level},
CollectivePipeliner::kInsertedByPreviousStep));
}
InstructionMap cloned_map = pipelined_values_map;
cloned_map[move_info.collectives_to_move.front()] = processed;
for (auto* formatting_op : move_info.formatting_ops) {
auto new_operands = MapNewOperands(formatting_op->operands(), cloned_map);
processed = stacked_data->parent()->AddInstruction(
formatting_op->CloneWithNewOperands(formatting_op->shape(),
new_operands));
cloned_map[formatting_op] = processed;
}
return processed;
};
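  // Extracts one iteration's slice from the stacked buffer (when the stacked
  // and per-iteration shapes differ), runs process_slice on it, and writes
  // the result back into `stacked_data` with a dynamic-update-slice.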
auto extract_and_process_slice =
[&process_slice](
HloInstruction* stacked_data, HloInstruction* data_to_slice,
const WhileMoveInfo& move_info,
const InstructionMap& pipelined_values_map,
HloInstruction* dus_index) -> absl::StatusOr<HloInstruction*> {
HloComputation* computation = stacked_data->parent();
const Shape& slice_target_shape =
move_info.collectives_to_move.front()->operand(0)->shape();
HloInstruction* sliced_data = data_to_slice;
HloDynamicUpdateSliceInstruction* dyn_update =
move_info.dynamic_update_slices.front();
PrimitiveType element_type =
dyn_update
->operand(dyn_update->first_index_operand_number() +
move_info.sliced_idx)
->shape()
.element_type();
HloInstruction* zero = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
std::vector<HloInstruction*> indices(
dyn_update->operand_count() - dyn_update->first_index_operand_number(),
zero);
indices[move_info.sliced_idx] = dus_index;
if (slice_target_shape != data_to_slice->shape()) {
absl::InlinedVector<int64_t, 4> dynamic_slice_sizes;
dynamic_slice_sizes.reserve(slice_target_shape.dimensions_size());
for (int i = 0; i < slice_target_shape.dimensions_size(); ++i) {
dynamic_slice_sizes.push_back(slice_target_shape.dimensions(i));
}
sliced_data =
computation->AddInstruction(HloInstruction::CreateDynamicSlice(
slice_target_shape, data_to_slice, indices, dynamic_slice_sizes));
}
TF_ASSIGN_OR_RETURN(
sliced_data,
process_slice(sliced_data, pipelined_values_map, move_info));
return computation->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
dyn_update->shape(), stacked_data, sliced_data, indices));
};
for (int i = 0; i < new_loop_analysis.GetMoveInfos().size(); ++i) {
auto& move_info = new_loop_analysis.GetMoveInfos()[i];
HloDynamicUpdateSliceInstruction* dyn_update =
move_info.dynamic_update_slices.front();
std::vector<HloInstruction*> loop_output_to_replace;
HloInstruction* parameter_instr =
new_while_body->parameter_instructions()[0];
for (auto* user : new_while_loop->users()) {
if (user->tuple_index() != move_info.output_indices[0]) {
continue;
}
loop_output_to_replace.push_back(user);
}
const HloInstruction* dus_index_curr_iteration = dyn_update->operand(
dyn_update->first_index_operand_number() + move_info.sliced_idx);
const int64_t offset_for_index =
new_loop_analysis.GetDUSIndex(dus_index_curr_iteration) +
initial_inputs;
Shape index_shape = dus_index_curr_iteration->shape();
HloInstruction* input_dus_idx =
new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
index_shape, parameter_instr, offset_for_index));
if (insert_non_alias_custom_call) {
HloInstruction* level =
new_while_body->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0(level_to_operate_on + 1)));
input_dus_idx =
new_while_body->AddInstruction(HloInstruction::CreateCustomCall(
index_shape, {input_dus_idx, level},
CollectivePipeliner::kInsertedByPreviousStep));
}
HloInstruction* output_dus_idx =
loop_computation->AddInstruction(HloInstruction::CreateGetTupleElement(
index_shape, new_while_loop, offset_for_index));
HloInstruction* input_stacked_data = dyn_update->mutable_operand(0);
HloInstruction* output_stacked_data =
loop_computation->AddInstruction(HloInstruction::CreateGetTupleElement(
dyn_update->shape(), new_while_loop, move_info.output_indices[0]));
HloInstruction* input_data_to_slice = input_stacked_data;
HloInstruction* output_data_to_slice = output_stacked_data;
auto it = moves_requiring_special_output_to_idx.find(i);
if (it != moves_requiring_special_output_to_idx.end()) {
input_data_to_slice =
new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
move_info.collectives_to_move.front()->operand(0)->shape(),
parameter_instr, it->second));
output_data_to_slice = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(
move_info.collectives_to_move.front()->operand(0)->shape(),
new_while_loop, it->second));
}
TF_ASSIGN_OR_RETURN(input_stacked_data,
extract_and_process_slice(
input_stacked_data, input_data_to_slice, move_info,
pipelined_values_map_inloop, input_dus_idx));
TF_ASSIGN_OR_RETURN(
output_stacked_data,
extract_and_process_slice(output_stacked_data, output_data_to_slice,
move_info, pipelined_values_map_outloop,
output_dus_idx));
auto replace_instructions_with =
[](absl::Span<HloInstruction*> to_replace_instrs,
HloInstruction* new_instr) {
for (auto* to_replace : to_replace_instrs) {
HloComputation* computation = to_replace->parent();
TF_RETURN_IF_ERROR(to_replace->ReplaceAllUsesWith(new_instr));
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(to_replace));
}
return absl::OkStatus();
};
auto* new_peeled_dus = input_stacked_data;
if (it == moves_requiring_special_output_to_idx.end()) {
new_peeled_dus = insert_slice(
move_info.collectives_to_move.front()->mutable_operand(0),
move_info.sliced_idx,
dyn_update->operand_count() -
dyn_update->first_index_operand_number(),
dyn_update->mutable_operand(dyn_update->first_index_operand_number() +
move_info.sliced_idx),
input_stacked_data);
}
TF_RETURN_IF_ERROR(dyn_update->ReplaceAllUsesWith(new_peeled_dus));
TF_RETURN_IF_ERROR(
new_while_body->RemoveInstructionAndUnusedOperands(dyn_update));
TF_RETURN_IF_ERROR(replace_instructions_with(
absl::MakeSpan(loop_output_to_replace), output_stacked_data));
}
TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
return absl::OkStatus();
}
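// Forward-sink pipelining. In outline: rather than running the collective
// once per iteration, the loop accumulates every iteration's input (plus any
// loop-variant values its formatting ops need) into full-size buffers added
// to the loop tuple; after the loop, a single batched instance of the
// collective runs on the accumulated data, with the formatting ops expanded
// over the new major iteration dimension.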
absl::Status TransformLoopForwardSink(const WhileLoopAnalysis& loop_analysis,
bool insert_non_alias_custom_call,
int64_t level_to_operate_on,
bool pipeline_use_tree,
bool process_different_sized_ops,
HloPredicate should_process,
int64_t& next_channel_id) {
absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
absl::flat_hash_map<const HloInstruction*, bool> invariant_cache;
HloInstruction* while_loop = loop_analysis.while_loop_instruction();
HloComputation* while_body = while_loop->while_body();
CHECK_EQ(while_body->parameter_instructions().size(), 1)
<< "Expected only one parameter";
HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
HloInstruction* loop_init = while_loop->mutable_operand(0);
for (HloInstruction* inst : while_body->root_instruction()->operands()) {
if (inst->opcode() == HloOpcode::kDynamicUpdateSlice &&
inst->operand(1)->IsCustomCall(
CollectivePipeliner::kSunkByPreviousStep)) {
HloInstruction* cc = inst->mutable_operand(1);
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(1, cc->mutable_operand(0)));
TF_RETURN_IF_ERROR(cc->parent()->RemoveInstruction(cc));
}
}
CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
i;
}
HloComputation* loop_computation = while_loop->parent();
HloComputation* body_computation = while_loop->while_body();
std::vector<HloInstruction*> new_init_operands;
std::vector<Shape> new_parameter_shapes;
std::vector<HloInstruction*> new_root_operands;
absl::flat_hash_set<int64_t> indices_to_insert;
const int64_t operands_indices_count = loop_init->operand_count();
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
new_parameter_shapes.resize(operands_indices_count);
new_root_operands.resize(operands_indices_count);
new_init_operands.resize(operands_indices_count);
absl::flat_hash_set<int64_t> original_to_move_indices;
VLOG(1) << "Initial size for " << body_computation->name() << ": "
<< operands_indices_count;
absl::flat_hash_map<HloInstruction*, int64_t> collective_to_new_tuple_index;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (HloInstruction* collective : to_move.collectives_to_move) {
Shape shape =
ComputeFullOutputShape(to_move, collective->operand(0)->shape());
new_init_operands.push_back(
CreateZero(loop_computation, shape, shape.element_type()));
new_parameter_shapes.push_back(shape);
collective_to_new_tuple_index[collective] = new_root_operands.size();
indices_to_insert.insert(new_root_operands.size());
new_root_operands.push_back(collective->mutable_operand(0));
}
CHECK_EQ(to_move.dynamic_update_slices.size(),
to_move.output_indices.size());
for (int64_t i = 0; i < to_move.dynamic_update_slices.size(); ++i) {
int64_t output_idx = to_move.output_indices[i];
original_to_move_indices.insert(output_idx);
}
}
  for (int i = 0; i < loop_parameter->shape().tuple_shapes().size(); ++i) {
    new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
    new_init_operands[i] = loop_init->mutable_operand(i);
    if (original_to_move_indices.contains(i)) {
      // Moved outputs become passthroughs: their root operand is filled in
      // later with a get-tuple-element of the new parameter when users are
      // rewired below.
      continue;
    }
    new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
  }
VLOG(1) << "Size of " << body_computation->name()
<< " after adding collectives: " << new_root_operands.size();
absl::flat_hash_set<HloInstruction*> added_pipelined;
for (auto& move_info : loop_analysis.GetMoveInfos()) {
auto pipelined_instrs = CollectDependenciesToPipeline(
absl::MakeSpan(move_info.collectives_to_move),
absl::MakeSpan(move_info.formatting_ops));
for (auto* pipelined : pipelined_instrs) {
if (pipelined->opcode() == HloOpcode::kConstant) {
continue;
}
if (added_pipelined.contains(pipelined)) {
continue;
}
const bool is_loop_invariant =
IsLoopInvariant(pipelined, invariant_cache);
is_output_instruction[pipelined] = new_init_operands.size();
if (is_loop_invariant) {
new_parameter_shapes.push_back(pipelined->shape());
new_init_operands.push_back(
CreateZero(loop_computation, pipelined->shape(),
pipelined->shape().element_type()));
new_root_operands.push_back(pipelined);
added_pipelined.insert(pipelined);
continue;
}
Shape expanded_shape =
ComputeFullOutputShape(move_info, pipelined->shape());
new_parameter_shapes.push_back(expanded_shape);
new_init_operands.push_back(CreateZero(loop_computation, expanded_shape,
expanded_shape.element_type()));
Shape extra_trivial_dim_shape =
ShapeUtil::PrependMajorDimension(1, pipelined->shape());
HloInstruction* reshaped = body_computation->AddInstruction(
HloInstruction::CreateReshape(extra_trivial_dim_shape, pipelined));
Shape index_shape =
move_info.dynamic_update_slices.front()->index_shapes()[0];
std::vector<HloInstruction*> indices(
expanded_shape.dimensions_size(),
CreateZero(body_computation, index_shape,
index_shape.element_type()));
indices[0] = move_info.dynamic_update_slices.front()->index_operands()[0];
      // Placeholder custom-call carrying the tuple index of the new buffer;
      // it is replaced with a get-tuple-element of the cloned body's
      // parameter after the body is cloned below.
      HloInstruction* input =
          body_computation->AddInstruction(HloInstruction::CreateCustomCall(
              expanded_shape,
              {body_computation->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(
                      static_cast<int32_t>(new_root_operands.size()))))},
              "PlaceHolder"));
reshaped = body_computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(expanded_shape, input,
reshaped, indices));
new_root_operands.push_back(reshaped);
added_pipelined.insert(pipelined);
}
}
VLOG(1) << "Size of " << body_computation->name()
<< " after adding dependencies: " << new_root_operands.size();
std::unique_ptr<HloInstruction> new_parameter =
HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
absl::StrCat("sink_", loop_parameter->name()));
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (HloInstruction* collective : to_move.collectives_to_move) {
int64_t new_tuple_index = collective_to_new_tuple_index[collective];
HloInstruction* collective_operand = collective->mutable_operand(0);
HloInstruction* to_insert =
body_computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::PrependMajorDimension(1, collective_operand->shape()),
collective_operand));
Shape expanded_shape =
ComputeFullOutputShape(to_move, collective_operand->shape());
      // Same placeholder trick as above: patched to a get-tuple-element of
      // the new loop parameter once the body is cloned.
      HloInstruction* input =
          body_computation->AddInstruction(HloInstruction::CreateCustomCall(
              expanded_shape,
              {body_computation->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(
                      static_cast<int32_t>(new_tuple_index))))},
              "PlaceHolder"));
HloDynamicUpdateSliceInstruction* dyn_update =
to_move.dynamic_update_slices[0];
std::vector<HloInstruction*> indices(
expanded_shape.dimensions_size(),
CreateZero(body_computation, dyn_update->index_shapes()[0],
dyn_update->index_shapes()[0].element_type()));
indices[0] = dyn_update->index_operands()[0];
to_insert = body_computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(expanded_shape, input,
to_insert, indices));
new_root_operands[new_tuple_index] = to_insert;
}
}
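  // Rewire users of the moved tuple elements to read straight from the new
  // parameter; their transitive users inside the body are mapped to nullptr
  // in `replacements`, which causes them to be skipped when the body is
  // cloned below.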
for (auto* p_user : body_computation->parameter_instructions()[0]->users()) {
CHECK_EQ(p_user->opcode(), HloOpcode::kGetTupleElement);
const int64_t tuple_idx = p_user->tuple_index();
if (!original_to_move_indices.contains(tuple_idx)) {
continue;
}
replacements[p_user] =
HloInstruction::CreateGetTupleElement(new_parameter.get(), tuple_idx);
std::vector<HloInstruction*> stack(p_user->users().begin(),
p_user->users().end());
new_root_operands[tuple_idx] = replacements[p_user].get();
while (!stack.empty()) {
auto* u = stack.back();
stack.pop_back();
replacements[u] = nullptr;
for (auto* user : u->users()) {
if (user == body_computation->root_instruction()) {
continue;
}
stack.push_back(user);
}
}
}
std::unique_ptr<HloInstruction> new_root_instr =
HloInstruction::CreateTuple(new_root_operands);
replacements[body_computation->parameter_instruction(0)] =
std::move(new_parameter);
replacements[body_computation->root_instruction()] =
std::move(new_root_instr);
replacements[while_loop->while_condition()->parameter_instruction(0)] =
HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
absl::StrCat(
"sink_",
while_loop->while_condition()->parameter_instruction(0)->name()));
HloInstruction* new_init = loop_computation->AddInstruction(
HloInstruction::CreateTuple(new_init_operands));
HloComputation* cloned_body =
body_computation->parent()->AddEmbeddedComputation(
body_computation->CloneWithReplacements(&replacements));
HloComputation* cloned_cond =
body_computation->parent()->AddEmbeddedComputation(
while_loop->while_condition()->CloneWithReplacements(&replacements));
for (int64_t i = 0; i < cloned_body->root_instruction()->operand_count();
++i) {
HloInstruction* output =
cloned_body->root_instruction()->mutable_operand(i);
if (output->opcode() != HloOpcode::kDynamicUpdateSlice) {
continue;
}
if (!output->operand(0)->IsCustomCall("PlaceHolder")) {
continue;
}
auto idx = Cast<HloConstantInstruction>(output->operand(0)->operand(0))
->literal()
.GetFirstInteger();
auto* new_param =
cloned_body->AddInstruction(HloInstruction::CreateGetTupleElement(
output->shape(), cloned_body->parameter_instruction(0), *idx));
HloInstruction* old_operand_param = output->mutable_operand(0);
TF_RETURN_IF_ERROR(output->ReplaceOperandWith(0, new_param));
TF_RETURN_IF_ERROR(
old_operand_param->parent()->RemoveInstruction(old_operand_param));
if (insert_non_alias_custom_call && indices_to_insert.contains(i)) {
auto* old_operand = output->mutable_operand(1);
auto* custom_call =
cloned_body->AddInstruction(HloInstruction::CreateCustomCall(
old_operand->shape(), {old_operand},
CollectivePipeliner::kSunkByPreviousStep));
TF_RETURN_IF_ERROR(output->ReplaceOperandWith(1, custom_call));
}
}
HloInstruction* new_while =
loop_computation->AddInstruction(HloInstruction::CreateWhile(
new_init->shape(), cloned_cond, cloned_body, new_init));
std::vector<HloInstruction*> new_output_tuple;
new_output_tuple.resize(operands_indices_count, nullptr);
InstructionMap pipelined_map;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (int64_t i = 0; i < to_move.collectives_to_move.size(); ++i) {
HloInstruction* collective = to_move.collectives_to_move[i];
int64_t gte_index = collective_to_new_tuple_index[collective];
HloInstruction* to_sink = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, gte_index));
pipelined_map[collective->mutable_operand(0)] = to_sink;
}
const int64_t new_dim_limit =
to_move.dynamic_update_slices[0]->shape().dimensions(0);
auto pipelined_instrs = CollectDependenciesToPipeline(
absl::MakeSpan(to_move.collectives_to_move),
absl::MakeSpan(to_move.formatting_ops));
for (auto* original_pipelined : pipelined_instrs) {
if (original_pipelined->opcode() == HloOpcode::kConstant) {
continue;
}
const bool is_loop_invariant =
IsLoopInvariant(original_pipelined, invariant_cache);
CHECK(is_output_instruction.contains(original_pipelined));
int64_t pipelined_idx = is_output_instruction[original_pipelined];
HloInstruction* pipelined = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, pipelined_idx));
if (is_loop_invariant) {
Shape full_shape = ComputeFullOutputShape(to_move, pipelined->shape());
absl::InlinedVector<int64_t, 4> operand_dims;
operand_dims.resize(pipelined->shape().dimensions_size());
absl::c_iota(operand_dims, 1);
HloInstruction* broadcasted =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
full_shape, pipelined, operand_dims));
pipelined_map[original_pipelined] = broadcasted;
} else {
pipelined_map[original_pipelined] = pipelined;
}
}
for (HloInstruction* collective : to_move.collectives_to_move) {
HloInstruction* pipelined_instr_cloned =
loop_computation->AddInstruction(collective->CloneWithNewOperands(
ComputeFullOutputShape(to_move, collective->shape()),
{pipelined_map[collective->mutable_operand(0)]}));
UpdateInstructionChannelId(pipelined_instr_cloned, next_channel_id);
pipelined_map[collective] = pipelined_instr_cloned;
}
absl::flat_hash_set<HloInstruction*> to_add_batch_set;
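    // Maps an instruction's operands to their post-loop clones: constants are
    // re-cloned (and broadcast over the new iteration dimension when the user
    // is batched); all other operands must already be in pipelined_map.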
auto collect_operands = [&pipelined_map, &to_add_batch_set,
loop_computation,
&to_move](HloInstruction* instr) {
std::vector<HloInstruction*> operands;
for (auto* operand : instr->mutable_operands()) {
if (operand->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant = loop_computation->AddInstruction(
operand->CloneWithNewOperands(operand->shape(), {}));
if (!to_add_batch_set.contains(instr)) {
operands.push_back(cloned_constant);
continue;
}
Shape full_shape =
ComputeFullOutputShape(to_move, cloned_constant->shape());
absl::InlinedVector<int64_t, 4> operand_dims;
operand_dims.resize(cloned_constant->shape().dimensions_size());
absl::c_iota(operand_dims, 1);
HloInstruction* broadcasted =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
full_shape, cloned_constant, operand_dims));
operands.push_back(broadcasted);
continue;
}
auto it = pipelined_map.find(operand);
CHECK(it != pipelined_map.end());
operands.push_back(it->second);
}
return operands;
};
for (auto* current : to_move.formatting_ops) {
if (IsLoopInvariant(current, invariant_cache)) {
continue;
}
to_add_batch_set.insert(current);
}
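    // Replay the formatting ops on the batched values, expanding each opcode
    // over the new major iteration dimension: reduce/broadcast/transpose
    // dimensions are shifted by one, slices, dynamic-slices, and pads get a
    // full-size leading bound, and so on.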
for (HloInstruction* formatting_op : to_move.formatting_ops) {
if (pipelined_map.contains(formatting_op)) {
continue;
}
if (!to_add_batch_set.contains(formatting_op) &&
formatting_op->opcode() != HloOpcode::kBroadcast) {
HloInstruction* cloned_not_to_batch = loop_computation->AddInstruction(
formatting_op->CloneWithNewOperands(
formatting_op->shape(), collect_operands(formatting_op)));
UpdateInstructionChannelId(cloned_not_to_batch, next_channel_id);
pipelined_map[formatting_op] = cloned_not_to_batch;
continue;
}
if (formatting_op->IsElementwise() ||
formatting_op->opcode() == HloOpcode::kReshape ||
formatting_op->opcode() == HloOpcode::kAllReduce ||
formatting_op->opcode() == HloOpcode::kConvert ||
formatting_op->opcode() == HloOpcode::kCollectivePermute) {
HloInstruction* cloned_elementwise = loop_computation->AddInstruction(
formatting_op->CloneWithNewOperands(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)));
pipelined_map[formatting_op] = cloned_elementwise;
continue;
}
if (formatting_op->opcode() == HloOpcode::kReduce) {
auto operands = collect_operands(formatting_op);
std::vector<int64_t> dimensions(formatting_op->dimensions().begin(),
formatting_op->dimensions().end());
for (auto& dim : dimensions) {
++dim;
}
if (operands[1]->opcode() == HloOpcode::kBroadcast) {
CHECK(operands[1]->operand(0)->opcode() == HloOpcode::kConstant);
operands[1] = operands[1]->mutable_operand(0);
}
HloInstruction* expanded_reduce =
loop_computation->AddInstruction(HloInstruction::CreateReduce(
ComputeFullOutputShape(to_move, formatting_op->shape()),
operands[0], operands[1], dimensions,
formatting_op->to_apply()));
pipelined_map[formatting_op] = expanded_reduce;
continue;
}
if (formatting_op->opcode() == HloOpcode::kBroadcast) {
auto operands = collect_operands(formatting_op);
std::vector<int64_t> dimensions(1, 0);
for (const int64_t dim : formatting_op->dimensions()) {
dimensions.push_back(dim + 1);
}
if (operands[0]->shape().dimensions_size() == 0) {
dimensions.clear();
}
HloInstruction* expanded_broadcast =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
ComputeFullOutputShape(to_move, formatting_op->shape()),
operands[0], dimensions));
pipelined_map[formatting_op] = expanded_broadcast;
continue;
}
if (formatting_op->opcode() == HloOpcode::kSlice) {
std::vector<int64_t> slice_start = formatting_op->slice_starts();
std::vector<int64_t> slice_limits = formatting_op->slice_limits();
std::vector<int64_t> slice_strides = formatting_op->slice_strides();
slice_start.insert(slice_start.begin(), 0);
slice_limits.insert(slice_limits.begin(), new_dim_limit);
slice_strides.insert(slice_strides.begin(), 1);
HloInstruction* expanded_slice =
loop_computation->AddInstruction(HloInstruction::CreateSlice(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)[0], slice_start, slice_limits,
slice_strides));
pipelined_map[formatting_op] = expanded_slice;
continue;
}
if (formatting_op->opcode() == HloOpcode::kDynamicSlice) {
std::vector<int64_t> dynamic_slice_sizes =
formatting_op->dynamic_slice_sizes();
dynamic_slice_sizes.insert(dynamic_slice_sizes.begin(), new_dim_limit);
HloDynamicSliceInstruction* dynslice =
Cast<HloDynamicSliceInstruction>(formatting_op);
HloInstruction* zero = loop_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(
formatting_op->operand(dynslice->first_index_operand_number())
->shape()
.element_type())));
std::vector<HloInstruction*> indices(1, zero);
auto collected_operands = collect_operands(formatting_op);
indices.insert(indices.end(), std::next(collected_operands.begin()),
collected_operands.end());
HloInstruction* expanded_dynslice =
loop_computation->AddInstruction(HloInstruction::CreateDynamicSlice(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collected_operands[0], indices, dynamic_slice_sizes));
pipelined_map[formatting_op] = expanded_dynslice;
continue;
}
if (formatting_op->opcode() == HloOpcode::kPad) {
HloPadInstruction* pad_instruction =
Cast<HloPadInstruction>(formatting_op);
PaddingConfig p_config = pad_instruction->padding_config();
PaddingConfig new_p_config;
new_p_config.add_dimensions();
for (auto& dim : p_config.dimensions()) {
auto* new_dim = new_p_config.add_dimensions();
*new_dim = dim;
}
auto new_operands = collect_operands(formatting_op);
HloInstruction* expanded_pad =
loop_computation->AddInstruction(HloInstruction::CreatePad(
ComputeFullOutputShape(to_move, formatting_op->shape()),
new_operands[0], new_operands[1], new_p_config));
pipelined_map[formatting_op] = expanded_pad;
continue;
}
if (formatting_op->opcode() == HloOpcode::kTranspose) {
HloTransposeInstruction* transpose_instruction =
Cast<HloTransposeInstruction>(formatting_op);
std::vector<int64_t> new_dims(
transpose_instruction->dimensions().begin(),
transpose_instruction->dimensions().end());
new_dims.insert(new_dims.begin(), 0);
for (int64_t& dim : new_dims) {
++dim;
}
HloInstruction* expanded_transpose =
loop_computation->AddInstruction(HloInstruction::CreateTranspose(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)[0], new_dims));
pipelined_map[formatting_op] = expanded_transpose;
continue;
}
CHECK(false) << "Unsupported instruction " << formatting_op->ToString();
}
for (int64_t i = 0; i < to_move.output_indices.size(); ++i) {
HloDynamicUpdateSliceInstruction* d_update =
to_move.dynamic_update_slices[i];
HloInstruction* inserted_operand = d_update->mutable_operand(1);
CHECK(pipelined_map.contains(inserted_operand))
<< "Expected to be processed";
HloInstruction* expanded_inserted = pipelined_map[inserted_operand];
if (!ShapeUtil::Compatible(expanded_inserted->shape(),
d_update->shape())) {
expanded_inserted =
loop_computation->AddInstruction(HloInstruction::CreateReshape(
d_update->shape(), expanded_inserted));
}
new_output_tuple[to_move.output_indices[i]] = expanded_inserted;
}
}
for (int64_t i = 0; i < operands_indices_count; ++i) {
if (new_output_tuple[i] != nullptr) {
continue;
}
new_output_tuple[i] = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, i));
}
HloInstruction* new_tuple = loop_computation->AddInstruction(
HloInstruction::CreateTuple(new_output_tuple));
TF_RETURN_IF_ERROR(while_loop->ReplaceAllUsesWithDifferentShape(new_tuple));
TF_RETURN_IF_ERROR(
loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
return absl::OkStatus();
}
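// Backward pipelining. In outline: each collective is moved one iteration
// earlier. Its first instance is cloned in front of the loop from the init
// values, the new loop body computes the instance for the following iteration
// and threads the result (and the next iteration index) through extra tuple
// elements, and the remainder of the final iteration's body is emitted after
// the loop to reconstruct the original loop outputs.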
static absl::Status TransformLoopBackward(
const WhileLoopAnalysis& loop_analysis, bool insert_non_alias_custom_call,
int64_t level_to_operate_on, bool process_different_sized_ops,
HloPredicate should_process, HloPredicate acceptable_formatting,
CollectivePipeliner::HloPostprocessor postprocess_peeled,
CollectivePipeliner::HloPostprocessor postprocess_rotated,
int64_t& next_channel_id) {
absl::flat_hash_map<HloInstruction*, HloInstruction*> while_body_to_peeled;
absl::flat_hash_map<HloInstruction*, int64_t> collective_to_move_map;
absl::flat_hash_set<HloInstruction*> is_pipelined_instruction;
absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
absl::flat_hash_set<const HloInstruction*> sideeffect_unused_instructions;
int64_t count = 0;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
CHECK_EQ(to_move.collectives_to_move.size(), 1);
HloInstruction* instr = to_move.collectives_to_move[0];
collective_to_move_map[instr] = count;
is_pipelined_instruction.insert(instr);
is_pipelined_instruction.insert(to_move.formatting_ops.begin(),
to_move.formatting_ops.end());
++count;
if (instr->operand_count() == 1) {
const HloInstruction* opnd = instr->operand(0);
if (opnd->HasSideEffect() && opnd->user_count() == 1) {
sideeffect_unused_instructions.insert(opnd);
}
}
}
HloInstruction* while_loop = loop_analysis.while_loop_instruction();
HloComputation* while_body = while_loop->while_body();
CHECK_EQ(while_body->parameter_instructions().size(), 1)
<< "Expected only one parameter";
HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
HloInstruction* loop_initial_iteration_idx =
while_loop->mutable_operand(0)->mutable_operand(
*loop_analysis.GetLoopIterationIdx());
while_body_to_peeled[loop_parameter] = while_loop;
CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
i;
}
std::vector<HloInstruction*> new_init_operands;
std::vector<Shape> new_parameter_shapes;
std::vector<HloInstruction*> new_root_operands;
const int64_t operands_indices_count =
while_loop->shape().tuple_shapes_size() +
loop_analysis.GetMoveInfos().size() + 1;
new_parameter_shapes.resize(operands_indices_count);
new_root_operands.resize(operands_indices_count);
new_init_operands.resize(operands_indices_count);
for (int i = 0; i < loop_parameter->shape().tuple_shapes_size(); ++i) {
new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
new_init_operands[i] = while_loop->mutable_operand(0)->mutable_operand(i);
}
InstructionMap chain_clone_map;
chain_clone_map[loop_parameter] = while_loop->mutable_operand(0);
for (auto* u : loop_parameter->users()) {
if (IsLoopIterator(u, *loop_analysis.GetLoopIterationIdx())) {
chain_clone_map[u] = loop_initial_iteration_idx;
}
}
for (int i = 0; i < loop_analysis.GetMoveInfos().size(); ++i) {
const int64_t idx = i + loop_parameter->shape().tuple_shapes_size();
new_parameter_shapes[idx] =
loop_analysis.GetMoveInfos()[i].collectives_to_move[0]->shape();
new_root_operands[idx] =
loop_analysis.GetMoveInfos()[i].collectives_to_move[0];
TF_ASSIGN_OR_RETURN(
new_init_operands[idx],
CloneBackwardChain(*while_loop->parent(),
loop_analysis.GetMoveInfos()[i], chain_clone_map,
*loop_analysis.GetLoopIterationIdx(),
next_channel_id));
if (postprocess_peeled.has_value()) {
TF_RETURN_IF_ERROR(postprocess_peeled.value()(new_init_operands[idx]));
}
}
ConstantValue next_loop_iteration =
loop_analysis.GetLoopStart()->add(*loop_analysis.GetLoopIncrement());
const Shape& loop_index_shape =
while_loop->shape().tuple_shapes(*loop_analysis.GetLoopIterationIdx());
HloInstruction* next_iteration_idx = while_loop->parent()->AddInstruction(
HloInstruction::CreateConstant(*CreateLiteralOfShape(
loop_index_shape, next_loop_iteration.GetSignedValue())));
new_parameter_shapes.back() = loop_parameter->shape().tuple_shapes(
*loop_analysis.GetLoopIterationIdx());
new_init_operands.back() = next_iteration_idx;
auto body_builder = HloComputation::Builder(while_body->name());
HloInstruction* new_loop_param =
body_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "param"));
HloInstruction* loop_iterator_for_pipelined_instrs =
body_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
new_loop_param, new_init_operands.size() - 1));
InstructionMap while_body_replacement_map;
while_body_replacement_map[loop_parameter] = new_loop_param;
InstructionMap collective_to_move_clone_map;
collective_to_move_clone_map[loop_parameter] = new_loop_param;
for (auto* u : loop_parameter->users()) {
if (IsLoopIterator(u, *loop_analysis.GetLoopIterationIdx())) {
collective_to_move_clone_map[u] = loop_iterator_for_pipelined_instrs;
}
}
LoopVariantParameterInfo loop_variant_parameter_info;
for (auto* instr : while_body->MakeInstructionPostOrder()) {
if (instr == loop_parameter || instr == while_body->root_instruction() ||
sideeffect_unused_instructions.contains(instr)) {
continue;
}
HloInstruction* cloned_instr = nullptr;
auto it = collective_to_move_map.find(instr);
if (it != collective_to_move_map.end()) {
TF_ASSIGN_OR_RETURN(
cloned_instr,
CloneBackwardChain(body_builder,
loop_analysis.GetMoveInfos()[it->second],
collective_to_move_clone_map,
*loop_analysis.GetLoopIterationIdx(),
next_channel_id, &loop_variant_parameter_info));
if (postprocess_rotated.has_value()) {
TF_RETURN_IF_ERROR(postprocess_rotated.value()(cloned_instr));
}
} else {
auto new_operands =
MapNewOperands(instr->operands(), while_body_replacement_map);
cloned_instr = body_builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), new_operands));
TF_RETURN_IF_ERROR(UpdateControlDependencies(instr, cloned_instr,
while_body_replacement_map));
UpdateInstructionChannelId(cloned_instr, next_channel_id);
}
if (it != collective_to_move_map.end()) {
const int64_t tuple_idx =
while_loop->shape().tuple_shapes_size() + it->second;
HloInstruction* pipelined_value = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(new_loop_param, tuple_idx));
while_body_replacement_map[instr] = pipelined_value;
new_root_operands[tuple_idx] = cloned_instr;
continue;
}
while_body_replacement_map[instr] = cloned_instr;
}
for (const auto& [idx, value] : loop_variant_parameter_info) {
auto it = while_body_replacement_map.find(new_root_operands[idx]);
CHECK(it != while_body_replacement_map.end())
<< new_root_operands[idx]->ToString() << " not present in map";
TF_RETURN_IF_ERROR(value->ReplaceAllUsesWith(it->second));
}
new_root_operands.back() =
body_builder.AddInstruction(HloInstruction::CreateBinary(
loop_index_shape, HloOpcode::kAdd,
while_body_replacement_map
[new_root_operands[*loop_analysis.GetLoopIterationIdx()]],
body_builder.AddInstruction(
HloInstruction::CreateConstant(*CreateLiteralOfShape(
loop_index_shape, next_loop_iteration.GetSignedValue())))));
HloInstruction* new_loop_root =
body_builder.AddInstruction(HloInstruction::CreateTuple(
          // The trailing `true` keeps operands that have no entry in the
          // replacement map instead of requiring every operand to be mapped.
          MapNewOperands(new_root_operands, while_body_replacement_map,
                         true)));
while_body_replacement_map[while_body->root_instruction()] = new_loop_root;
HloComputation* new_while_body =
while_loop->GetModule()->AddEmbeddedComputation(
body_builder.Build(new_loop_root));
TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
new_loop_root,
while_body_replacement_map));
for (HloInstruction* instruction : new_while_body->instructions()) {
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        instruction, /*is_peeled=*/false,
        CollectivePipeliner::PipeliningDirection::kBackward, loop_analysis));
}
auto cond_builder =
HloComputation::Builder(while_loop->while_condition()->name());
HloInstruction* new_cond_param =
cond_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "cond_param"));
HloInstruction* loop_bound = cond_builder.AddInstruction(
HloInstruction::CreateConstant(*CreateLiteralOfShape(
loop_initial_iteration_idx->shape(),
loop_analysis.GetLoopStart()
->add(loop_analysis.GetLoopIterationCount()
->sub(ConstantValue::GetOne(
loop_analysis.GetLoopStart()->GetBitwidth(),
loop_analysis.GetLoopStart()->IsSigned()))
.mul(*loop_analysis.GetLoopIncrement()))
.GetSignedValue())));
ComparisonDirection cd =
loop_analysis.GetLoopIncrement()->GetSignedValue() > 0
? ComparisonDirection::kLt
: ComparisonDirection::kGt;
HloInstruction* loop_iterator =
cond_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
new_cond_param, *loop_analysis.GetLoopIterationIdx()));
HloInstruction* comparison =
cond_builder.AddInstruction(HloInstruction::CreateCompare(
while_loop->while_condition()->root_instruction()->shape(),
loop_iterator, loop_bound, cd));
HloComputation* new_while_condition =
while_loop->GetModule()->AddEmbeddedComputation(
cond_builder.Build(comparison));
HloInstruction* new_loop_init = while_loop->parent()->AddInstruction(
HloInstruction::CreateTuple(new_init_operands));
TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
new_loop_init, chain_clone_map));
HloInstruction* new_while_loop =
while_loop->parent()->AddInstruction(HloInstruction::CreateWhile(
new_while_body->root_instruction()->shape(), new_while_condition,
new_while_body, new_loop_init));
while_body_replacement_map.clear();
while_body_replacement_map[loop_parameter] = new_while_loop;
std::vector<HloInstruction*> output_tuple_instructions(
while_loop->shape().tuple_shapes_size(), nullptr);
for (auto* instr : while_body->MakeInstructionPostOrder()) {
if (instr == loop_parameter || instr == while_body->root_instruction() ||
sideeffect_unused_instructions.contains(instr)) {
continue;
}
auto instruction_is_output_it = is_output_instruction.find(instr);
auto it = collective_to_move_map.find(instr);
if (it != collective_to_move_map.end()) {
const int64_t tuple_idx =
while_loop->shape().tuple_shapes_size() + it->second;
HloInstruction* pipelined_value = while_loop->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while_loop, tuple_idx));
while_body_replacement_map[instr] = pipelined_value;
if (instruction_is_output_it != is_output_instruction.end()) {
output_tuple_instructions[instruction_is_output_it->second] =
pipelined_value;
}
continue;
}
auto new_operands =
MapNewOperands(instr->operands(), while_body_replacement_map);
HloInstruction* cloned_instr = while_loop->parent()->AddInstruction(
instr->CloneWithNewOperands(instr->shape(), new_operands));
TF_RETURN_IF_ERROR(UpdateControlDependencies(instr, cloned_instr,
while_body_replacement_map));
UpdateInstructionChannelId(cloned_instr, next_channel_id);
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        cloned_instr, /*is_peeled=*/true,
        CollectivePipeliner::PipeliningDirection::kBackward, loop_analysis));
while_body_replacement_map[instr] = cloned_instr;
if (instruction_is_output_it != is_output_instruction.end()) {
output_tuple_instructions[instruction_is_output_it->second] =
cloned_instr;
}
}
HloInstruction* final_loop_output = while_loop->parent()->AddInstruction(
HloInstruction::CreateTuple(output_tuple_instructions));
HloComputation* loop_computation = while_loop->parent();
TF_RETURN_IF_ERROR(
while_loop->ReplaceAllUsesWithDifferentShape(final_loop_output));
TF_RETURN_IF_ERROR(
loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
return absl::OkStatus();
}
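// Runs one pipelining pass: analyzes every while loop with a statically
// known, positive trip count, collects the collectives movable in the
// configured direction, applies the matching loop transformation, and on the
// last run strips the kInsertedByPreviousStep marker custom-calls.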
absl::StatusOr<bool> CollectivePipeliner::RunPipeliner(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to_analysis,
TuplePointsToAnalysis::Run(module));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
std::vector<std::pair<HloInstruction*, std::unique_ptr<WhileLoopAnalysis>>>
loop_analyses;
for (HloComputation* computation : module->MakeComputationPostOrder()) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
if (std::none_of(instruction->while_body()->instructions().begin(),
instruction->while_body()->instructions().end(),
config_.should_process)) {
continue;
}
VLOG(1) << "Pipelinable while: " << instruction->name();
auto loop_analysis = std::make_unique<WhileLoopAnalysis>(
instruction, config_.max_pipelining_per_loop,
config_.pipeline_use_tree, config_.process_different_sized_ops,
tuple_points_to_analysis.get(), call_graph.get());
loop_analysis->ComputeLoopStatistics();
if (loop_analysis->GetLoopIterationCount() &&
loop_analysis->GetLoopIterationCount()->GetUnsignedValue() > 0) {
loop_analyses.push_back(
std::make_pair(instruction, std::move(loop_analysis)));
}
}
}
int64_t transformed_loops = 0;
int64_t transformed_instructions = 0;
int64_t next_channel_id = hlo_query::NextChannelId(*module);
VLOG(1) << "Pipelining on direction: "
<< GetPipelineDirectionString(config_.pipelining_direction);
for (auto& [instruction, loop_analysis] : loop_analyses) {
VLOG(1) << "While iterations: "
<< loop_analysis->GetLoopIterationCount()->ToString();
loop_analysis->CollectCollectivesToMove(
config_.level_to_operate_on, config_.pipelining_direction,
config_.should_process, config_.acceptable_formatting,
config_.should_allow_loop_variant_parameter_in_chain,
config_.should_allow_control_dependencies,
config_.should_add_loop_invariant_op_in_chain);
if (loop_analysis->GetMoveInfos().empty()) {
continue;
}
transformed_instructions += loop_analysis->GetMoveInfos().size();
VLOG(1) << "Found Collectives to optimize";
if (VLOG_IS_ON(1)) {
int64_t id = 0;
for (auto& to_move : loop_analysis->GetMoveInfos()) {
VLOG(1) << "MoveInfo #" << id++ << "\n" << ToString(to_move);
}
}
if (config_.pipelining_direction == PipeliningDirection::kForward) {
CHECK(config_.reuse_pipelined_op_buffer);
TF_RETURN_IF_ERROR(TransformLoopForward(
*loop_analysis, !config_.last_run, config_.level_to_operate_on,
config_.pipeline_use_tree, config_.process_different_sized_ops,
config_.should_process, config_.acceptable_formatting,
config_.reuse_pipelined_op_buffer, next_channel_id));
} else if (config_.pipelining_direction ==
PipeliningDirection::kForwardSink) {
TF_RETURN_IF_ERROR(TransformLoopForwardSink(
*loop_analysis, !config_.last_run, config_.level_to_operate_on,
config_.pipeline_use_tree, config_.process_different_sized_ops,
config_.should_process, next_channel_id));
} else {
CHECK_EQ(config_.pipelining_direction, PipeliningDirection::kBackward);
TF_RETURN_IF_ERROR(TransformLoopBackward(
*loop_analysis, !config_.last_run, config_.level_to_operate_on,
config_.process_different_sized_ops, config_.should_process,
config_.acceptable_formatting, config_.postprocess_backward_peeled_op,
config_.postprocess_backward_rotated_op, next_channel_id));
}
++transformed_loops;
changed = true;
}
if (config_.last_run) {
std::vector<HloInstruction*> to_remove;
for (HloComputation* computation : module->MakeComputationPostOrder()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
CollectivePipeliner::kInsertedByPreviousStep)) {
to_remove.push_back(instruction);
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(instruction->mutable_operand(0)));
changed = true;
}
}
}
for (auto* instruction : to_remove) {
TF_RETURN_IF_ERROR(
instruction->parent()->RemoveInstructionAndUnusedOperands(
instruction));
}
}
VLOG(1) << "Transformed loops: " << transformed_loops
<< " and transformed instructions: " << transformed_instructions
<< " for pipelining direction: "
<< GetPipelineDirectionString(config_.pipelining_direction);
if (changed) {
TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
}
return changed;
}
absl::StatusOr<bool> CollectivePipeliner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
CHECK(config_.acceptable_formatting);
CHECK(config_.should_process);
if (config_.pipelining_direction != PipeliningDirection::kForwardSink) {
return RunPipeliner(module, execution_threads);
}
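  // kForwardSink is run to a fixed point: each successful run can expose new
  // sinking opportunities, so repeat until the module stops changing and
  // report whether any iteration made a change.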
bool changed = true;
int64_t iter = 0;
while (changed) {
TF_ASSIGN_OR_RETURN(changed, RunPipeliner(module, execution_threads));
VLOG(1) << "Finished running pipeliner's iteration: " << iter;
iter++;
}
return iter > 1;
}
} | #include "xla/service/collective_pipeliner.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace {
using ::testing::_;
namespace op = xla::testing::opcode_matchers;
class CollectivePipelinerTest : public HloTestBase {
public:
CollectivePipelinerTest() {
const int64_t kNumReplicas = 4;
const int64_t kNumComputations = 2;
    config_ = GetModuleConfigForTest(/*replica_count=*/kNumReplicas,
                                     /*num_partitions=*/kNumComputations);
}
protected:
const HloPredicate IsAllGather = HloPredicateIsOp<HloOpcode::kAllGather>;
HloModuleConfig config_;
};
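// Test helper: assembles a CollectivePipeliner::Config from the given knobs
// and runs the pass on `module`, sandwiched between HloVerifier runs.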
absl::StatusOr<bool> RunOptimizer(
HloModule* module, bool last_run, int64_t level_to_operate_on = 0,
bool pipeline_use_tree = false, bool process_different_sized_ops = true,
CollectivePipeliner::PipeliningDirection direction =
CollectivePipeliner::PipeliningDirection::kForward,
HloPredicate should_process = HloPredicateIsOp<HloOpcode::kAllReduce>,
HloPredicate acceptable_formatting = HloPredicateTrue,
HloPredicate reuse_pipelined_op_buffer = HloPredicateTrue,
HloPredicate should_allow_loop_variant_parameter_in_chain =
HloPredicateFalse,
CollectivePipeliner::HloPostprocessor postprocess_backward_peeled =
std::nullopt,
CollectivePipeliner::HloPostprocessor postprocess_backward_rotated =
std::nullopt,
bool should_add_loop_invariant_op_in_chain = false) {
  CollectivePipeliner::Config config = {
      /*level_to_operate_on=*/level_to_operate_on,
      /*max_pipelining_per_loop=*/INT64_MAX,
      /*last_run=*/last_run,
      /*pipeline_use_tree=*/pipeline_use_tree,
      /*process_different_sized_ops=*/process_different_sized_ops,
      /*direction=*/direction,
      /*should_process=*/should_process,
      /*acceptable_formatting=*/acceptable_formatting,
      /*reuse_pipelined_op_buffer=*/reuse_pipelined_op_buffer,
      /*should_allow_loop_variant_parameter_in_chain=*/
      should_allow_loop_variant_parameter_in_chain,
      /*should_allow_control_dependencies=*/false,
      /*postprocess_backward_peeled_op=*/postprocess_backward_peeled,
      /*postprocess_backward_rotated_op=*/postprocess_backward_rotated,
      /*should_add_loop_invariant_op_in_chain=*/
      should_add_loop_invariant_op_in_chain};
HloPassPipeline pass("optimizer");
  pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                            /*allow_mixed_precision=*/false);
pass.AddPass<CollectivePipeliner>(config);
  pass.AddPass<HloVerifier>(/*layout_sensitive=*/false,
                            /*allow_mixed_precision=*/false);
return pass.Run(module);
}
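// Baseline forward-pipelining case: the all-reduce (and the multiply feeding
// it) is hoisted out of the while loop, and the peeled final iteration is
// materialized in the entry computation.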
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOne) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(1);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
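// The all-reduce result reaches the dynamic-update-slice only through a pair
// of bitcasts; pipelining should still trigger and carry the bitcasts along.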
TEST_F(CollectivePipelinerTest, BitcastAsUser) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
current-loop-index = s32[] get-tuple-element(param), index=0
output-buffer = bf16[3,8,128] get-tuple-element(param), index=1
input-buffer = bf16[3,8,128] get-tuple-element(param), index=2
constant.1 = s32[] constant(1)
next-loop-index = s32[] add(current-loop-index, constant.1)
constant.0 = s32[] constant(0)
sliced-input-buffer = bf16[1,8,128] dynamic-slice(input-buffer, current-loop-index, constant.0, constant.0), dynamic_slice_sizes={1,8,128}
all-reduce = bf16[1,8,128] all-reduce(sliced-input-buffer), replica_groups={}, to_apply=add, channel_id=1
bitcast.0 = u16[3,8,128] bitcast(all-reduce)
bitcast.1 = bf16[3,8,128] bitcast(bitcast.0)
dynamic-update-slice = bf16[3,8,128] dynamic-update-slice(output-buffer, bitcast.1, current-loop-index, constant.0, constant.0)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(next-loop-index, dynamic-update-slice, input-buffer)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::Bitcast(), _, _, _));
const HloInstruction* cast_back = root->operand(1);
EXPECT_EQ(cast_back->opcode(), HloOpcode::kBitcast);
const HloInstruction* cast_to = cast_back->operand(0);
EXPECT_EQ(cast_to->opcode(), HloOpcode::kBitcast);
const HloInstruction* ar = cast_to->operand(0);
EXPECT_EQ(ar->opcode(), HloOpcode::kAllReduce);
}
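// Forward-pipelines the all-reduce in a loop that also contains a
// collective-permute annotated with _xla_send_recv_validation bounds.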
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}},
frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9},{4,10},{5,11},{6,12},{7,13}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(1);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneCollectivePermuteBackwardCycle) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}},
frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(1);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
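// After peeling, the cloned host-transfer send/send-done pair must still
// agree on their (rewritten) channel id.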
TEST_F(CollectivePipelinerTest, UpdateSendRecvChannelIdForHostTransfers) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
after-all = after-all()
send.88 = (s32[], u32[], token[]) send(
add.232, after-all), channel_id=2, is_host_transfer=true
send-done.88 = token[] send-done(send.88), channel_id=2, is_host_transfer=true
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
auto* entry_comp = module->entry_computation();
auto* unrolled_send_done = entry_comp->GetInstructionWithName("send-done.0");
ASSERT_THAT(unrolled_send_done, ::testing::NotNull());
auto* unrolled_send = unrolled_send_done->operand(0);
auto channel_id = [](const HloInstruction* instr) {
return DynCast<HloChannelInstruction>(instr)->channel_id();
};
EXPECT_EQ(channel_id(unrolled_send), channel_id(unrolled_send_done));
}
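// With reuse_pipelined_op_buffer returning false the pipelined value gets a
// dedicated loop-carried buffer, so the while shape grows to five elements.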
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNoReuse) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(
                  module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
                  /*pipeline_use_tree=*/false,
                  /*process_different_sized_ops=*/true,
                  CollectivePipeliner::PipeliningDirection::kForward,
                  /*should_process=*/HloPredicateIsOp<HloOpcode::kAllReduce>,
                  /*acceptable_formatting=*/
                  [](const HloInstruction* i) { return true; },
                  /*reuse_pipelined_op_buffer=*/
                  [](const HloInstruction* i) { return false; })
                  .value());
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 5);
}
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNotFirstIdx) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[8,3,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[8,3,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[8,1,128] dynamic-slice(get-tuple-element.5, constant.2561, select.1348, constant.2561), dynamic_slice_sizes={8,1,128}
mul = bf16[8,1,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[8,1,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[8,3,128] dynamic-update-slice(get-tuple-element.395, ar.1, constant.2561, select.1348, constant.2561)
ROOT tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[8,3,128] parameter(0)
tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(c0, p0, p0)
while = (s32[], bf16[8,3,128], bf16[8,3,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[8,3,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(2);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(2);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
TEST_F(CollectivePipelinerTest, TransformIncrementByTwo) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
const HloInstruction* sliced = root->operand(1)->operand(0);
EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* index = sliced->operand(1);
EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(index->tuple_index(), 3);
const HloInstruction* while_inst = index->operand(0);
EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
const HloInstruction* while_root =
while_inst->while_body()->root_instruction();
EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
const HloInstruction* dyn_upd = while_root->operand(1);
EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* prev_ar = dyn_upd2->operand(1);
EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
const HloInstruction* dyn_slice_top = prev_ar->operand(0);
EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(get_tuple_value->tuple_index(), 1);
EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
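// The induction variable starts at -1, so the pass cannot prove the computed
// slice index never wraps; the loop must be left untouched.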
TEST_F(CollectivePipelinerTest, NoTransformCantProveIndexDoesntWrap) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(4)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-1)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
}
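// Non-final run (last_run=false): the peeled all-reduce stays wrapped in a
// marker custom-call so that a later pipelining step can pick it up again.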
TEST_F(CollectivePipelinerTest, TransformNegativeIndexIterationToZero) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false).value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(
_,
op::CustomCall(op::AllReduce(op::DynamicSlice(
op::GetTupleElement(op::While()),
op::GetTupleElement(),
op::Constant(), op::Constant())),
op::Constant()),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
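// A slice of the accumulation buffer escapes the loop through a separate
// tuple element, so pipelining would change observable values and is skipped.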
TEST_F(CollectivePipelinerTest, EscapedInputNoTransform) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.911 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-slice.911, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
c1 = bf16[1,8,128] broadcast(cc), dimensions={}
c2 = bf16[3,8,128] broadcast(cc), dimensions={}
tuple = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) tuple(c0, p0, c1, c2)
while = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
XLA_VLOG_LINES(1, module->ToString());
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true).value());
}
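// Forward-pipelines an all-gather (selected via the IsAllGather predicate)
// whose result feeds the accumulating dynamic-update-slice.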
TEST_F(CollectivePipelinerTest, TransformWithAg) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
rs.1 = bf16[1,1,128] reduce-scatter(mul), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,8,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ag.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward,
                           /*should_process=*/IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(
_, op::AllGather(op::GetTupleElement(op::While())),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
TEST_F(CollectivePipelinerTest, TransformWithAgWithFormatting) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,9,128], bf16[3,9,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,9,128], bf16[3,9,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,9,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,9,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,9,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,9,128}
mul = bf16[1,9,128] multiply(dynamic-slice.99, dynamic-slice.99)
cpd = bf16[] constant(0)
%pd = bf16[1,16,128] pad(mul, cpd), padding=0_0x0_7x0_0
rs.1 = bf16[1,2,128] reduce-scatter(pd), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,16,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
slc = bf16[1,9,128] slice(ag.1), slice={[0:1], [0:9], [0:128]}
dynamic-update-slice.35 = bf16[3,9,128] dynamic-update-slice(get-tuple-element.395, slc, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,9,128], bf16[3,9,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,9,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,9,128], bf16[3,9,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,9,128], bf16[3,9,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,9,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward,
                           /*should_process=*/IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
op::DynamicUpdateSlice(
_, op::Slice(op::AllGather(op::GetTupleElement(op::While()))),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
TEST_F(CollectivePipelinerTest, TransformWithAgInsertCustomCall) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
constant.2561 = s32[] constant(0)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, get-tuple-element.394, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
rs.1 = bf16[1,1,128] reduce-scatter(mul), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,8,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ag.1, get-tuple-element.394, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-8)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward,
                           /*should_process=*/IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
  RunOptimizer(module.get(), /*last_run=*/true, /*level_to_operate_on=*/1)
      .value();
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(
_, op::AllGather(op::GetTupleElement(op::While())),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
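// Second-level pipelining: the loop has already been peeled once (note the
// "InsertedByPreviousStep" custom-call), and the all-gather is now pushed
// past the remaining per-iteration work.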
TEST_F(CollectivePipelinerTest, PushAgOver) {
constexpr absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(bf16[3,8,128]{2,1,0})->bf16[3,8,128]{2,1,0}}
%add (lhs: bf16[], rhs: bf16[]) -> bf16[] {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %lhs, bf16[] %rhs)
}
%while_body.clone (loop_peel_param: (s32[], bf16[3,8,128], s32[])) -> (s32[], bf16[3,8,128], s32[]) {
%loop_peel_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.2 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=0
%constant.7 = s32[] constant(1)
%add.4 = s32[] add(s32[] %get-tuple-element.2, s32[] %constant.7)
%get-tuple-element.3 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=1
%get-tuple-element.4 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=2
%constant.12 = s64[] constant(1)
%custom-call = s32[] custom-call(s32[] %get-tuple-element.4, s64[] %constant.12), custom_call_target="InsertedByPreviousStep"
%constant.13 = s32[] constant(0)
%constant.10 = s32[] constant(0)
%dynamic-slice.2 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13), dynamic_slice_sizes={1,8,128}
%ar.2 = bf16[1,1,128]{2,1,0} reduce-scatter(bf16[1,8,128]{2,1,0} %dynamic-slice.2), channel_id=2, replica_groups={}, to_apply=%add, dimensions={1}
%ag.2 = bf16[1,8,128]{2,1,0} all-gather(bf16[1,1,128]{2,1,0} %ar.2), channel_id=32, replica_groups={}, dimensions={1}
%dynamic-update-slice.2 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, bf16[1,8,128]{2,1,0} %ag.2, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13)
%dynamic-slice.1 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %get-tuple-element.2, s32[] %constant.10, s32[] %constant.10), dynamic_slice_sizes={1,8,128}
%mul.2 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.1, bf16[1,8,128]{2,1,0} %dynamic-slice.1)
%constant.15 = s32[] constant(0)
%dynamic-update-slice.4 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %dynamic-update-slice.2, bf16[1,8,128]{2,1,0} %mul.2, s32[] %get-tuple-element.2, s32[] %constant.15, s32[] %constant.15)
ROOT %tuple.3 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.4, bf16[3,8,128]{2,1,0} %dynamic-update-slice.4, s32[] %get-tuple-element.2)
}
%while_cond.clone (loop_peel_cond_param: (s32[], bf16[3,8,128], s32[])) -> pred[] {
%loop_peel_cond_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%gte.1 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_cond_param), index=0
%constant.6 = s32[] constant(0)
ROOT %cmp.1 = pred[] compare(s32[] %gte.1, s32[] %constant.6), direction=LT
}
ENTRY %entry (p0: bf16[3,8,128]) -> bf16[3,8,128] {
%c0 = s32[] constant(-3)
%p0 = bf16[3,8,128]{2,1,0} parameter(0)
%tuple.1 = (s32[], bf16[3,8,128]{2,1,0}) tuple(s32[] %c0, bf16[3,8,128]{2,1,0} %p0)
%get-tuple-element.0 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=0
%constant.0 = s32[] constant(1)
%constant.4 = s32[] constant(0)
%add.1 = s32[] add(s32[] %get-tuple-element.0, s32[] %constant.0)
%get-tuple-element.1 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=1
%dynamic-slice.0 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4), dynamic_slice_sizes={1,8,128}
%mul.1 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.0, bf16[1,8,128]{2,1,0} %dynamic-slice.0)
%dynamic-update-slice.0 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, bf16[1,8,128]{2,1,0} %mul.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4)
%tuple.4 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.1, bf16[3,8,128]{2,1,0} %dynamic-update-slice.0, s32[] %get-tuple-element.0)
%while.1 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) while((s32[], bf16[3,8,128]{2,1,0}, s32[]) %tuple.4), condition=%while_cond.clone, body=%while_body.clone
%get-tuple-element.6 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=1
%get-tuple-element.5 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=2
%constant.14 = s32[] constant(0)
%dynamic-slice.3 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14), dynamic_slice_sizes={1,8,128}
%ar.3 = bf16[1,8,128]{2,1,0} all-reduce(bf16[1,8,128]{2,1,0} %dynamic-slice.3), channel_id=3, replica_groups={}, to_apply=%add
ROOT %dynamic-update-slice.3 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, bf16[1,8,128]{2,1,0} %ar.3, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/1,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward,
                           /*should_process=*/IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
op::DynamicUpdateSlice(_, op::AllGather(), _, _, _),
op::AllReduce(op::DynamicSlice(op::DynamicUpdateSlice(), _, _, _)),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
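// Same setup as above, but with process_different_sized_ops disabled the
// all-gather, whose shape differs from its reduce-scatter input, must stay
// inside the loop.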
TEST_F(CollectivePipelinerTest, NoPushAgOverBecauseDifferentSize) {
constexpr absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(bf16[3,8,128]{2,1,0})->bf16[3,8,128]{2,1,0}}
%add (lhs: bf16[], rhs: bf16[]) -> bf16[] {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %lhs, bf16[] %rhs)
}
%while_body.clone (loop_peel_param: (s32[], bf16[3,8,128], s32[])) -> (s32[], bf16[3,8,128], s32[]) {
%loop_peel_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.2 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=0
%constant.7 = s32[] constant(1)
%add.4 = s32[] add(s32[] %get-tuple-element.2, s32[] %constant.7)
%get-tuple-element.3 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=1
%get-tuple-element.4 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=2
%constant.12 = s64[] constant(1)
%custom-call = s32[] custom-call(s32[] %get-tuple-element.4, s64[] %constant.12), custom_call_target="InsertedByPreviousStep"
%constant.13 = s32[] constant(0)
%constant.10 = s32[] constant(0)
%dynamic-slice.2 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13), dynamic_slice_sizes={1,8,128}
%ar.2 = bf16[1,1,128]{2,1,0} reduce-scatter(bf16[1,8,128]{2,1,0} %dynamic-slice.2), channel_id=2, replica_groups={}, to_apply=%add, dimensions={1}
%ag.2 = bf16[1,8,128]{2,1,0} all-gather(bf16[1,1,128]{2,1,0} %ar.2), channel_id=32, replica_groups={}, dimensions={1}
%dynamic-update-slice.2 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, bf16[1,8,128]{2,1,0} %ag.2, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13)
%dynamic-slice.1 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %get-tuple-element.2, s32[] %constant.10, s32[] %constant.10), dynamic_slice_sizes={1,8,128}
%mul.2 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.1, bf16[1,8,128]{2,1,0} %dynamic-slice.1)
%constant.15 = s32[] constant(0)
%dynamic-update-slice.4 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %dynamic-update-slice.2, bf16[1,8,128]{2,1,0} %mul.2, s32[] %get-tuple-element.2, s32[] %constant.15, s32[] %constant.15)
ROOT %tuple.3 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.4, bf16[3,8,128]{2,1,0} %dynamic-update-slice.4, s32[] %get-tuple-element.2)
}
%while_cond.clone (loop_peel_cond_param: (s32[], bf16[3,8,128], s32[])) -> pred[] {
%loop_peel_cond_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%gte.1 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_cond_param), index=0
%constant.6 = s32[] constant(0)
ROOT %cmp.1 = pred[] compare(s32[] %gte.1, s32[] %constant.6), direction=LT
}
ENTRY %entry (p0: bf16[3,8,128]) -> bf16[3,8,128] {
%c0 = s32[] constant(-3)
%p0 = bf16[3,8,128]{2,1,0} parameter(0)
%tuple.1 = (s32[], bf16[3,8,128]{2,1,0}) tuple(s32[] %c0, bf16[3,8,128]{2,1,0} %p0)
%get-tuple-element.0 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=0
%constant.0 = s32[] constant(1)
%constant.4 = s32[] constant(0)
%add.1 = s32[] add(s32[] %get-tuple-element.0, s32[] %constant.0)
%get-tuple-element.1 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=1
%dynamic-slice.0 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4), dynamic_slice_sizes={1,8,128}
%mul.1 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.0, bf16[1,8,128]{2,1,0} %dynamic-slice.0)
%dynamic-update-slice.0 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, bf16[1,8,128]{2,1,0} %mul.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4)
%tuple.4 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.1, bf16[3,8,128]{2,1,0} %dynamic-update-slice.0, s32[] %get-tuple-element.0)
%while.1 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) while((s32[], bf16[3,8,128]{2,1,0}, s32[]) %tuple.4), condition=%while_cond.clone, body=%while_body.clone
%get-tuple-element.6 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=1
%get-tuple-element.5 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=2
%constant.14 = s32[] constant(0)
%dynamic-slice.3 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14), dynamic_slice_sizes={1,8,128}
%ar.3 = bf16[1,8,128]{2,1,0} all-reduce(bf16[1,8,128]{2,1,0} %dynamic-slice.3), channel_id=3, replica_groups={}, to_apply=%add
ROOT %dynamic-update-slice.3 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, bf16[1,8,128]{2,1,0} %ar.3, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/false,
                            /*level_to_operate_on=*/1,
                            /*pipeline_use_tree=*/false,
                            /*process_different_sized_ops=*/false,
                            CollectivePipeliner::PipeliningDirection::kForward,
                            /*should_process=*/IsAllGather)
                   .value());
XLA_VLOG_LINES(1, module->ToString());
}
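// The all-reduce output is reformatted by a dynamic-slice before being
// written back; the formatting op is expected to move out of the loop
// together with the collective.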
TEST_F(CollectivePipelinerTest, TransformIncrementByTwoFormat) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,16,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,16,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,16,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,16,128] dynamic-slice(get-tuple-element.396, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,16,128}
mul = bf16[1,16,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,16,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ds.1 = bf16[1,8,128] dynamic-slice(ar.1, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ds.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,16,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.396)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,16,128] parameter(0)
c1 = bf16[] constant(0)
b1 = bf16[3,8,128] broadcast(c1), dimensions={}
tuple = (s32[], bf16[3,8,128], bf16[3,16,128]) tuple(c0, b1, p0)
while = (s32[], bf16[3,8,128], bf16[3,16,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
_, op::DynamicSlice(op::AllReduce(op::GetTupleElement()), _, _, _), _,
_, _));
}
TEST_F(CollectivePipelinerTest, TransformIncrementByTwoFormatTranspose) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,16,128], bf16[3,16,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,16,128], bf16[3,16,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,16,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,16,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,16,128] dynamic-slice(get-tuple-element.396, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,16,128}
mul = bf16[1,16,128] multiply(dynamic-slice.99, dynamic-slice.99)
reshape.1 = bf16[2,16,64] reshape(mul)
ar.1 = bf16[2,16,64] all-reduce(reshape.1), replica_groups={}, to_apply=add, channel_id=1
transpose.1 = bf16[64,2,16] transpose(ar.1), dimensions={2,0,1}
reshape.2 = bf16[1,16,128] reshape(transpose.1)
dynamic-update-slice.35 = bf16[3,16,128] dynamic-update-slice(get-tuple-element.395, reshape.2, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,16,128], bf16[3,16,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.396)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,16,128] parameter(0)
c1 = bf16[] constant(0)
b1 = bf16[3,16,128] broadcast(c1), dimensions={}
tuple.1 = (s32[], bf16[3,16,128], bf16[3,16,128]) tuple(c0, b1, p0)
while = (s32[], bf16[3,16,128], bf16[3,16,128]) while(tuple.1), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,16,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
_, op::Reshape(op::Transpose(op::AllReduce(op::GetTupleElement()))),
_, _, _));
}
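// Backward pipelining: the all-gather is peeled to before the loop, and the
// control-dependency edges on the peeled add and on the loop root must be
// preserved.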
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneBackwards) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/false,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           IsAllGather)
                  .value());

XLA_VLOG_LINES(1, module->ToString());
const int64_t while_count = absl::c_count_if(
module->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return HloPredicateIsOp<HloOpcode::kWhile>(instruction);
});
EXPECT_EQ(while_count, 1);
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
const HloInstruction* tuple = while_instr->operand(0);
EXPECT_TRUE(tuple->HasControlDependencies());
EXPECT_EQ(tuple->control_predecessors().size(), 1);
const HloInstruction* add_instr = tuple->control_predecessors()[0];
EXPECT_EQ(add_instr->opcode(), HloOpcode::kAdd);
const HloComputation* comp = while_instr->while_body();
const HloInstruction* root_loop = comp->root_instruction();
EXPECT_TRUE(root_loop->HasControlDependencies());
EXPECT_EQ(root_loop->control_predecessors().size(), 1);
const HloInstruction* add_instr_loop = root_loop->control_predecessors()[0];
EXPECT_EQ(add_instr_loop->opcode(), HloOpcode::kAdd);
}
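
// Same as the previous test, but the pipelined chain also contains a
// "MoveToDevice" offloading custom-call; both the all-gather and the
// custom-call must be cloned for the peeled and the in-loop iteration
// (hence the .1/.2 clones checked below).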
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsWithTwoDependentClones) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
custom-call = bf16[1,2,128] custom-call(r), custom_call_target="MoveToDevice"
a = bf16[1,2,128] add(custom-call, custom-call), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
auto is_all_gather_or_offloading = [](const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAllGather ||
instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget);
};
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/false,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           is_all_gather_or_offloading)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
const int64_t while_count = absl::c_count_if(
module->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return HloPredicateIsOp<HloOpcode::kWhile>(instruction);
});
EXPECT_EQ(while_count, 1);
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
const HloInstruction* tuple = while_instr->operand(0);
EXPECT_TRUE(tuple->HasControlDependencies());
EXPECT_EQ(tuple->control_predecessors().size(), 1);
const HloInstruction* add_instr = tuple->control_predecessors()[0];
EXPECT_EQ(add_instr->opcode(), HloOpcode::kAdd);
const HloComputation* comp = while_instr->while_body();
const HloInstruction* root_loop = comp->root_instruction();
EXPECT_TRUE(root_loop->HasControlDependencies());
EXPECT_EQ(root_loop->control_predecessors().size(), 1);
const HloInstruction* add_instr_loop = root_loop->control_predecessors()[0];
EXPECT_EQ(add_instr_loop->opcode(), HloOpcode::kAdd);
EXPECT_NE(FindInstruction(module.get(), "custom-call.1"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "custom-call.2"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "ag.1"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "ag.2"), nullptr);
}
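
// Backward-pipelines an all-gather in a loop whose accumulation buffer also
// flows through a collective-permute carrying _xla_send_recv_validation
// frontend attributes.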
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.395), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}},
frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9},{4,10},{5,11},{6,12},{7,13}}"}
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(cp, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(4);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(
      RunOptimizer(module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
                   /*pipeline_use_tree=*/false,
                   /*process_different_sized_ops=*/false,
                   CollectivePipeliner::PipeliningDirection::kBackward,
                   IsAllGather)
          .value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
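
// Variant of the previous test where the collective-permute's source-target
// pairs form a backward cycle, with correspondingly reversed
// _xla_send_recv_validation intervals.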
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsCollectivePermuteBackwardCycle) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.395), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}},
frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"}
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(cp, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_num_partitions(8);
config_.set_replica_count(4);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(
      RunOptimizer(module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
                   /*pipeline_use_tree=*/false,
                   /*process_different_sized_ops=*/false,
                   CollectivePipeliner::PipeliningDirection::kBackward,
                   IsAllGather)
          .value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
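
// The buffer consumed by the all-gather chain is overwritten inside the loop
// body (the tuple's third element is replaced by a fresh broadcast), so
// backward pipelining must bail out.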
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsModifyOut) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r)
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
constant.10 = bf16[] constant(0)
b = bf16[3,1,2,128] broadcast(constant.10), dimensions={}
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, b)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
                            /*level_to_operate_on=*/0,
                            /*pipeline_use_tree=*/false,
                            /*process_different_sized_ops=*/false,
                            CollectivePipeliner::PipeliningDirection::kBackward,
                            IsAllGather)
                   .value());
XLA_VLOG_LINES(1, module->ToString());
}
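
// Runs a backward pass to pipeline the all-gather, then a forward pass on the
// same module to pipeline the all-reduce.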
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsPlusForward) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r)
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) tuple(c0, p0, p1, p0)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
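
// As above, but the all-reduce is bracketed by f32<->bf16 converts, so the
// forward pass also has to handle the type-converting formatting ops.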
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneBackwardsPlusForwardConvertOutput) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = f32[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
get-tuple-element.5 = f32[3,8,128] get-tuple-element(param), index=3
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r)
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = f32[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
cvt.0 = bf16[1,8,128] convert(dynamic-slice.99)
mul = bf16[1,8,128] multiply(cvt.0, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
cvt.1 = f32[1,8,128] convert(ar.1)
dynamic-update-slice.35 = f32[3,8,128] dynamic-update-slice(get-tuple-element.395, cvt.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = f32[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) tuple(c0, p0, p1, p0)
while = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = f32[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           IsAllGather)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
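
// The all-reduce result feeds a tree of elementwise multiplies (which also
// reuse the pre-collective value) before the dynamic-update-slice; forward
// pipelining is run with pipeline_use_tree enabled.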
TEST_F(CollectivePipelinerTest, MultiUsesElementwise) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
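
// Like MultiUsesElementwise, but the formatting ops include two separate
// negates of the same all-reduce, exercising a stable ordering of the cloned
// formatting ops.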
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseSortFormattingOps) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
negate.1 = bf16[1,8,128] negate(ar.1)
negate.2 = bf16[1,8,128] negate(ar.1)
add = bf16[1,8,128] multiply(negate.1, negate.2)
mul3 = bf16[1,8,128] multiply(add, add)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
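
// The elementwise user multiplies the all-reduce result with the collective's
// own input, so both values must remain available after pipelining.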
TEST_F(CollectivePipelinerTest, ElementWiseUser) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul2, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
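
// Forward-sink pipelining of an all-reduce whose result is combined with a
// broadcast custom-call value; verifies that the control dependency on the
// select is kept on the transformed loop's root.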
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNotFirstIdxSink) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
%c = bf16[] custom-call(), custom_call_target="Boh"
%b = bf16[1,8,128] broadcast(c), dimensions={}
%a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::kForwardSink)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
const HloComputation* comp = while_instr->while_body();
const HloInstruction* root_loop = comp->root_instruction();
EXPECT_TRUE(root_loop->HasControlDependencies());
EXPECT_EQ(root_loop->control_predecessors().size(), 1);
const HloInstruction* select_instr_loop =
root_loop->control_predecessors()[0];
EXPECT_EQ(select_instr_loop->opcode(), HloOpcode::kSelect);
}
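
// Forward-sink variant (not the last run) where the sunk all-reduce must
// operate on the whole accumulation buffer: the all-reduce hoisted into the
// entry computation keeps the full leading dimension of 3.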
TEST_F(CollectivePipelinerTest,
TransformIncrementIndexByOneNotFirstIdxSinkCustomCall) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
%c = bf16[] custom-call(), custom_call_target="Boh"
%b = bf16[1,8,128] broadcast(c), dimensions={}
%a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::kForwardSink)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* all_reduce = module->entry_computation()
->root_instruction()
->operand(0)
->operand(1)
->operand(0)
->operand(0);
EXPECT_EQ(all_reduce->opcode(), HloOpcode::kAllReduce);
EXPECT_EQ(all_reduce->shape().dimensions(0), 3);
}
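
// The all-gather's input chain goes through a recv/recv-done pair, which must
// not be pipelined backwards.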
TEST_F(CollectivePipelinerTest, NotTransformAllGatherWithRecvInChainBackwards) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
after-all = token[] after-all()
recv = (bf16[1,1,2,128], u32[], token[]) recv(after-all), channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}"
}
send = (bf16[1,1,2,128], u32[], token[]) send(get-tuple-element.k, after-all), channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}"
}
send-done = token[] send-done(send), channel_id=2
recv-done = (bf16[1,1,2,128], token[]) recv-done(recv), channel_id=2
recv-data = bf16[1,1,2,128] get-tuple-element(recv-done), index=0
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(recv-data, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r)
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
                            /*level_to_operate_on=*/0,
                            /*pipeline_use_tree=*/false,
                            /*process_different_sized_ops=*/false,
                            CollectivePipeliner::PipeliningDirection::kBackward,
                            IsAllGather)
                   .value());
}
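
// Backward-pipelines a recv-done (selected by a custom predicate) and checks
// that the peeled and rotated send/recv clones keep a consistent channel id.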
TEST_F(CollectivePipelinerTest, TransformRecvSendBackwards) {
constexpr absl::string_view hlo_string = R"(
HloModule module
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
p = get-tuple-element(%param), index=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(p, after-all), channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}",
_xla_send_recv_pipeline="0"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-done = token[] send-done(send), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond, backend_config="{\"known_trip_count\":{\"n\":\"25\"}}"
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
auto should_pipeline = [](const HloInstruction* instruction) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone>(instruction)) return false;
const HloRecvDoneInstruction* recv_done =
dynamic_cast<const HloRecvDoneInstruction*>(instruction);
if (recv_done->is_host_transfer()) return false;
return (recv_done->user_count() == 1 && recv_done->parent() != nullptr &&
recv_done->users()[0] != recv_done->parent()->root_instruction());
};
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/false,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           should_pipeline)
                  .value());
XLA_VLOG_LINES(10, module->ToString());
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
EXPECT_EQ(recv1->channel_id(), send1->channel_id());
}
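
// Backward-pipelines send/recv where the send consumes a loop-variant tuple
// element; postprocessing hooks tag the peeled clones with
// _xla_other_attr="1" and the rotated clones with _xla_other_attr="2", which
// the assertions below verify.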
TEST_F(CollectivePipelinerTest,
TransformRecvSendBackwardsWithLoopVariantParameter) {
constexpr absl::string_view hlo_string = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_other_attr="0"
}
after-all.0.s = token[] after-all()
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_other_attr="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
auto should_pipeline = [](const HloInstruction* instr) {
if (!HloPredicateIsOp<HloOpcode::kRecv>(instr) &&
!HloPredicateIsOp<HloOpcode::kSend>(instr))
return false;
const HloSendRecvInstruction* send_recv =
dynamic_cast<const HloSendRecvInstruction*>(instr);
return (send_recv->user_count() == 1 && send_recv->parent() != nullptr &&
send_recv->users()[0] != send_recv->parent()->root_instruction());
};
auto should_allow_loop_variant_parameter = [](const HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->opcode() == HloOpcode::kParameter);
return true;
};
const char* kAttr = "_xla_other_attr";
auto postprocess_peeled = [&](HloInstruction* instr) {
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kAttr] = "1";
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
};
auto postprocess_rotated = [&](HloInstruction* instr) {
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kAttr] = "2";
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
};
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/false,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           should_pipeline,
                           /*acceptable_formatting=*/HloPredicateTrue,
                           /*reuse_pipelined_op_buffer=*/HloPredicateTrue,
                           should_allow_loop_variant_parameter,
                           postprocess_peeled, postprocess_rotated)
                  .value());
XLA_VLOG_LINES(10, module->ToString());
auto while_op = FindInstruction(module.get(), "while");
EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
EXPECT_EQ(recv1->channel_id(), send1->channel_id());
const char* kSourceTarget = "_xla_send_recv_source_target_pairs={{3,0}}";
const char* kPeeledAttr = "_xla_other_attr=\"1\"";
const char* kRotatedAttr = "_xla_other_attr=\"2\"";
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));
}
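
// Two all-reduces with identical operands (and the same channel id) feed the
// same multiply tree, exercising merging of equivalent collectives during
// forward pipelining.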
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseMerge) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
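
// ar.2 consumes ar.1 directly, so the pipelined chain contains two stacked
// all-reduces, with control dependencies hanging off ar.1.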
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseFeedTwo) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,8,128] all-reduce(ar.1), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, bc), control-predecessors={ar.1}
mul3 = bf16[1,8,128] multiply(mul2, ar.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5), control-predecessors={ar.1}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
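
// The two chained all-reduces are each followed by a reduce that collapses
// the broadcast dimension, adding reductions to the formatting ops.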
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseFeedTwoWithReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.2 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
bm = bf16[1,1,8,128] broadcast(mul), dimensions={1,2,3}
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,1,8,128] all-reduce(bm), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,1,8,128] all-reduce(ar.1), replica_groups={}, to_apply=add, channel_id=2
red.1 = bf16[1,8,128] reduce(ar.1, c2), to_apply=add.1, dimensions={0}
red.2 = bf16[1,8,128] reduce(ar.2, c2), to_apply=add.2, dimensions={0}
mul2 = bf16[1,8,128] multiply(red.1, bc), control-predecessors={ar.1}
mul3 = bf16[1,8,128] multiply(mul2, red.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5), control-predecessors={ar.1}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
}
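
// Forward-pipelines a reduce-scatter (followed by transpose/reshape
// formatting ops) and checks that the transformed module still passes the
// HLO verifier.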
TEST_F(CollectivePipelinerTest, PipelinedReduceScatterCanPassVerifier) {
constexpr absl::string_view hlo_string = R"(
HloModule module
to_apply0 {
Arg_0.732 = bf16[] parameter(0)
Arg_1.733 = bf16[] parameter(1)
ROOT add.734 = bf16[] add(Arg_0.732, Arg_1.733)
}
body {
p2 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) parameter(0)
gte2 = bf16[3,4096,4096]{2,1,0} get-tuple-element(p2), index=1
gte3 = bf16[10,512,3,4096]{3,2,1,0} get-tuple-element(p2), index=2
c2 = s32[] constant(9)
gte4 = s32[] get-tuple-element(p2), index=0
sub0 = s32[] subtract(c2, gte4)
c3 = s32[] constant(0)
comp1 = pred[] compare(sub0, c3), direction=LT
c4 = s32[] constant(19)
sub2 = s32[] subtract(c4, gte4)
sel0 = s32[] select(comp1, sub2, sub0)
rsp0 = bf16[3,4096,4096]{2,1,0} reshape(gte2)
rs0 = bf16[3,4096,512]{2,1,0} reduce-scatter(rsp0), channel_id=75, replica_groups={{0,1,2,3}}, dimensions={2}, to_apply=to_apply0
tran0 = bf16[512,3,4096]{0,2,1} transpose(rs0), dimensions={2,0,1}
rsp1 = bf16[1,512,3,4096]{3,2,1,0} reshape(tran0)
dus0 = bf16[10,512,3,4096]{3,2,1,0} dynamic-update-slice(gte3, rsp1, sel0, c3, c3, c3)
c5 = s32[] constant(1)
add0 = s32[] add(gte4, c5)
ROOT t1 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) tuple(add0, rsp0, dus0)
}
condition {
cond_p1 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) parameter(0)
gte1 = s32[] get-tuple-element(cond_p1), index=0
c1 = s32[] constant(9)
ROOT comp0 = pred[] compare(gte1, c1), direction=LT
}
ENTRY main.3813_spmd {
p0 = bf16[3,4096,4096]{2,1,0} parameter(0)
p1 = bf16[10,512,3,4096]{3,2,1,0} parameter(1)
c0 = s32[] constant(0)
t0 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) tuple(c0, p0, p1)
w0 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) while(t0), condition=condition, body=body
ROOT gte0 = bf16[3,4096,4096]{2,1,0} get-tuple-element(w0), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward,
                           HloPredicateIsOp<HloOpcode::kReduceScatter>)
                  .value());
XLA_VLOG_LINES(1, module->ToString());
  HloVerifier verifier(/*layout_sensitive=*/false,
                       /*allow_mixed_precision=*/true);
ASSERT_IS_OK(verifier.Run(module.get()).status());
}
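
// The loop-invariant broadcast feeds both the pipelined all-gather and an
// unrelated add; pipelining succeeds only when
// should_add_loop_invariant_op_in_chain is enabled, as the reference run
// below confirms.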
TEST_F(CollectivePipelinerTest,
PipelineBackwardIncludeInvariantMultiConsumerInChain) {
constexpr absl::string_view hlo_string = R"(
HloModule module
while_cond {
param = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(param), index=1
get-tuple-element.397 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(param), index=2
constant.1 = bf16[] constant(2)
broadcast.3593 = bf16[1,8,2048,32768]{3,2,1,0} broadcast(constant.1), dimensions={}
add.2 = bf16[1,8,2048,32768]{3,2,1,0} add(broadcast.3593, get-tuple-element.395)
all-gather.1 = bf16[1,64,2048,32768]{3,2,1,0} all-gather(broadcast.3593), channel_id=1, dimensions={1}, replica_groups={}
slice.2 = bf16[1,8,2048,32768]{3,2,1,0} slice(all-gather.1), slice={[0:1], [8:16], [0:2048], [0:32768]}
constant.2 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2)
ROOT tuple = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) tuple(add.230, add.2, slice.2)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[1,8,2048,32768]{3,2,1,0} parameter(0)
p1 = bf16[1,8,2048,32768]{3,2,1,0} parameter(1)
tuple = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) tuple(c0, p0, p1)
while = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(
RunOptimizer(
module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
/*pipeline_use_tree=*/false,
/*process_different_sized_ops=*/false,
CollectivePipeliner::PipeliningDirection::kBackward,
/*should_process=*/IsAllGather,
/*acceptable_formatting=*/HloPredicateTrue,
/*reuse_pipelined_op_buffer=*/HloPredicateTrue,
/*should_allow_loop_variant_parameter_in_chain=*/HloPredicateTrue,
std::nullopt,
std::nullopt,
/*should_add_loop_invariant_op_in_chain=*/true)
.value());
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_THAT(while_instr, op::While(op::Tuple(_, _, _, op::AllGather(), _)));
HloInstruction* root = while_instr->while_body()->root_instruction();
EXPECT_THAT(root, op::Tuple(_, _, _, op::AllGather(), _));
auto ref_module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(
RunOptimizer(
ref_module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
/*pipeline_use_tree=*/false,
/*process_different_sized_ops=*/false,
CollectivePipeliner::PipeliningDirection::kBackward,
/*should_process=*/IsAllGather,
/*acceptable_formatting=*/HloPredicateTrue,
/*reuse_pipelined_op_buffer=*/HloPredicateTrue,
/*should_allow_loop_variant_parameter_in_chain=*/HloPredicateTrue,
std::nullopt,
std::nullopt,
/*should_add_loop_invariant_op_in_chain=*/false)
.value());
}
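// A broadcast+reduce pair between the all-reduce and the dynamic-update-slice
// acts as a formatting op; forward-sinking should still move the all-reduce
// past the loop, so some user of the while must feed an all-reduce.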
TEST_F(CollectivePipelinerTest, BroadcastAsFormattingOp) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_GE(while_instr->users().size(), 2);
EXPECT_TRUE(
absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
return absl::c_any_of(
user->users(), [](const HloInstruction* user_user) {
return user_user->opcode() == HloOpcode::kAllReduce;
});
}));
}
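// Two all-reduces where the second depends on the first; after forward
// sinking, both should sit outside the loop, chained through the while
// results (verified by the breadth-first operand walk below).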
TEST_F(CollectivePipelinerTest, ForwardSinkDependentPipelineableCollectives) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
ar.2 = bf16[1,8,128] all-reduce(reduce), replica_groups={}, to_apply=add, channel_id=2
c1 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c1)
mul1 = bf16[1,8,128] multiply(ar.2, bc)
mul3 = bf16[1,8,128] multiply(mul1, ar.2)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul3, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_use_spmd_partitioning(true);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(
RunOptimizer(
module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink,
/*should_process=*/HloPredicateIsOp<HloOpcode::kAllReduce>,
/*acceptable_formatting=*/HloPredicateIsNotOp<HloOpcode::kAllReduce>)
.value());
XLA_VLOG_LINES(1, module->ToString());
std::function<const HloInstruction*(const HloInstruction*)> find_all_reduce =
[&](const HloInstruction* i) -> const HloInstruction* {
std::queue<const HloInstruction*> queue;
queue.push(i);
absl::flat_hash_set<HloInstruction*> visited;
while (!queue.empty()) {
const HloInstruction* curr_inst = queue.front();
queue.pop();
for (HloInstruction* operand : curr_inst->operands()) {
if (operand->opcode() == HloOpcode::kAllReduce) {
return operand;
}
if (visited.insert(operand).second) {
queue.push(operand);
}
}
}
return nullptr;
};
const HloInstruction* all_reduce1 =
find_all_reduce(module->entry_computation()->root_instruction());
EXPECT_NE(all_reduce1, nullptr);
const HloInstruction* all_reduce2 = find_all_reduce(all_reduce1);
EXPECT_NE(all_reduce2, nullptr);
EXPECT_THAT(all_reduce2, op::AllReduce(op::GetTupleElement(op::While())));
}
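// Same scenario as above but run with last_run=false: the pass must also tag
// the value sunk from the loop with a "SunkByPreviousStep" custom-call so a
// later pipelining step can find it.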
TEST_F(CollectivePipelinerTest,
ForwardSinkDependentPipelineableCollectivesNotLastRun) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
ar.2 = bf16[1,8,128] all-reduce(reduce), replica_groups={}, to_apply=add, channel_id=2
c1 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c1)
mul1 = bf16[1,8,128] multiply(ar.2, bc)
mul3 = bf16[1,8,128] multiply(mul1, ar.2)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul3, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_use_spmd_partitioning(true);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(
RunOptimizer(
module.get(), /*last_run=*/false,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink,
/*should_process=*/HloPredicateIsOp<HloOpcode::kAllReduce>,
/*acceptable_formatting=*/HloPredicateIsNotOp<HloOpcode::kAllReduce>)
.value());
XLA_VLOG_LINES(1, module->ToString());
std::function<const HloInstruction*(const HloInstruction*)> find_all_reduce =
[&](const HloInstruction* i) -> const HloInstruction* {
std::queue<const HloInstruction*> queue;
queue.push(i);
absl::flat_hash_set<HloInstruction*> visited;
while (!queue.empty()) {
const HloInstruction* curr_inst = queue.front();
queue.pop();
for (HloInstruction* operand : curr_inst->operands()) {
if (operand->opcode() == HloOpcode::kAllReduce) {
return operand;
}
if (visited.insert(operand).second) {
queue.push(operand);
}
}
}
return nullptr;
};
const HloInstruction* all_reduce1 =
find_all_reduce(module->entry_computation()->root_instruction());
EXPECT_NE(all_reduce1, nullptr);
const HloInstruction* all_reduce2 = find_all_reduce(all_reduce1);
EXPECT_NE(all_reduce2, nullptr);
EXPECT_THAT(all_reduce2, op::AllReduce(op::GetTupleElement(op::While())));
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
ASSERT_NE(while_instr, nullptr);
const HloInstruction* dynamic_update_slice =
while_instr->while_body()->root_instruction()->operands().back();
ASSERT_EQ(dynamic_update_slice->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* custom_call = dynamic_update_slice->operand(1);
EXPECT_TRUE(custom_call->IsCustomCall("SunkByPreviousStep"));
}
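// The accumulation buffer's leading dimension (5) does not match the loop
// trip count (3), so forward sinking must bail out.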
TEST_F(CollectivePipelinerTest, ForwardSinkFirstDimNotMatchingLoopCount) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[5,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[5,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[5,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
c = bf16[] custom-call(), custom_call_target="Boh"
b = bf16[1,8,128] broadcast(c), dimensions={}
a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[5,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[5,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[5,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
tuple = (s32[], bf16[5,8,128], bf16[3,8,128]) tuple(c0, p0, p1)
while = (s32[], bf16[5,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[5,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
}
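// The dynamic-update-slice writes along a non-leading dimension, which
// forward sinking does not support; expect no change.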
TEST_F(CollectivePipelinerTest, ForwardSinkNotFirstDim) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
%c = bf16[] custom-call(), custom_call_target="Boh"
%b = bf16[1,8,128] broadcast(c), dimensions={}
%a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, constant.2561, select.1348, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
}
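// One all-reduce feeding two distinct dynamic-update-slice accumulators;
// forward sinking should handle both outputs, leaving two reshapes in the
// reconstructed output tuple.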
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUS) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_TRUE(
absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
return absl::c_any_of(
user->users(), [](const HloInstruction* user_user) {
return user_user->opcode() == HloOpcode::kAllReduce;
});
}));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kGetTupleElement);
const HloInstruction* new_tuple =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_EQ(new_tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(absl::c_count_if(new_tuple->operands(),
[](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kReshape;
}),
2);
}
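// Multi-DUS variant run with last_run=false; additionally checks the
// "SunkByPreviousStep" marker left in the loop body for the next step.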
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUSNotLastRun) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
CHECK_NE(while_instr, nullptr);
EXPECT_TRUE(
absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
return absl::c_any_of(
user->users(), [](const HloInstruction* user_user) {
return user_user->opcode() == HloOpcode::kAllReduce;
});
}));
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kGetTupleElement);
const HloInstruction* new_tuple =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_EQ(new_tuple->opcode(), HloOpcode::kTuple);
EXPECT_EQ(absl::c_count_if(new_tuple->operands(),
[](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kReshape;
}),
2);
const HloInstruction* dynamic_update_slice =
while_instr->while_body()->root_instruction()->operand(4);
ASSERT_EQ(dynamic_update_slice->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* custom_call = dynamic_update_slice->operand(1);
EXPECT_TRUE(custom_call->IsCustomCall("SunkByPreviousStep"));
}
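// Both dynamic-update-slices write into the same buffer
// (get-tuple-element.395), which forward sinking does not support; expect no
// change.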
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUSSameBuffer) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
}
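// Two all-reduces, each feeding two dynamic-update-slice accumulators; the
// pass should merge them and sink all four consumer chains (reduce, multiply,
// divide, abs) past the loop.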
TEST_F(CollectivePipelinerTest, MergeTwoCollectivesEachWithTwoDUS) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.397 = bf16[3,8,128] get-tuple-element(param), index=3
get-tuple-element.398 = bf16[3,8,128] get-tuple-element(param), index=4
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=5
get-tuple-element.36 = bf16[3,8,128] get-tuple-element(param), index=6
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
dynamic-slice.100 = bf16[1,8,128] dynamic-slice(get-tuple-element.36, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul.1 = bf16[1,8,128] multiply(dynamic-slice.100, dynamic-slice.99)
ar.2 = bf16[1,8,128] all-reduce(mul.1), replica_groups={}, to_apply=add, channel_id=1
divide = bf16[1,8,128] divide(ar.1, ar.2)
dynamic-update-slice.37 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.397, divide, select.1348, constant.2561, constant.2561)
mul.2 = bf16[1,8,128] multiply(ar.2, ar.2)
abs = bf16[1,8,128] abs(mul.2)
dynamic-update-slice.38 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.398, abs, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, dynamic-update-slice.37, dynamic-update-slice.38, get-tuple-element.35, get-tuple-element.36)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
p2 = bf16[3,8,128] parameter(2)
p3 = bf16[3,8,128] parameter(3)
p4 = bf16[3,8,128] parameter(4)
p5 = bf16[3,8,128] parameter(5)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p1, p2, p3, p4, p5)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Tuple(
op::GetTupleElement(op::While()), op::Reshape(op::Reduce()),
op::Reshape(op::Multiply()), op::Reshape(op::Divide()),
op::Reshape(op::Abs()), op::GetTupleElement(op::While()),
op::GetTupleElement(op::While()))));
}
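// Same merge scenario run with last_run=false; the sunk values must carry
// "SunkByPreviousStep" custom-call markers at tuple indices 7 and 8 of the
// rewritten loop body.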
TEST_F(CollectivePipelinerTest, MergeTwoCollectivesEachWithTwoDUSNotLastRun) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.397 = bf16[3,8,128] get-tuple-element(param), index=3
get-tuple-element.398 = bf16[3,8,128] get-tuple-element(param), index=4
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=5
get-tuple-element.36 = bf16[3,8,128] get-tuple-element(param), index=6
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
dynamic-slice.100 = bf16[1,8,128] dynamic-slice(get-tuple-element.36, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul.1 = bf16[1,8,128] multiply(dynamic-slice.100, dynamic-slice.99)
ar.2 = bf16[1,8,128] all-reduce(mul.1), replica_groups={}, to_apply=add, channel_id=1
divide = bf16[1,8,128] divide(ar.1, ar.2)
dynamic-update-slice.37 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.397, divide, select.1348, constant.2561, constant.2561)
mul.2 = bf16[1,8,128] multiply(ar.2, ar.2)
abs = bf16[1,8,128] abs(mul.2)
dynamic-update-slice.38 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.398, abs, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, dynamic-update-slice.37, dynamic-update-slice.38, get-tuple-element.35, get-tuple-element.36)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
p2 = bf16[3,8,128] parameter(2)
p3 = bf16[3,8,128] parameter(3)
p4 = bf16[3,8,128] parameter(4)
p5 = bf16[3,8,128] parameter(5)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p1, p2, p3, p4, p5)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Tuple(
op::GetTupleElement(op::While()), op::Reshape(op::Reduce()),
op::Reshape(op::Multiply()), op::Reshape(op::Divide()),
op::Reshape(op::Abs()), op::GetTupleElement(op::While()),
op::GetTupleElement(op::While()))));
std::function<bool(const HloInstruction*)> is_dus_with_custom_call =
[&](const HloInstruction* inst) -> bool {
if (inst->opcode() != HloOpcode::kDynamicUpdateSlice) {
return false;
}
return inst->operand(1)->IsCustomCall("SunkByPreviousStep");
};
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
ASSERT_NE(while_instr, nullptr);
EXPECT_TRUE(is_dus_with_custom_call(
while_instr->while_body()->root_instruction()->operand(7)));
EXPECT_TRUE(is_dus_with_custom_call(
while_instr->while_body()->root_instruction()->operand(8)));
}
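// The broadcast feeding both pipelined all-reduce chains must appear exactly
// once in the entry computation after sinking rather than being duplicated
// per chain.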
TEST_F(CollectivePipelinerTest, NoRedundantBroadcastsInFormattingOps) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
convert = bf16[] convert(add.232)
broadcast = bf16[1,8,128] broadcast(convert)
add.1 = bf16[1,8,128] add(ar.1, broadcast)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, add.1, select.1348, constant.2561, constant.2561)
ar.2 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add.1, channel_id=2
add.2 = bf16[1,8,128] add(ar.2, broadcast)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, add.2, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
/*level_to_operate_on=*/0,
/*pipeline_use_tree=*/true,
/*process_different_sized_ops=*/true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
[](const HloInstruction* instr) {
return instr->opcode() ==
HloOpcode::kBroadcast &&
instr->operand(0)->opcode() ==
HloOpcode::kGetTupleElement &&
instr->operand(0)->operand(0)->opcode() ==
HloOpcode::kWhile;
}),
1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_pipeliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_pipeliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36859a3b-714d-4582-82d8-851c87f28936 | cpp | tensorflow/tensorflow | conditional_to_select | third_party/xla/xla/service/conditional_to_select.cc | third_party/xla/xla/service/conditional_to_select_test.cc | #include "xla/service/conditional_to_select.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
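// Rewrites a single kConditional whose branches are side-effect free and
// return a non-tuple value into a select: both branches are outlined as
// calls on the corresponding conditional operands, combined with
// select(pred, if_call, else_call), and then inlined. Returns true iff the
// instruction was rewritten.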
static absl::StatusOr<bool> DoConditionalToSelect(HloInstruction* conditional) {
if (conditional->true_computation()->HasSideEffect() ||
conditional->false_computation()->HasSideEffect()) {
VLOG(1) << "Not transforming conditional; branches have side effects:"
<< conditional->ToString();
return false;
}
auto computation = conditional->parent();
HloInstruction* if_call_op =
computation->AddInstruction(HloInstruction::CreateCall(
conditional->shape(), {conditional->mutable_operand(1)},
conditional->true_computation()));
conditional->SetupDerivedInstruction(if_call_op);
HloInstruction* else_call_op =
computation->AddInstruction(HloInstruction::CreateCall(
conditional->shape(), {conditional->mutable_operand(2)},
conditional->false_computation()));
conditional->SetupDerivedInstruction(else_call_op);
HloInstruction* condition = conditional->mutable_operand(0);
if (else_call_op->shape().IsTuple()) {
VLOG(1) << "Not transforming tuples to 'select'";
return false;
}
TF_ASSIGN_OR_RETURN(
HloInstruction * select_op,
MakeSelectHlo(condition, if_call_op, else_call_op, conditional));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, select_op));
TF_RETURN_IF_ERROR(CallInliner::Inline(if_call_op).status());
TF_RETURN_IF_ERROR(CallInliner::Inline(else_call_op).status());
return true;
}
absl::StatusOr<bool> ConditionalToSelect::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
bool did_mutate = false;
VLOG(1) << "Running conditional-to-select pass";
TF_RETURN_IF_ERROR(
call_graph->VisitNodes([&](const CallGraphNode& node) -> absl::Status {
if (node.context() != CallContext::kEmbedded) {
return absl::OkStatus();
}
for (const CallSite& callsite : node.callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kConditional) {
VLOG(1) << "Visiting conditional: " << callsite.ToString();
HloInstruction* conditional = callsite.instruction();
TF_ASSIGN_OR_RETURN(bool result,
DoConditionalToSelect(conditional));
did_mutate |= result;
}
}
return absl::OkStatus();
}));
return did_mutate;
}
} | #include "xla/service/conditional_to_select.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ConditionalToSelectTest = HloTestBase;
using ::testing::_;
TEST_F(ConditionalToSelectTest, MapConditionalConstants) {
const std::string hlo_text = R"(
HloModule MapConditionalConstants
if {
%pif = () parameter(0)
ROOT %cif = f32[] constant(0)
}
else {
%pelse = () parameter(0)
ROOT %celse = f32[] constant(1)
}
mapped {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
%lt = pred[] compare(%a, %b), direction=LT
%t = () tuple()
ROOT %conditional = f32[] conditional(%lt, %t, %t), true_computation=if, false_computation=else
}
ENTRY comp {
%p1 = f32[1000]{0} parameter(0)
%p2 = f32[1000]{0} parameter(1)
ROOT %mapped = f32[1000]{0} map(%p1, %p2), dimensions={0}, to_apply=mapped
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ConditionalToSelect pass;
ASSERT_TRUE(pass.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_EQ(root->opcode(), HloOpcode::kMap);
HloComputation* mapped = root->called_computations()[0];
EXPECT_THAT(mapped->root_instruction(),
op::Select(op::Lt(op::Parameter(0), op::Parameter(1)),
op::Constant(), op::Constant()));
}
TEST_F(ConditionalToSelectTest, MapConditionalNonScalar) {
const std::string hlo_text = R"(
HloModule MapConditionalNonScalar
if {
%pif = () parameter(0)
%zero = f32[] constant(0)
ROOT %zero_broadcasted = f32[2,2]{1,0} broadcast(%zero), dimensions={}
}
else {
%pelse = () parameter(0)
%one = f32[] constant(1)
ROOT %one_broadcasted = f32[2,2]{1,0} broadcast(%one), dimensions={}
}
add {
%add_lhs = f32[] parameter(0)
%add_rhs = f32[] parameter(1)
ROOT %add = f32[] add(%add_lhs, %add_rhs)
}
mapped {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
%lt = pred[] compare(%a, %b), direction=LT
%t = () tuple()
%conditional = f32[2,2]{1,0} conditional(%lt, %t, %t), true_computation=if, false_computation=else
%zero = f32[] constant(0)
ROOT %reduced = f32[] reduce(%conditional, %zero), dimensions={0,1}, to_apply=add
}
ENTRY comp {
%p1 = f32[1000]{0} parameter(0)
%p2 = f32[1000]{0} parameter(1)
ROOT %mapped = f32[1000]{0} map(%p1, %p2), dimensions={0}, to_apply=mapped
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_text).value();
ConditionalToSelect pass;
ASSERT_TRUE(pass.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_EQ(root->opcode(), HloOpcode::kMap);
HloComputation* mapped = root->called_computations()[0];
EXPECT_THAT(
mapped->root_instruction(),
op::Reduce(
op::Select(op::Broadcast(op::Lt(op::Parameter(0), op::Parameter(1))),
_, _),
_));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_to_select.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_to_select_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
210f416b-112b-4ae1-8653-76f0724b1b91 | cpp | tensorflow/tensorflow | hlo_execution_profile | third_party/xla/xla/service/hlo_execution_profile.cc | third_party/xla/xla/service/hlo_execution_profile_test.cc | #include "xla/service/hlo_execution_profile.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_execution_profile_data.pb.h"
#include "xla/service/human_readable_profile_builder.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
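// Assigns a dense profile-counter index to every computation and instruction
// in the module (each computation first, then its instructions in order),
// followed by one index per extra metric.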
HloProfileIndexMap::HloProfileIndexMap(
const HloModule& module, absl::Span<const std::string> extra_metrics) {
size_t current_profile_index = 0;
for (xla::HloComputation* computation : module.MakeComputationPostOrder()) {
InsertOrDie(&computation_to_profile_idx_, computation,
current_profile_index++);
for (const HloInstruction* instruction : computation->instructions()) {
InsertOrDie(&instruction_to_profile_idx_, instruction,
current_profile_index++);
}
}
for (const std::string& key : extra_metrics) {
InsertOrDie(&extra_metric_to_profile_idx_, key, current_profile_index++);
}
}
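// Builds the static metadata needed to render a human-readable profile:
// per-instruction names, categories and cost-analysis estimates, keyed by the
// counter indices assigned in `hlo_profile_index_map`.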
std::unique_ptr<HloProfilePrinterData> CreateHloProfilePrinterData(
const HloProfileIndexMap& hlo_profile_index_map,
const HloCostAnalysis& cost_analysis,
absl::string_view entry_computation_name) {
using HloComputationInfo = HloProfilePrinterData::HloComputationInfo;
using HloInstructionInfo = HloProfilePrinterData::HloInstructionInfo;
size_t profile_counters_size = hlo_profile_index_map.total_count();
std::unique_ptr<HloProfilePrinterData> profile_printer_data =
std::make_unique<HloProfilePrinterData>();
profile_printer_data->set_profile_counters_size(profile_counters_size);
profile_printer_data->mutable_computation_infos()->Reserve(
hlo_profile_index_map.computation_count());
const auto& computation_to_profile_idx_map =
hlo_profile_index_map.computation_to_profile_idx();
std::vector<std::pair<const HloComputation*, int64_t>>
computation_and_profile_idx_list(computation_to_profile_idx_map.begin(),
computation_to_profile_idx_map.end());
absl::c_sort(computation_and_profile_idx_list,
[](const std::pair<const HloComputation*, int64_t>& left,
const std::pair<const HloComputation*, int64_t>& right) {
return left.second < right.second;
});
for (const auto& pair : computation_and_profile_idx_list) {
CHECK_LT(pair.second, profile_counters_size);
const HloComputation* computation = pair.first;
HloComputationInfo* computation_info =
profile_printer_data->add_computation_infos();
*computation_info->mutable_name() = std::string(computation->name());
computation_info->set_profile_index(pair.second);
computation_info->mutable_instruction_infos()->Reserve(
computation->instruction_count());
for (const HloInstruction* hlo : computation->instructions()) {
HloInstructionInfo* instruction_info =
computation_info->add_instruction_infos();
instruction_info->set_long_name(hlo->ToString());
instruction_info->set_short_name(hlo->ToString(
HloPrintOptions().set_compact_operands(true).set_print_operand_names(
false)));
instruction_info->set_category(hlo->ToCategory());
instruction_info->set_flop_count(cost_analysis.flop_count(*hlo));
instruction_info->set_transcendental_count(
cost_analysis.transcendental_count(*hlo));
instruction_info->set_bytes_accessed(cost_analysis.bytes_accessed(*hlo));
instruction_info->set_optimal_seconds(
cost_analysis.optimal_seconds(*hlo));
instruction_info->set_profile_index(
hlo_profile_index_map.GetProfileIndexFor(*hlo));
}
}
for (const auto& pair : hlo_profile_index_map.extra_metric_to_profile_idx()) {
profile_printer_data->mutable_extra_metrics()->insert(
{pair.first, pair.second});
}
*profile_printer_data->mutable_entry_computation() =
std::string(entry_computation_name);
return profile_printer_data;
}
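// The per-instruction cycle counters start at zero; SetCyclesTakenBy fills
// them in as the profiled executable runs.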
HloExecutionProfile::HloExecutionProfile(
const HloProfilePrinterData* hlo_profile_printer_data,
const HloProfileIndexMap* hlo_profile_index_map)
: hlo_profile_printer_data_(*hlo_profile_printer_data),
hlo_profile_index_map_(*hlo_profile_index_map),
profile_counters_(hlo_profile_index_map_.total_count(),
/*value=*/0) {}
void HloExecutionProfile::SetCyclesTakenBy(const HloInstruction* hlo,
uint64_t cycles_taken) {
SetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(*hlo),
cycles_taken);
}
void HloExecutionProfile::SetCyclesTakenBy(size_t index,
uint64_t cycles_taken) {
profile_counters_[index] = cycles_taken;
}
uint64_t HloExecutionProfile::GetCyclesTakenBy(
const HloInstruction& hlo) const {
return GetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(hlo));
}
uint64_t HloExecutionProfile::GetCyclesTakenBy(size_t index) const {
return profile_counters_[index];
}
} | #include "xla/service/hlo_execution_profile.h"
#include "absl/strings/str_cat.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::AllOf;
using ::testing::ContainsRegex;
class HloExecutionProfileTest : public HloTestBase {};
TEST_F(HloExecutionProfileTest, Basic) {
auto hlo_module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
lhs = f32[30,30]{1,0} parameter(0)
rhs = f32[30,30]{1,0} parameter(1)
add = f32[30,30]{1,0} add(lhs, rhs)
ROOT dot = f32[30,30]{1,0} dot(lhs, add), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const HloInstruction* dot_instruction =
hlo_module->entry_computation()->root_instruction();
const HloInstruction* add_instruction = dot_instruction->operand(1);
Shape shape = ShapeUtil::MakeShape(F32, {30, 30});
auto shape_size_function = [&](const Shape& shape) {
const int64_t pointer_size = 8;
if (shape.IsOpaque()) {
return pointer_size;
}
return ShapeUtil::ByteSizeOf(shape, pointer_size);
};
HloCostAnalysis cost_analysis(shape_size_function);
HloProfileIndexMap profile_index_map(*hlo_module);
std::unique_ptr<HloProfilePrinterData> profile_printer =
CreateHloProfilePrinterData(profile_index_map, cost_analysis,
hlo_module->entry_computation()->name());
HloExecutionProfile execution_profile(profile_printer.get(),
&profile_index_map);
const int64_t add_cycles = 1000;
const int64_t dot_cycles = 4000;
execution_profile.SetCyclesTakenBy(add_instruction, add_cycles);
execution_profile.SetCyclesTakenBy(dot_instruction, dot_cycles);
float clock_rate_ghz = backend()
.default_stream_executor()
->GetDeviceDescription()
.clock_rate_ghz();
EXPECT_THAT(execution_profile.ToString(clock_rate_ghz),
AllOf(ContainsRegex(StrCat(dot_cycles, " cycles.*%",
dot_instruction->name())),
ContainsRegex(StrCat(add_cycles, " cycles.*%",
add_instruction->name()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_execution_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_execution_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cfe89afd-b300-4dca-84b3-14c19f75489f | cpp | tensorflow/tensorflow | hlo_dataflow_analysis | third_party/xla/xla/service/hlo_dataflow_analysis.cc | third_party/xla/xla/service/hlo_dataflow_analysis_test.cc | #include "xla/service/hlo_dataflow_analysis.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
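// Assigns each instruction a post-order ordinal. Recurses into computations
// called by kCall and kConditional, and into a kWhile's condition and body, so
// callee instructions receive ordinals before the calling instruction itself.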
int64_t CalculatePostOrderScheduleHelper(
const HloComputation* comp, int64_t start_ordinal,
absl::flat_hash_map<HloInstruction*, int64_t>* ordinal_map) {
int64_t ordinal = start_ordinal;
for (HloInstruction* instruction : comp->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
ordinal = CalculatePostOrderScheduleHelper(called_computation, ordinal,
ordinal_map);
}
}
if (instruction->opcode() == HloOpcode::kWhile) {
ordinal = CalculatePostOrderScheduleHelper(instruction->while_condition(),
ordinal, ordinal_map);
ordinal = CalculatePostOrderScheduleHelper(instruction->while_body(),
ordinal, ordinal_map);
}
ordinal_map->insert({instruction, ordinal++});
}
return ordinal;
}
absl::flat_hash_map<HloInstruction*, int64_t> CalculatePostOrderSchedule(
const HloModule& module) {
absl::flat_hash_map<HloInstruction*, int64_t> map;
CalculatePostOrderScheduleHelper(module.entry_computation(), 0, &map);
return map;
}
}
using absl::StrAppend;
using absl::StrCat;
HloDataflowAnalysis::HloDataflowAnalysis(
const HloModule& module, bool ssa_form, bool bitcast_defines_value,
const CanShareBuffer& can_share_buffer, const ForwardsValue& forwards_value,
absl::flat_hash_set<absl::string_view> execution_threads)
: module_(module),
execution_threads_(std::move(execution_threads)),
ssa_form_(ssa_form),
bitcast_defines_value_(bitcast_defines_value),
call_graph_(CallGraph::Build(&module)),
can_share_buffer_(can_share_buffer),
forwards_value_(forwards_value) {}
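// Returns true iff every user reachable transitively from `inst` consumes its
// input either elementwise or as a kTuple operand; any other use fails.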
bool HloDataflowAnalysis::AreTransitiveUsesElementwiseOrTuple(
const HloInstruction* inst) {
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 4> stack;
stack.push_back(inst);
while (!stack.empty()) {
const HloInstruction* current = stack.back();
stack.pop_back();
visited.insert(current);
for (const HloInstruction* user : current->users()) {
for (const int64_t use_index : user->OperandIndices(current)) {
if (!user->IsElementwiseOnOperand(use_index) &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
}
if (!visited.contains(user)) {
stack.push_back(user);
}
}
}
return true;
}
namespace {
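// Matches a rank-1 kSlice with unit stride.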
bool Is1dSliceWithoutStrides(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kSlice &&
1 == instr->slice_starts().size() &&
1 == instr->slice_limits().size() &&
1 == instr->slice_strides().size() &&
1 == instr->slice_strides().at(0);
}
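// Returns true if `unnested_hlo` is an input fusion whose root is a tuple of
// rank-1, unit-stride slices.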
bool IsSliceInputFusion(const HloInstruction& unnested_hlo) {
if (!unnested_hlo.IsInputFusion()) {
return false;
}
const HloInstruction* root = unnested_hlo.fused_expression_root();
if (root->opcode() != HloOpcode::kTuple) {
return false;
}
return absl::c_all_of(root->operands(), [](const HloInstruction* instr) {
return Is1dSliceWithoutStrides(instr);
});
}
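// Tracks how a concatenate was traversed: the concat seen so far, the operand
// index we entered it through, and the user slice that recovers exactly that
// operand's elements.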
struct ConcatUsageInfo {
const HloInstruction* prev_concat;
int64_t concat_opnd_idx;
const HloInstruction* slice_to_recover_opnd;
};
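// Verifies that every user of `concat` is a unit-stride 1-D slice and that,
// ordered by start offset, the slices tile the concat output back into its
// operands with no gaps or overlaps. Any previously recorded concat must be
// identical and entered through the same operand index. On success, returns
// updated usage info naming the slice that recovers `operand`.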
std::optional<ConcatUsageInfo> ConcatIsEffectivelyElementwise(
const HloInstruction& concat, const HloInstruction& operand,
const ConcatUsageInfo& info) {
std::vector<HloInstruction*> users = concat.users();
if (!absl::c_all_of(users, Is1dSliceWithoutStrides)) {
return std::optional<ConcatUsageInfo>();
}
if (users.size() != concat.operand_count() ||
concat.operand_count() != concat.unique_operands().size()) {
return std::optional<ConcatUsageInfo>();
}
absl::c_sort(users, [](const HloInstruction* a, const HloInstruction* b) {
return a->slice_starts().at(0) < b->slice_starts().at(0);
});
int64_t prev_limit = 0;
for (int64_t i = 0; i < users.size(); ++i) {
const HloInstruction* u = users[i];
int64_t slice_size = u->slice_limits().at(0) - u->slice_starts().at(0);
if (u->slice_starts().at(0) != prev_limit ||
slice_size != ShapeUtil::ElementsIn(concat.operand(i)->shape())) {
return std::optional<ConcatUsageInfo>();
}
prev_limit = u->slice_limits().at(0);
}
int64_t operand_idx = concat.operand_index(&operand);
if (info.prev_concat != nullptr) {
bool is_concat_identical = info.prev_concat->Identical(
concat,
[](const HloInstruction*, const HloInstruction*) {
return true;
});
if (!is_concat_identical || info.concat_opnd_idx != operand_idx) {
return std::optional<ConcatUsageInfo>();
}
}
const HloInstruction* slice_to_recover_opnd = users.at(operand_idx);
return std::optional<ConcatUsageInfo>(
ConcatUsageInfo{&concat, operand_idx, slice_to_recover_opnd});
}
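// Walks forward from `param`, allowing only elementwise uses, bitcasting
// reshapes, layout-preserving ops, and concat/slice pairs that cancel out per
// ConcatIsEffectivelyElementwise. Returns true only if the designated operand
// of `root_tuple` is actually reached along such a path.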
bool AreTransitiveUsesEffectivelyElementwise(const HloInstruction* param,
const HloInstruction* root_tuple,
const ShapeIndex& out_shape_idx) {
CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);
CHECK_EQ(out_shape_idx.size(), 1);
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 4> stack;
stack.push_back(param);
ConcatUsageInfo concat_usage_info{nullptr, 0, nullptr};
bool is_output_reachable = false;
while (!stack.empty()) {
const HloInstruction* current = stack.back();
stack.pop_back();
visited.insert(current);
for (const HloInstruction* user : current->users()) {
VLOG(3) << "Visiting: " << user->ToString();
switch (user->opcode()) {
case HloOpcode::kTuple:
if (user == root_tuple &&
current == root_tuple->operand(out_shape_idx.back())) {
is_output_reachable = true;
}
break;
case HloOpcode::kReshape:
if (!ShapeUtil::ReshapeIsBitcast(current->shape(), user->shape())) {
return false;
}
break;
case HloOpcode::kConcatenate: {
std::optional<ConcatUsageInfo> optional_concat_info =
ConcatIsEffectivelyElementwise(*user, *current,
concat_usage_info);
if (!optional_concat_info) {
return false;
}
concat_usage_info = *optional_concat_info;
CHECK(!visited.contains(concat_usage_info.slice_to_recover_opnd));
stack.push_back(concat_usage_info.slice_to_recover_opnd);
continue;
}
default:
for (const int64_t use_index : user->OperandIndices(current)) {
if (!user->IsElementwiseOnOperand(use_index)) {
return false;
}
}
if (!LayoutUtil::Equal(current->shape().layout(),
user->shape().layout())) {
return false;
}
break;
}
if (!visited.contains(user)) {
stack.push_back(user);
}
}
}
return is_output_reachable;
}
}
bool HloDataflowAnalysis::ValueIsDefinedAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
const HloValueSet& value_set = GetValueSet(instruction, index);
if (value_set.values().size() != 1) {
return false;
}
return value_set.GetUniqueValue().defining_instruction() == instruction;
}
const HloValue& HloDataflowAnalysis::GetValueDefinedAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
CHECK(ValueIsDefinedAt(instruction, index)) << instruction->ToString();
return GetUniqueValueAt(instruction, index);
}
HloValue& HloDataflowAnalysis::GetValueDefinedAt(
const HloInstruction* instruction, const ShapeIndex& index) {
CHECK(ValueIsDefinedAt(instruction, index));
return GetUniqueValueAt(instruction, index);
}
HloValue* HloDataflowAnalysis::NewHloValue(HloInstruction* instruction,
const ShapeIndex& index,
bool is_phi) {
const int64_t value_id = next_value_id_++;
auto result =
values_.insert({value_id, std::make_unique<HloValue>(
value_id, instruction, index, is_phi)});
CHECK(result.second);
VLOG(4) << "NewHloValue = " << result.first->second->ToShortString();
return result.first->second.get();
}
void HloDataflowAnalysis::MarkValueForDeletion(HloValue::Id value_id) {
const HloValue& value = GetValue(value_id);
VLOG(4) << "MarkValueForDeletion(" << value.ToShortString() << ")";
value_ids_to_delete_.push_back(value_id);
}
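// Erases the values previously passed to MarkValueForDeletion. In debug
// builds, first verifies that no live value set still references them.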
void HloDataflowAnalysis::DeleteMarkedValues() {
absl::flat_hash_set<HloValue::Id> id_set(value_ids_to_delete_.begin(),
value_ids_to_delete_.end());
#ifndef NDEBUG
for (const auto& pair : value_sets_) {
const HloInstruction* instruction = pair.first;
const InstructionValueSet& instruction_value_set = *pair.second;
for (const auto& index_value_set : instruction_value_set) {
const HloValueSet& value_set = index_value_set.second;
for (const HloValue* value : value_set.values()) {
DCHECK(!ContainsKey(id_set, value->id()))
<< "Value " << value->ToShortString()
<< " marked for deletion, but still exists in value set for "
"instruction "
<< instruction->name();
}
}
}
#endif
for (HloValue::Id value_id : id_set) {
values_.erase(value_id);
}
value_ids_to_delete_.clear();
}
std::string HloDataflowAnalysis::ToString() const {
std::string out =
StrCat("HloDataflowAnalysis, module ", module_.name(), "\n");
StrAppend(&out, " Instruction value sets:\n");
for (const HloComputation* computation : module_.computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
for (const HloInstruction* instruction : computation->instructions()) {
StrAppend(&out, "Instruction: \n ", instruction->name(), ":\n");
if (instruction->shape().IsTuple()) {
GetInstructionValueSet(instruction)
.ForEachElement([this, &instruction, &out](
const ShapeIndex& index,
const HloValueSet& value_set) {
StrAppend(&out, " tuple index ", index.ToString(), ":\n");
for (const HloValue* value : value_set.values()) {
StrAppend(&out, " ", value->ToShortString(),
ValueIsDefinedAt(instruction, index) ? " (def)" : "",
"\n");
}
});
} else {
const HloValueSet& top_level_value_set =
GetValueSet(instruction, {});
for (const HloValue* value : top_level_value_set.values()) {
StrAppend(&out, " ", value->ToShortString(),
ValueIsDefinedAt(instruction) ? " (def)" : "", "\n");
}
}
}
}
StrAppend(&out, " HloValues:\n");
for (const HloValue* value : values()) {
StrAppend(&out, value->ToString(4));
}
return out;
}
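// Merges `inputs` into `instruction`'s value set (SSA form only). A single
// distinct input value is forwarded as-is; multiple distinct values create or
// update a phi value registered with phi_graph_. Returns whether anything
// changed.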
bool HloDataflowAnalysis::Phi(
HloInstruction* instruction,
absl::Span<const InstructionValueSet* const> inputs) {
CHECK(ssa_form_);
VLOG(4) << "Phi(" << instruction->name() << ")";
VLOG(5) << "instruction value set = "
<< GetInstructionValueSet(instruction).ToString();
for (const InstructionValueSet* input : inputs) {
VLOG(5) << "input value set = " << input->ToString();
}
if (bitcast_defines_value_) {
absl::c_for_each(inputs, [&](const InstructionValueSet* input) {
DCHECK(ShapeUtil::Compatible(instruction->shape(), input->shape()));
});
} else {
const Shape& shape = instruction->shape();
PrimitiveType ty = shape.element_type();
bool is_array = shape.IsArray();
absl::c_for_each(inputs, [&](const InstructionValueSet* input) {
DCHECK(
ty == input->shape().element_type() &&
(!is_array ||
ShapeUtil::ElementsIn(shape) ==
ShapeUtil::ElementsIn(input->shape()) ||
ShapeUtil::ArraySize(shape) == ShapeUtil::ArraySize(input->shape())))
          << shape.ToString() << " vs. " << input->shape().ToString();
});
}
bool changed = false;
for (auto& pair : GetInstructionValueSet(instruction)) {
const ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
CHECK_LE(value_set.values().size(), 1);
const HloValue* current_value =
value_set.values().size() == 1 ? value_set.values()[0] : nullptr;
std::vector<HloValue::Id> input_value_ids;
for (const InstructionValueSet* input : inputs) {
for (const HloValue* value : input->element(index).values()) {
input_value_ids.push_back(value->id());
}
}
bool current_value_defined_here =
(current_value != nullptr &&
current_value->defining_instruction() == instruction &&
current_value->defining_index() == index);
VLOG(5) << "after input_value_ids.size = " << input_value_ids.size();
if (input_value_ids.empty()) {
CHECK_EQ(value_set.values().size(), 0)
<< "Instruction " << instruction->name() << " at index " << index
<< " previously had non-empty value set. Value set: " << value_set;
} else if (input_value_ids.size() == 1) {
const HloValue& new_value = GetValue(input_value_ids[0]);
if (current_value == nullptr) {
value_set.Clear();
value_set.AddValue(&new_value);
changed = true;
} else if (current_value != &new_value) {
if (current_value_defined_here) {
MarkValueForDeletion(current_value->id());
}
value_set.Clear();
value_set.AddValue(&new_value);
changed = true;
}
} else {
CHECK_GT(input_value_ids.size(), 1);
bool phi_defined_here =
current_value_defined_here && current_value->is_phi();
if (current_value == nullptr || !phi_defined_here) {
value_set.Clear();
value_set.AddValue(NewHloValue(instruction, index, true));
std::vector<HloValue*> inputs;
inputs.reserve(input_value_ids.size());
for (HloValue::Id id : input_value_ids) {
inputs.push_back(&GetValue(id));
}
phi_graph_.RegisterPhi(*value_set.values()[0], inputs);
changed = true;
} else if (phi_defined_here) {
std::vector<HloValue*> new_inputs;
new_inputs.reserve(input_value_ids.size());
for (HloValue::Id id : input_value_ids) {
new_inputs.push_back(&GetValue(id));
}
if (!phi_graph_.InputsEqualTo(*current_value, new_inputs)) {
VLOG(1) << current_value->ToShortString() << " has new phi inputs: ";
phi_graph_.RegisterPhi(*current_value, new_inputs);
changed = true;
}
}
}
}
return changed;
}
const HloValue& HloDataflowAnalysis::GetValue(HloValue::Id value_id) const {
DCHECK(values_.contains(value_id)) << "Value not found: " << value_id;
return *values_.find(value_id)->second;
}
HloValue& HloDataflowAnalysis::GetValue(HloValue::Id value_id) {
DCHECK(values_.contains(value_id)) << "Value not found: " << value_id;
return *values_.find(value_id)->second;
}
HloValueSet HloDataflowAnalysis::GetFlattenedValueSet(
const HloInstruction* instruction) const {
HloValueSet value_set;
const InstructionValueSet& value_set_tree =
GetInstructionValueSet(instruction);
std::vector<const HloValueSet*> all_sets;
for (auto& pair : value_set_tree) {
const HloValueSet& value_set = pair.second;
all_sets.push_back(&value_set);
}
value_set.AssignUnionOf(all_sets);
return value_set;
}
const HloValueSet& HloDataflowAnalysis::GetValueSet(
const HloInstruction* instruction, const ShapeIndex& index) const {
return GetInstructionValueSet(instruction).element(index);
}
HloValueSet& HloDataflowAnalysis::GetValueSet(const HloInstruction* instruction,
const ShapeIndex& index) {
return *GetInstructionValueSet(instruction).mutable_element(index);
}
const HloValueSet& HloDataflowAnalysis::GetValueSet(
const HloPosition& position) const {
return GetValueSet(position.instruction, position.index);
}
HloValueSet& HloDataflowAnalysis::GetValueSet(const HloPosition& position) {
return GetValueSet(position.instruction, position.index);
}
bool HloDataflowAnalysis::UpdateBitcastValueSet(HloInstruction* bitcast) {
CHECK_EQ(bitcast->opcode(), HloOpcode::kBitcast);
const InstructionValueSet& operand_set =
GetInstructionValueSet(bitcast->operand(0));
InstructionValueSet& bitcast_set = GetInstructionValueSet(bitcast);
if (!bitcast_defines_value_ && operand_set != bitcast_set) {
bitcast_set = operand_set;
return true;
}
return false;
}
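// kSend forwards its operand's values into element {0} of the output tuple;
// the context and token elements are defined by the send itself.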
bool HloDataflowAnalysis::UpdateSendValueSet(HloInstruction* send) {
CHECK_EQ(send->opcode(), HloOpcode::kSend);
bool changed = false;
for (auto& pair : GetInstructionValueSet(send->operand(0))) {
const ShapeIndex& operand_index = pair.first;
const HloValueSet& operand_value_set = pair.second;
ShapeIndex index = {0};
for (int64_t i : operand_index) {
index.push_back(i);
}
HloValueSet& value_set = GetValueSet(send, index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
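// async-start forwards operand i into output element {0, i} (array leaves
// only). If the wrapped computation runs on an included thread, the wrapped
// root's values are also forwarded into output element {1}.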
bool HloDataflowAnalysis::UpdateAsyncStartValueSet(
HloInstruction* async_start) {
CHECK_EQ(async_start->opcode(), HloOpcode::kAsyncStart);
bool changed = false;
for (int64_t i = 0; i < async_start->operand_count(); ++i) {
const HloInstruction* operand = async_start->operand(i);
ShapeUtil::ForEachSubshape(
operand->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
const HloValueSet& operand_value_set = GetValueSet(operand, index);
ShapeIndex output_index = {0, i};
output_index.insert(output_index.end(), index.begin(), index.end());
HloValueSet& value_set = GetValueSet(async_start, output_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
});
}
if (!HloInstruction::IsThreadIncluded(async_start->async_execution_thread(),
execution_threads_)) {
return changed;
}
HloInstruction* root =
async_start->async_wrapped_computation()->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
const HloValueSet& root_value_set = GetValueSet(root, index);
ShapeIndex output_index = {1};
output_index.insert(output_index.end(), index.begin(), index.end());
HloValueSet& value_set = GetValueSet(async_start, output_index);
if (value_set != root_value_set) {
value_set = root_value_set;
changed = true;
}
});
return changed;
}
bool HloDataflowAnalysis::UpdateAsyncUpdateValueSet(
HloInstruction* async_update) {
CHECK_EQ(async_update->opcode(), HloOpcode::kAsyncUpdate);
CHECK_EQ(async_update->shape(), async_update->operand(0)->shape());
bool changed = false;
HloInstruction* root =
HloInstruction::IsThreadIncluded(async_update->async_execution_thread(),
execution_threads_)
? async_update->async_wrapped_computation()->root_instruction()
: nullptr;
ShapeUtil::ForEachSubshape(
async_update->operand(0)->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
const HloValueSet& operand_value_set =
GetValueSet(async_update->operand(0), index);
HloValueSet& value_set = GetValueSet(async_update, index);
CHECK_GE(index.size(), 0);
if (index[0] != 1) {
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
} else if (root != nullptr) {
ShapeIndex root_index(index.begin() + 1, index.end());
const HloValueSet& root_value_set = GetValueSet(root, root_index);
changed |=
value_set.AssignUnionOf({&operand_value_set, &root_value_set});
} else if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
});
return changed;
}
bool HloDataflowAnalysis::UpdateAsyncDoneValueSet(HloInstruction* async_done) {
CHECK_EQ(async_done->opcode(), HloOpcode::kAsyncDone);
bool changed = false;
HloInstruction* root =
HloInstruction::IsThreadIncluded(async_done->async_execution_thread(),
execution_threads_)
? async_done->async_wrapped_computation()->root_instruction()
: nullptr;
ShapeUtil::ForEachSubshape(
async_done->operand(0)->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray() || index.front() != 1) {
return;
}
const HloValueSet& operand_value_set =
GetValueSet(async_done->operand(0), index);
ShapeIndex output_index(index.begin() + 1, index.end());
HloValueSet& value_set = GetValueSet(async_done, output_index);
if (root != nullptr) {
const HloValueSet& root_value_set = GetValueSet(root, output_index);
changed |=
value_set.AssignUnionOf({&operand_value_set, &root_value_set});
} else if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
});
return changed;
}
bool HloDataflowAnalysis::UpdateCopyStartValueSet(HloInstruction* copy_start) {
CHECK_EQ(copy_start->opcode(), HloOpcode::kCopyStart);
bool changed = false;
const HloValueSet& operand_value_set = GetValueSet(copy_start->operand(0));
HloValueSet& value_set = GetValueSet(copy_start, {1});
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
return changed;
}
bool HloDataflowAnalysis::UpdateCopyDoneValueSet(HloInstruction* copy_done) {
CHECK_EQ(copy_done->opcode(), HloOpcode::kCopyDone);
bool changed = false;
const HloValueSet& operand_value_set =
GetValueSet(copy_done->operand(0), {0});
HloValueSet& value_set = GetValueSet(copy_done);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
return changed;
}
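// recv-done forwards element {0} of its operand into element {0} of its
// output; the token element is defined by recv-done itself.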
bool HloDataflowAnalysis::UpdateRecvDoneValueSet(HloInstruction* recv_done) {
CHECK_EQ(recv_done->opcode(), HloOpcode::kRecvDone);
bool changed = false;
for (auto& pair : GetInstructionValueSet(recv_done)) {
    const ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
if (index.empty() || index[0] != 0) {
continue;
}
const HloValueSet& operand_value_set =
GetValueSet(recv_done->operand(0), index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateCallValueSet(HloInstruction* call) {
CHECK_EQ(call->opcode(), HloOpcode::kCall);
InstructionValueSet& value_set = GetInstructionValueSet(call);
InstructionValueSet& root_value_set =
GetInstructionValueSet(call->to_apply()->root_instruction());
if (value_set != root_value_set) {
value_set = root_value_set;
return true;
}
return false;
}
bool HloDataflowAnalysis::UpdateConditionalValueSet(
HloInstruction* conditional) {
CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
std::vector<const InstructionValueSet*> inputs(conditional->branch_count());
for (int j = 0; j < conditional->branch_count(); ++j) {
inputs[j] = &GetInstructionValueSet(
conditional->branch_computation(j)->root_instruction());
}
if (ssa_form_) {
return Phi(conditional, inputs);
} else {
return GetInstructionValueSet(conditional).AssignUnionOf(inputs);
}
}
bool HloDataflowAnalysis::UpdateCopyValueSet(HloInstruction* copy) {
CHECK_EQ(copy->opcode(), HloOpcode::kCopy);
bool changed = false;
for (auto& pair : GetInstructionValueSet(copy)) {
const ShapeIndex& index = pair.first;
if (index.empty()) {
continue;
}
HloValueSet& value_set = pair.second;
HloValueSet& operand_value_set = GetValueSet(copy->operand(0), index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateOptimizationBarrierValueSet(
HloInstruction* barrier) {
CHECK_EQ(barrier->opcode(), HloOpcode::kOptimizationBarrier);
bool changed = false;
for (auto& pair : GetInstructionValueSet(barrier)) {
const ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
HloValueSet& operand_value_set = GetValueSet(barrier->operand(0), index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateDomainValueSet(HloInstruction* domain) {
CHECK_EQ(domain->opcode(), HloOpcode::kDomain);
bool changed = false;
for (auto& pair : GetInstructionValueSet(domain)) {
const ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
HloValueSet& operand_value_set = GetValueSet(domain->operand(0), index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateAddDependencyValueSet(
HloInstruction* add_dependency) {
CHECK_EQ(add_dependency->opcode(), HloOpcode::kAddDependency);
const InstructionValueSet& operand_set =
GetInstructionValueSet(add_dependency->operand(0));
InstructionValueSet& add_dependency_set =
GetInstructionValueSet(add_dependency);
if (operand_set != add_dependency_set) {
add_dependency_set = operand_set;
return true;
}
return false;
}
bool HloDataflowAnalysis::UpdateGetTupleElementValueSet(HloInstruction* gte) {
CHECK_EQ(gte->opcode(), HloOpcode::kGetTupleElement);
bool changed = false;
for (auto& pair : GetInstructionValueSet(gte)) {
const ShapeIndex& index = pair.first;
HloValueSet& value_set = pair.second;
ShapeIndex operand_index = {gte->tuple_index()};
for (int64_t i : index) {
operand_index.push_back(i);
}
HloValueSet& operand_value_set =
GetValueSet(gte->operand(0), operand_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
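// A parameter's value set is the union (or, in SSA form, the phi) of the
// values flowing in from its callers: call/async-start operands, a while's
// init operand and body root, or the matching conditional branch operand.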
bool HloDataflowAnalysis::UpdateParameterValueSet(HloInstruction* parameter) {
CHECK_EQ(parameter->opcode(), HloOpcode::kParameter);
const CallGraphNode& call_graph_node =
call_graph_->GetNode(parameter->parent());
if (call_graph_node.context() == CallContext::kEmbedded ||
call_graph_node.caller_callsites().empty()) {
return false;
}
CHECK_EQ(call_graph_node.context(), CallContext::kControlFlow);
std::vector<const InstructionValueSet*> inputs;
bool need_phi = false;
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
const HloOpcode& opcode = callsite.instruction()->opcode();
if (opcode == HloOpcode::kCall) {
inputs.push_back(&GetInstructionValueSet(
callsite.instruction()->operand(parameter->parameter_number())));
} else if (opcode == HloOpcode::kWhile) {
CHECK_EQ(parameter->parameter_number(), 0);
inputs.push_back(
&GetInstructionValueSet(callsite.instruction()->operand(0)));
if (parameter !=
callsite.instruction()->while_body()->root_instruction()) {
inputs.push_back(&GetInstructionValueSet(
callsite.instruction()->while_body()->root_instruction()));
}
need_phi = true;
} else if (opcode == HloOpcode::kConditional) {
CHECK_EQ(parameter->parameter_number(), 0);
auto conditional = callsite.instruction();
bool found_parent = false;
for (int j = 0; j < conditional->branch_count(); ++j) {
if (parameter->parent() == conditional->branch_computation(j)) {
inputs.push_back(
&GetInstructionValueSet(conditional->operand(j + 1)));
found_parent = true;
break;
}
}
CHECK(found_parent);
need_phi = true;
} else if (opcode == HloOpcode::kAsyncStart) {
inputs.push_back(&GetInstructionValueSet(
callsite.instruction()->operand(parameter->parameter_number())));
} else if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
return GetInstructionValueSet(parameter).AssignUnionOf(
GetInstructionValueSet(callsite.instruction()->operand(0)),
{0, parameter->parameter_number()});
} else {
      LOG(FATAL) << "CallContext::kControlFlow computations should only be "
                    "called from call, while, or conditional instructions";
}
}
if (ssa_form_ && need_phi) {
return Phi(parameter, inputs);
} else {
return GetInstructionValueSet(parameter).AssignUnionOf(inputs);
}
}
bool HloDataflowAnalysis::UpdateTupleValueSet(HloInstruction* tuple) {
CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
bool changed = false;
for (int64_t i = 0; i < tuple->operands().size(); ++i) {
for (auto& pair : GetInstructionValueSet(tuple->operand(i))) {
const ShapeIndex& operand_index = pair.first;
HloValueSet& operand_value_set = pair.second;
ShapeIndex index = {i};
for (int64_t op_index : operand_index) {
index.push_back(op_index);
}
HloValueSet& value_set = GetValueSet(tuple, index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateWhileValueSet(HloInstruction* xla_while) {
CHECK_EQ(xla_while->opcode(), HloOpcode::kWhile);
const InstructionValueSet* const inputs[] = {
&GetInstructionValueSet(xla_while->while_body()->root_instruction()),
&GetInstructionValueSet(xla_while->operand(0))};
if (ssa_form_) {
return Phi(xla_while, inputs);
} else {
return GetInstructionValueSet(xla_while).AssignUnionOf(inputs);
}
}
bool HloDataflowAnalysis::UpdateAllGatherStartValueSet(
HloInstruction* all_gather_start) {
CHECK_EQ(all_gather_start->opcode(), HloOpcode::kAllGatherStart);
bool changed = false;
for (int64_t i = 0; i < all_gather_start->operand_count(); ++i) {
const HloValueSet& operand_value_set =
GetValueSet(all_gather_start->operand(i));
ShapeIndex output_index = {0};
if (all_gather_start->operand_count() > 1) {
output_index.push_back(i);
}
HloValueSet& value_set = GetValueSet(all_gather_start, output_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateAllGatherDoneValueSet(
HloInstruction* all_gather_done) {
CHECK_EQ(all_gather_done->opcode(), HloOpcode::kAllGatherDone);
bool changed = false;
for (auto& pair : GetInstructionValueSet(all_gather_done)) {
const ShapeIndex& output_index = pair.first;
HloValueSet& value_set = pair.second;
if (!ShapeUtil::GetSubshape(all_gather_done->shape(), output_index)
.IsArray()) {
continue;
}
ShapeIndex operand_index = {1};
for (int64_t i : output_index) {
operand_index.push_back(i);
}
const HloValueSet& operand_value_set =
GetValueSet(all_gather_done->operand(0), operand_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateAllReduceDoneValueSet(
HloInstruction* all_reduce_done) {
CHECK_EQ(all_reduce_done->opcode(), HloOpcode::kAllReduceDone);
bool changed = false;
for (auto& pair : GetInstructionValueSet(all_reduce_done)) {
const ShapeIndex& output_index = pair.first;
HloValueSet& value_set = pair.second;
ShapeIndex operand_index = {};
for (int64_t i : output_index) {
operand_index.push_back(i);
}
const HloValueSet& operand_value_set =
GetValueSet(all_reduce_done->operand(0), operand_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateCollectivePermuteStartValueSet(
HloInstruction* collective_permute_start) {
CHECK_EQ(collective_permute_start->opcode(),
HloOpcode::kCollectivePermuteStart);
bool changed = false;
if (collective_permute_start->operand(0)->shape().IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(
collective_permute_start->operand(0)->shape());
++i) {
const HloValueSet& operand_value_set =
GetValueSet(collective_permute_start->operand(0), {i});
HloValueSet& value_set = GetValueSet(collective_permute_start, {0, i});
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
} else {
const HloValueSet& operand_value_set =
GetValueSet(collective_permute_start->operand(0));
HloValueSet& value_set = GetValueSet(collective_permute_start, {0});
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateCollectivePermuteDoneValueSet(
HloInstruction* collective_permute_done) {
CHECK_EQ(collective_permute_done->opcode(),
HloOpcode::kCollectivePermuteDone);
bool changed = false;
if (collective_permute_done->shape().IsTuple()) {
for (int i = 0;
i < ShapeUtil::TupleElementCount(collective_permute_done->shape());
++i) {
const HloValueSet& operand_value_set =
GetValueSet(collective_permute_done->operand(0), {1, i});
HloValueSet& value_set = GetValueSet(collective_permute_done, {i});
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
} else {
const HloValueSet& operand_value_set =
GetValueSet(collective_permute_done->operand(0), {1});
HloValueSet& value_set = GetValueSet(collective_permute_done);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
return changed;
}
bool HloDataflowAnalysis::UpdateInstructionValueSet(
HloInstruction* instruction) {
bool changed = false;
switch (instruction->opcode()) {
case HloOpcode::kAddDependency: {
changed = UpdateAddDependencyValueSet(instruction);
break;
}
case HloOpcode::kAllGatherStart: {
changed = UpdateAllGatherStartValueSet(instruction);
break;
}
case HloOpcode::kAllGatherDone: {
changed = UpdateAllGatherDoneValueSet(instruction);
break;
}
case HloOpcode::kAsyncStart: {
changed = UpdateAsyncStartValueSet(instruction);
break;
}
case HloOpcode::kAsyncUpdate: {
changed = UpdateAsyncUpdateValueSet(instruction);
break;
}
case HloOpcode::kAsyncDone: {
changed = UpdateAsyncDoneValueSet(instruction);
break;
}
case HloOpcode::kBitcast: {
changed = UpdateBitcastValueSet(instruction);
break;
}
case HloOpcode::kDomain: {
changed = UpdateDomainValueSet(instruction);
break;
}
case HloOpcode::kCopy: {
changed = UpdateCopyValueSet(instruction);
break;
}
case HloOpcode::kGetTupleElement: {
changed = UpdateGetTupleElementValueSet(instruction);
break;
}
case HloOpcode::kTuple: {
changed = UpdateTupleValueSet(instruction);
break;
}
case HloOpcode::kParameter: {
changed = UpdateParameterValueSet(instruction);
break;
}
case HloOpcode::kCall: {
changed = UpdateCallValueSet(instruction);
break;
}
case HloOpcode::kWhile: {
changed = UpdateWhileValueSet(instruction);
break;
}
case HloOpcode::kSend: {
changed = UpdateSendValueSet(instruction);
break;
}
case HloOpcode::kRecvDone: {
changed = UpdateRecvDoneValueSet(instruction);
break;
}
case HloOpcode::kCopyStart: {
changed = UpdateCopyStartValueSet(instruction);
break;
}
case HloOpcode::kCopyDone: {
changed = UpdateCopyDoneValueSet(instruction);
break;
}
case HloOpcode::kConditional: {
changed = UpdateConditionalValueSet(instruction);
break;
}
case HloOpcode::kAllReduceDone: {
changed = UpdateAllReduceDoneValueSet(instruction);
break;
}
case HloOpcode::kCollectivePermuteStart: {
changed = UpdateCollectivePermuteStartValueSet(instruction);
break;
}
case HloOpcode::kCollectivePermuteDone: {
changed = UpdateCollectivePermuteDoneValueSet(instruction);
break;
}
case HloOpcode::kOptimizationBarrier: {
changed = UpdateOptimizationBarrierValueSet(instruction);
break;
}
default:
break;
}
if (forwards_value_ != nullptr) {
for (auto& [index, value_set] : GetInstructionValueSet(instruction)) {
if (std::optional<ForwardedOperand> forwarded_operand =
forwards_value_(instruction, index);
forwarded_operand.has_value()) {
HloValueSet& operand_value_set =
GetValueSet(instruction->operand(forwarded_operand->operand_number),
forwarded_operand->operand_index);
if (value_set != operand_value_set) {
value_set = operand_value_set;
changed = true;
}
}
}
}
return changed;
}
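// Worklist-driven fixed-point propagation, prioritized by post-order ordinal.
// When an instruction's value set changes, its users are re-queued, along with
// any parameters or calling instructions that consume the changed set across
// computation boundaries.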
void HloDataflowAnalysis::Propagate() {
using Work = std::pair<int64_t, HloInstruction*>;
std::priority_queue<Work, std::vector<Work>, std::greater<Work>> worklist;
absl::flat_hash_set<HloInstruction*> workset;
auto priority_map = CalculatePostOrderSchedule(module_);
auto add_to_worklist = [&priority_map, &worklist,
&workset](HloInstruction* instruction) {
if (workset.insert(instruction).second) {
worklist.emplace(priority_map[instruction], instruction);
}
};
auto comps = module_.MakeComputationPostOrder();
for (HloComputation* computation : comps) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
add_to_worklist(instruction);
}
}
VLOG(1) << "SSA_FORM_: " << ssa_form_;
while (!worklist.empty()) {
HloInstruction* instruction = worklist.top().second;
worklist.pop();
workset.erase(workset.find(instruction));
VLOG(3) << "Worklist top: " << instruction->name();
XLA_VLOG_LINES(3, ToString());
if (!UpdateInstructionValueSet(instruction)) {
VLOG(4) << "No change.";
continue;
}
VLOG(4) << "New value set for " << instruction->name() << ": "
<< GetInstructionValueSet(instruction);
for (HloInstruction* user : instruction->users()) {
add_to_worklist(user);
if (user->opcode() == HloOpcode::kConditional) {
for (int j = 0; j < user->branch_count(); ++j) {
if (user->operand(j + 1) == instruction) {
add_to_worklist(
user->branch_computation(j)->parameter_instruction(0));
}
}
} else if (user->opcode() == HloOpcode::kAsyncUpdate ||
user->opcode() == HloOpcode::kAsyncDone) {
if (HloInstruction::IsThreadIncluded(user->async_execution_thread(),
execution_threads_)) {
for (int64_t parameter_number = 0;
parameter_number <
user->async_wrapped_computation()->num_parameters();
++parameter_number) {
add_to_worklist(
user->async_wrapped_computation()->parameter_instruction(
parameter_number));
}
}
} else {
for (HloComputation* called_computation : user->called_computations()) {
if (!HloInstruction::IsThreadIncluded(
called_computation->execution_thread(), execution_threads_)) {
continue;
}
const CallGraphNode& call_graph_node =
call_graph_->GetNode(called_computation);
if (call_graph_node.context() == CallContext::kControlFlow) {
for (int64_t operand_number : user->OperandIndices(instruction)) {
add_to_worklist(
called_computation->parameter_instruction(operand_number));
}
}
}
}
}
if (instruction == instruction->parent()->root_instruction()) {
const CallGraphNode& call_graph_node =
call_graph_->GetNode(instruction->parent());
for (const CallSite& callsite : call_graph_node.caller_callsites()) {
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
add_to_worklist(callsite.instruction());
add_to_worklist(
callsite.instruction()->while_body()->parameter_instruction(0));
add_to_worklist(
callsite.instruction()->while_condition()->parameter_instruction(
0));
} else if (call_graph_node.context() == CallContext::kControlFlow) {
add_to_worklist(callsite.instruction());
}
}
}
}
}
const InstructionValueSet& HloDataflowAnalysis::GetInstructionValueSet(
const HloInstruction* instruction) const {
DCHECK(value_sets_.contains(instruction))
<< "Instruction " << instruction->ToString() << " not found.";
return *value_sets_.find(instruction)->second;
}
InstructionValueSet& HloDataflowAnalysis::GetInstructionValueSet(
const HloInstruction* instruction) {
DCHECK(value_sets_.contains(instruction))
<< "Instruction " << instruction->ToString() << " not found.";
return *value_sets_.find(instruction)->second;
}
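// Creates a value set for every instruction and seeds it with the values each
// opcode defines. Pure forwarding opcodes (kGetTupleElement, kCall, kWhile,
// etc.) define nothing; others define only the elements they do not forward
// from an operand.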
absl::Status HloDataflowAnalysis::InitializeInstructionValueSets() {
for (const HloComputation* computation : module_.MakeComputationSorted()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
const CallGraphNode& call_graph_node = call_graph_->GetNode(computation);
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
value_sets_.insert({instruction, std::make_unique<InstructionValueSet>(
instruction->shape())});
auto define_all_values =
[this, &instruction](
absl::FunctionRef<bool(const ShapeIndex&)> should_define =
[](const ShapeIndex&) { return true; }) {
for (auto& pair : GetInstructionValueSet(instruction)) {
const ShapeIndex& index = pair.first;
bool defines_value;
if (forwards_value_ != nullptr &&
forwards_value_(instruction, index).has_value()) {
defines_value = false;
} else {
defines_value = should_define(index);
}
if (defines_value) {
HloValue* value =
NewHloValue(instruction, index, false);
GetValueSet(instruction, index).AddValue(value);
}
}
};
auto define_value_at = [this, &instruction](const ShapeIndex& index) {
HloValue* value = NewHloValue(instruction, index, false);
GetValueSet(instruction, index).AddValue(value);
};
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
if (bitcast_defines_value_) {
define_all_values();
}
break;
case HloOpcode::kAddDependency:
case HloOpcode::kWhile:
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kGetTupleElement:
case HloOpcode::kDomain:
case HloOpcode::kOptimizationBarrier:
break;
case HloOpcode::kParameter:
if (call_graph_node.context() == CallContext::kBoth) {
return Unimplemented(
"Computation %s is called in both a parallel (eg, kMap) and "
"sequential (eg, kCall) context",
computation->name());
}
if (call_graph_node.caller_callsites().empty() ||
call_graph_node.context() == CallContext::kEmbedded) {
define_all_values();
}
break;
case HloOpcode::kCopy:
case HloOpcode::kTuple:
define_value_at({});
break;
case HloOpcode::kAsyncStart: {
bool thread_included = HloInstruction::IsThreadIncluded(
instruction->async_execution_thread(), execution_threads_);
define_all_values([&](const ShapeIndex& index) {
return ShapeUtil::GetSubshape(instruction->shape(), index)
.IsTuple() ||
(!thread_included && index.front() == 1) ||
(index.front() > 1);
});
break;
}
case HloOpcode::kAsyncUpdate:
define_all_values([&](const ShapeIndex& index) {
return ShapeUtil::GetSubshape(instruction->shape(), index)
.IsTuple();
});
break;
case HloOpcode::kAsyncDone:
define_all_values([&](const ShapeIndex& index) {
return ShapeUtil::GetSubshape(instruction->shape(), index)
.IsTuple();
});
break;
case HloOpcode::kCopyStart:
define_value_at({});
define_value_at({0});
define_value_at({2});
break;
case HloOpcode::kCopyDone:
break;
case HloOpcode::kAllGatherStart:
define_all_values([&](const ShapeIndex& index) {
return ShapeUtil::GetSubshape(instruction->shape(), index)
.IsTuple() ||
index.front() == 1;
});
break;
case HloOpcode::kAllGatherDone:
if (instruction->shape().IsTuple()) {
define_value_at({});
}
break;
case HloOpcode::kAllReduceDone:
break;
case HloOpcode::kCollectivePermuteStart:
define_value_at({});
define_value_at({1});
for (int i = 2; i < instruction->shape().tuple_shapes_size(); ++i) {
define_value_at({i});
}
if (instruction->operand_count() > 1) {
CHECK_EQ(instruction->operand_count(), 4);
if (instruction->operand(1)->shape().IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(
instruction->operand(1)->shape());
++i) {
define_value_at({1, i});
}
}
}
break;
case HloOpcode::kCollectivePermuteDone:
if (instruction->shape().IsTuple()) {
define_value_at({});
}
break;
case HloOpcode::kRecvDone:
define_value_at({});
define_value_at({1});
break;
case HloOpcode::kSend:
define_value_at({});
define_value_at({1});
define_value_at({2});
break;
default:
define_all_values();
break;
}
}
}
return absl::OkStatus();
}
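// After phi-graph optimization, any value set whose sole member is a phi that
// resolved to another value is rewritten to that value, and the dead phi is
// marked for deletion.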
void HloDataflowAnalysis::OptimizePhiValues() {
if (!ssa_form_) {
return;
}
VLOG(1) << "Before phi graph optimization";
XLA_VLOG_LINES(1, phi_graph_.ToString());
phi_graph_.Optimize();
VLOG(1) << "After phi graph optimization";
XLA_VLOG_LINES(1, phi_graph_.ToString());
for (const HloComputation* computation : module_.computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
for (HloInstruction* instruction : computation->instructions()) {
InstructionValueSet& instruction_value_set =
GetInstructionValueSet(instruction);
VLOG(1) << "inst: " << instruction->name();
VLOG(1) << instruction_value_set.ToString();
instruction_value_set.ForEachMutableElement(
[&](const xla::ShapeIndex& index, HloValueSet* value_set) {
auto values = value_set->values();
if (!(values.size() == 1 && values[0]->is_phi())) {
return;
}
HloValue::Id phi_id = values[0]->id();
HloValue::Id new_id = phi_graph_.FindOptimizedValue(phi_id);
if (new_id != phi_id) {
VLOG(1) << "Replacing " << values[0]->ToShortString() << " with "
<< GetValue(new_id).ToShortString();
value_set->Clear();
const HloValue& new_value = GetValue(new_id);
value_set->AddValue(&new_value);
MarkValueForDeletion(phi_id);
}
});
}
}
}
absl::StatusOr<std::unique_ptr<HloDataflowAnalysis>> HloDataflowAnalysis::Run(
const HloModule& module, bool ssa_form, bool bitcast_defines_value,
const CanShareBuffer& can_share_buffer, const ForwardsValue& forwards_value,
absl::flat_hash_set<absl::string_view> execution_threads) {
VLOG(1) << "HloDataflowAnalysis::Run on module " << module.name();
XLA_VLOG_LINES(2, module.ToString());
auto dataflow_analysis = absl::WrapUnique(new HloDataflowAnalysis(
module, ssa_form, bitcast_defines_value, can_share_buffer, forwards_value,
execution_threads));
TF_RETURN_IF_ERROR(dataflow_analysis->InitializeInstructionValueSets());
dataflow_analysis->Propagate();
dataflow_analysis->OptimizePhiValues();
dataflow_analysis->DeleteMarkedValues();
std::vector<std::vector<HloPosition>> value_positions(
dataflow_analysis->next_value_id_);
for (const HloComputation* computation : module.computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads)) {
continue;
}
for (HloInstruction* instruction : computation->instructions()) {
for (const auto& pair :
dataflow_analysis->GetInstructionValueSet(instruction)) {
const ShapeIndex& index = pair.first;
const HloValueSet& value_set = pair.second;
for (const HloValue* value : value_set.values()) {
if (value->defining_instruction() != instruction) {
value_positions[value->id()].push_back(
HloPosition{instruction, index});
}
}
}
}
}
for (auto& pair : dataflow_analysis->values_) {
HloValue::Id value_id = pair.first;
HloValue& value = *pair.second;
value.SetPositions(value_positions[value_id]);
}
dataflow_analysis->values_vector_.reserve(dataflow_analysis->values_.size());
for (const auto& pair : dataflow_analysis->values_) {
dataflow_analysis->values_vector_.push_back(pair.second.get());
}
absl::c_sort(dataflow_analysis->values_vector_, HloValue::IdLessThan);
TF_DCHECK_OK(dataflow_analysis->Verify());
XLA_VLOG_LINES(1, dataflow_analysis->ToString());
return std::move(dataflow_analysis);
}
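// Checks that value positions and instruction value sets agree in both
// directions.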
absl::Status HloDataflowAnalysis::Verify() const {
for (const HloValue* value : values()) {
for (const HloPosition& position : value->positions()) {
const HloValueSet& value_set = GetValueSet(position);
TF_RET_CHECK(absl::c_linear_search(value_set.values(), value))
<< "Value set at position " << position << " does not contain value "
<< value->ToShortString();
}
}
for (const auto& computation : module_.computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads_)) {
continue;
}
for (const auto& instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAsyncStart &&
(instruction->async_wrapped_opcode() == HloOpcode::kCall ||
instruction->async_wrapped_opcode() == HloOpcode::kCustomCall)) {
continue;
}
for (const auto& pair : GetInstructionValueSet(instruction)) {
const ShapeIndex& index = pair.first;
const HloValueSet& value_set = pair.second;
const HloPosition position{instruction, index};
for (const HloValue* value : value_set.values()) {
TF_RET_CHECK(absl::c_linear_search(value->positions(), position))
<< "Value set at position " << position
<< " unexpectedly contains value " << value->ToShortString();
}
}
}
}
return absl::OkStatus();
}
bool HloDataflowAnalysis::DoesNotUseOperandBuffer(
const HloInstruction* operand, const ShapeIndex& index,
const HloInstruction* user) const {
for (const HloValue* value : GetValueSet(operand, index).values()) {
for (const HloUse& use : value->GetUses()) {
if (use.instruction == user) {
if (user->IsLoopFusion()) {
HloInstruction* fusion_param =
user->fused_parameter(use.operand_number);
const HloValue& value =
GetValueDefinedAt(fusion_param, use.operand_index);
return value.GetUses().empty();
}
return false;
}
}
}
return true;
}
bool HloDataflowAnalysis::IsInPlaceOperation(HloOpcode opcode) {
return opcode == HloOpcode::kDynamicUpdateSlice ||
opcode == HloOpcode::kScatter;
}
bool HloDataflowAnalysis::IsAsynchronousOperationStart(
HloOpcode opcode) {
return opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv ||
opcode == HloOpcode::kCopyStart ||
opcode == HloOpcode::kAllReduceStart ||
opcode == HloOpcode::kAllGatherStart ||
opcode == HloOpcode::kCollectivePermuteStart ||
opcode == HloOpcode::kAsyncStart;
}
bool HloDataflowAnalysis::IsAsynchronousOperationDone(
HloOpcode opcode) {
return opcode == HloOpcode::kSendDone || opcode == HloOpcode::kRecvDone ||
opcode == HloOpcode::kCopyDone ||
opcode == HloOpcode::kAllReduceDone ||
opcode == HloOpcode::kAllGatherDone ||
opcode == HloOpcode::kCollectivePermuteDone ||
opcode == HloOpcode::kAsyncDone;
}
namespace {
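// For a fusion, maps each leaf of the fusion output back through the fused
// root's in-place pairs, following tuple indirection and nested fusions; a
// pair is recorded when the chain bottoms out at a fusion parameter.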
std::vector<std::pair<HloOperandIndex, ShapeIndex>>
GetFusionInstructionInPlaceInputOutputPairs(const HloInstruction* instruction) {
std::vector<std::pair<HloOperandIndex, ShapeIndex>>
in_place_input_output_pairs;
ShapeUtil::ForEachLeafShape(
instruction->shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
const HloInstruction* output_source_instruction =
instruction->fused_expression_root();
ShapeIndex output_source_index = index;
std::tie(output_source_instruction, output_source_index) =
FollowTupleIndirection(output_source_instruction,
output_source_index);
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(
output_source_instruction);
ShapeIndex in_place_input_index;
const HloInstruction* in_place_input_source = nullptr;
for (const auto& output_source_in_place_pair : in_place_pairs) {
const HloOperandIndex& input = output_source_in_place_pair.first;
const ShapeIndex& output_index = output_source_in_place_pair.second;
if (output_index == output_source_index) {
CHECK(in_place_input_source == nullptr);
in_place_input_source =
output_source_instruction->operand(input.operand_number);
in_place_input_index = input.operand_index;
std::tie(in_place_input_source, in_place_input_index) =
FollowTupleIndirection(in_place_input_source,
in_place_input_index);
if (in_place_input_source->opcode() == HloOpcode::kFusion) {
auto nested_in_place_input_output_pairs =
HloDataflowAnalysis::GetInPlaceInputOutputPairs(
in_place_input_source);
for (const auto& pair : nested_in_place_input_output_pairs) {
if (pair.second == in_place_input_index) {
in_place_input_source =
in_place_input_source->operand(pair.first.operand_number);
in_place_input_index = pair.first.operand_index;
std::tie(in_place_input_source, in_place_input_index) =
FollowTupleIndirection(in_place_input_source,
in_place_input_index);
}
}
}
}
}
if (in_place_input_source != nullptr &&
in_place_input_source->opcode() == HloOpcode::kParameter) {
in_place_input_output_pairs.emplace_back(
HloOperandIndex{in_place_input_source->parameter_number(),
in_place_input_index},
index);
}
});
return in_place_input_output_pairs;
}
}
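// Returns (operand index, output index) pairs for which the instruction may
// compute its result in place, i.e. the output element may share a buffer with
// the named operand element.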
std::vector<std::pair<HloOperandIndex, ShapeIndex>>
HloDataflowAnalysis::GetInPlaceInputOutputPairs(
const HloInstruction* instruction) {
if (IsInPlaceOperation(instruction->opcode())) {
const HloScatterInstruction* scatter =
DynCast<HloScatterInstruction>(instruction);
if (scatter && scatter->scatter_operand_count() > 1) {
std::vector<std::pair<HloOperandIndex, ShapeIndex>> pairs;
pairs.reserve(scatter->scatter_operand_count());
for (int i = 0, n = scatter->scatter_operand_count(); i < n; ++i) {
pairs.emplace_back(HloOperandIndex{i, {}}, ShapeIndex{i});
}
return pairs;
}
return {{HloOperandIndex{0, {}}, {}}};
} else if (instruction->opcode() == HloOpcode::kCollectivePermute &&
instruction->operands().size() == 4) {
if (instruction->operand(1)->shape().IsTuple()) {
std::vector<std::pair<HloOperandIndex, ShapeIndex>> in_place_pairs(
{{HloOperandIndex{1, {}}, {}}});
for (int i = 0; i < instruction->operand(1)->shape().tuple_shapes_size();
i++) {
in_place_pairs.push_back({HloOperandIndex{1, {i}}, {i}});
}
return in_place_pairs;
} else {
return {{HloOperandIndex{1, {}}, {}}};
}
} else if (instruction->opcode() == HloOpcode::kCollectivePermuteStart &&
instruction->operands().size() == 4) {
if (instruction->operand(1)->shape().IsTuple()) {
std::vector<std::pair<HloOperandIndex, ShapeIndex>> in_place_pairs(
{{HloOperandIndex{1, {}}, {1}}});
for (int i = 0; i < instruction->operand(1)->shape().tuple_shapes_size();
i++) {
in_place_pairs.push_back({HloOperandIndex{1, {i}}, {1, i}});
}
return in_place_pairs;
} else {
return {{HloOperandIndex{1, {}}, {1}}};
}
} else if (instruction->opcode() == HloOpcode::kCustomCall) {
const auto& aliasing_pairs = Cast<HloCustomCallInstruction>(instruction)
->output_to_operand_aliasing();
std::vector<std::pair<HloOperandIndex, ShapeIndex>> in_place_pairs;
in_place_pairs.reserve(aliasing_pairs.size());
for (const auto& pair : aliasing_pairs) {
ShapeIndex output_shape_index = pair.first;
int64_t operand_index = pair.second.first;
ShapeIndex operand_shape_index = pair.second.second;
in_place_pairs.push_back(
{HloOperandIndex{operand_index, {operand_shape_index}},
output_shape_index});
}
return in_place_pairs;
} else if (instruction->opcode() == HloOpcode::kAllReduceStart) {
if (instruction->operands().size() == 1) {
return {{HloOperandIndex{0, {}}, {}}};
}
std::vector<std::pair<HloOperandIndex, ShapeIndex>> in_place_pairs;
in_place_pairs.reserve(instruction->operands().size());
for (int i = 0; i < instruction->operands().size(); i++) {
in_place_pairs.push_back({HloOperandIndex{i, {}}, {i}});
}
return in_place_pairs;
} else if (instruction->opcode() == HloOpcode::kFusion) {
const auto& aliasing_pairs =
Cast<HloFusionInstruction>(instruction)->output_to_operand_aliasing();
auto in_place_pairs =
GetFusionInstructionInPlaceInputOutputPairs(instruction);
if (!aliasing_pairs.empty()) {
for (const auto& pair : aliasing_pairs) {
ShapeIndex output_shape_index = pair.first;
int64_t operand_index = pair.second.first;
ShapeIndex operand_shape_index = pair.second.second;
in_place_pairs.push_back(
{HloOperandIndex{operand_index, {operand_shape_index}},
output_shape_index});
}
}
return in_place_pairs;
} else if (instruction->opcode() == HloOpcode::kSetDimensionSize) {
int64_t dimension = instruction->dimension();
std::vector<std::pair<HloOperandIndex, ShapeIndex>> in_place_pairs;
    // In-place only when the output and the operand agree on whether
    // `dimension` is dynamic.
    if (instruction->shape().is_dynamic_dimension(dimension) ==
        instruction->operand(0)->shape().is_dynamic_dimension(dimension)) {
in_place_pairs.push_back({HloOperandIndex{0, {}}, {}});
}
return in_place_pairs;
}
return {};
}
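// Conservatively decides whether `user`'s output at `user_index` may share a
// buffer with `operand` at `operand_index`, combining the in-place pairs
// above, the optional can_share_buffer_ backend hint, and opcode-specific
// rules for fusions, while/conditional, sort, call, and elementwise users.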
bool HloDataflowAnalysis::CanShareOperandBufferWithUser(
HloInstruction* operand, const ShapeIndex& operand_index,
HloInstruction* user, const ShapeIndex& user_index) const {
CHECK(user->IsUserOf(operand))
<< "user: " << user->ToString() << " operand: " << operand->ToString();
if (operand->opcode() == HloOpcode::kConstant) {
return false;
}
const Shape& operand_subshape =
ShapeUtil::GetSubshape(operand->shape(), operand_index);
const Shape& user_subshape =
ShapeUtil::GetSubshape(user->shape(), user_index);
if (IsSliceInputFusion(*user)) {
HloInstruction* fusion_param =
user->fused_parameter(user->operand_index(operand));
return operand_subshape.IsArray() && user_subshape.IsArray() &&
ShapeUtil::ElementsIn(operand_subshape) ==
ShapeUtil::ElementsIn(user_subshape) &&
ShapeUtil::SameElementType(operand_subshape, user_subshape) &&
AreTransitiveUsesEffectivelyElementwise(
fusion_param, user->fused_expression_root(), user_index);
}
auto shapes_equal = ShapeUtil::Equal(operand_subshape, user_subshape);
if (shapes_equal) {
for (const auto& operand_and_output_index :
GetInPlaceInputOutputPairs(user)) {
if (operand_and_output_index.second != user_index) {
continue;
}
for (const HloUse& use :
GetUniqueValueAt(operand, operand_index).GetUses()) {
if (use == HloUse{user, operand_and_output_index.first.operand_number,
operand_and_output_index.first.operand_index}) {
return true;
}
}
}
}
if (can_share_buffer_ != nullptr) {
if (std::optional<bool> hint =
can_share_buffer_(user, operand, user_index)) {
return *hint;
}
}
if (!shapes_equal) {
return false;
}
if (user->opcode() == HloOpcode::kFusion) {
HloInstruction* fusion_param =
user->fused_parameter(user->operand_index(operand));
const HloValue& fusion_param_value =
GetValueDefinedAt(fusion_param, operand_index);
if (user->IsLoopFusion() || user->IsInputFusion()) {
return AreTransitiveUsesElementwiseOrTuple(fusion_param);
}
if (user->IsOutputFusion() &&
user->fused_expression_root()->opcode() == HloOpcode::kAdd) {
auto* add = user->fused_expression_root();
auto add_operand_it =
absl::c_find_if(add->operands(), [&](HloInstruction* operand) {
return operand->opcode() == HloOpcode::kConvolution ||
operand->opcode() == HloOpcode::kDot;
});
if (add_operand_it == add->operands().end()) {
return false;
}
auto* matched_add_operand = *add_operand_it;
const int64_t other_add_operand_index =
matched_add_operand == add->operand(0) ? 1 : 0;
if (fusion_param_value.GetUses().size() == 1) {
const HloUse& use = fusion_param_value.GetUses()[0];
return use.instruction == user->fused_expression_root() &&
use.operand_number == other_add_operand_index;
}
return false;
}
}
if (user->opcode() == HloOpcode::kWhile ||
user->opcode() == HloOpcode::kConditional) {
return true;
}
if (user->opcode() == HloOpcode::kDynamicUpdateSlice ||
user->opcode() == HloOpcode::kScatter ||
user->opcode() == HloOpcode::kTriangularSolve ||
user->opcode() == HloOpcode::kSetDimensionSize) {
const auto operand_indices = user->OperandIndices(operand);
int64_t operand_no = user->opcode() == HloOpcode::kTriangularSolve ? 1 : 0;
return operand_indices.size() == 1 && operand_indices[0] == operand_no;
}
if (user->opcode() == HloOpcode::kSort) {
if (operand->users().size() != 1) {
return false;
}
if (user->operand_count() == 1) {
return true;
}
CHECK(!user_index.empty());
const auto operand_indices = user->OperandIndices(operand);
return operand_indices.size() == 1 && user_index[0] == operand_indices[0];
}
if (user->opcode() == HloOpcode::kCall) {
auto uses = GetValueDefinedAt(operand, operand_index).GetUses();
const bool found_caller_use =
absl::c_find_if(uses, [user](const HloUse& use) {
return use.instruction == user;
}) != uses.end();
auto* callee_root = user->to_apply()->root_instruction();
const bool found_elementwise_callee_use =
absl::c_find_if(uses, [callee_root](const HloUse& use) {
return use.instruction == callee_root &&
callee_root->IsElementwiseOnOperand(use.operand_number);
}) != uses.end();
return uses.size() == 2 && found_caller_use && found_elementwise_callee_use;
}
return user->IsElementwiseOnOperand(user->operand_index(operand));
}
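// Peels kTuple and kGetTupleElement indirection, returning the instruction and
// shape index that actually produce the value at the given position.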
std::pair<const HloInstruction*, ShapeIndex> FollowTupleIndirection(
const HloInstruction* instruction, ShapeIndex operand_index) {
while (instruction->opcode() == HloOpcode::kTuple && !operand_index.empty()) {
instruction = instruction->operand(operand_index.front());
operand_index.pop_front();
}
while (instruction->opcode() == HloOpcode::kGetTupleElement) {
operand_index.push_front(instruction->tuple_index());
instruction = instruction->operand(0);
}
return {instruction, operand_index};
}
} | #include "xla/service/hlo_dataflow_analysis.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
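// Test fixture for HloDataflowAnalysis, parameterized on whether the analysis
// runs in SSA form (inserting phi values) or not.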
class HloDataflowAnalysisTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
protected:
HloDataflowAnalysisTest() : module_(CreateNewVerifiedModule()) {}
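  // Runs dead-code elimination (optionally) and call-graph flattening on the
  // module, then runs dataflow analysis and returns it.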
const HloDataflowAnalysis& RunAnalysis(bool ssa_form,
bool bitcast_defines_value = false,
bool run_dce = true) {
if (run_dce) {
HloDCE dce;
EXPECT_TRUE(dce.Run(module_.get()).ok());
}
FlattenCallGraph flatten;
EXPECT_TRUE(flatten.Run(module_.get()).ok());
analysis_ =
HloDataflowAnalysis::Run(*module_, ssa_form, bitcast_defines_value)
.value();
return *analysis_;
}
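  // Returns the HloValues in the value set of 'instruction' at 'index'.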
const std::vector<const HloValue*>& HloValuesAt(
const HloInstruction* instruction, const ShapeIndex& index = {}) {
CHECK(analysis_ != nullptr);
return analysis_->GetValueSet(instruction, index).values();
}
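  // Returns true if the values defined at 'a' and 'b' may interfere under
  // 'ordering'. Both instructions must be non-tuple shaped.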
bool InstructionsMayInterfere(const HloOrdering& ordering,
const HloInstruction* a,
const HloInstruction* b) {
EXPECT_FALSE(a->shape().IsTuple());
EXPECT_FALSE(b->shape().IsTuple());
return ordering.MayInterfere(analysis_->GetValueDefinedAt(a),
analysis_->GetValueDefinedAt(b), *analysis_);
}
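  // Builds a computation applying the given unary opcode to a scalar F32
  // parameter.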
std::unique_ptr<HloComputation> CreateR0F32UnaryOpComputation(
HloOpcode opcode) {
HloComputation::Builder builder(
absl::StrCat(TestName(), ".", HloOpcodeString(opcode)));
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, opcode, param0));
return builder.Build();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloDataflowAnalysis> analysis_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
const Shape vector_shape_ = ShapeUtil::MakeShape(F32, {42});
const Shape tuple_shape_ = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})});
};
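// Tests dataflow for a simple binary operation (add) of two constants.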
TEST_P(HloDataflowAnalysisTest, BinaryOperation) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, constant1, constant2));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).positions(),
UnorderedElementsAre(HloPosition{constant1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).positions(),
UnorderedElementsAre(HloPosition{constant2, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(add).positions(),
UnorderedElementsAre(HloPosition{add, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{add, 1, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).GetUses().empty());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
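// Verifies dataflow through a tuple and its get-tuple-element users.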
TEST_P(HloDataflowAnalysisTest, TupleAndGtes) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param0));
EXPECT_TRUE(analysis.ValueIsDefinedAt(param1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(gte0));
EXPECT_FALSE(analysis.ValueIsDefinedAt(gte1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_THAT(
analysis.GetValueDefinedAt(param0).positions(),
UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
HloPosition{gte0, {}}));
EXPECT_THAT(
analysis.GetValueDefinedAt(param1).positions(),
UnorderedElementsAre(HloPosition{param1, {}}, HloPosition{tuple, {1}},
HloPosition{gte1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(tuple).positions(),
UnorderedElementsAre(HloPosition{tuple, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(param0).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(param1).GetUses(),
UnorderedElementsAre(HloUse{add, 1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte0, 0, {}}, HloUse{gte1, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
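// Verifies positions and uses of values flowing through a nested tuple.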
TEST_P(HloDataflowAnalysisTest, NestedTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto nested_tuple = builder.AddInstruction(
HloInstruction::CreateTuple({tuple, tuple, constant1}));
auto gte_tuple = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple->shape(), nested_tuple, 1));
auto gte_out = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, gte_tuple, 0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).positions(),
UnorderedElementsAre(
HloPosition{constant1, {}}, HloPosition{tuple, {0}},
HloPosition{nested_tuple, {0, 0}}, HloPosition{nested_tuple, {1, 0}},
HloPosition{nested_tuple, {2}}, HloPosition{gte_tuple, {0}},
HloPosition{gte_out, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_out, 0, {0}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).GetUses().empty());
EXPECT_THAT(analysis.GetValueDefinedAt(tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_out, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(nested_tuple, {}).GetUses(),
UnorderedElementsAre(HloUse{gte_tuple, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_FALSE(
analysis.GetValueDefinedAt(tuple, {}).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(nested_tuple, {})
.live_out_of_module());
}
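// A call defines no new values: the subcomputation's parameters and the call
// result alias the caller's values.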
TEST_P(HloDataflowAnalysisTest, SingleCall) {
auto subbuilder = HloComputation::Builder("Subcomputation");
auto subparam0 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto subparam1 = subbuilder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
HloComputation* called_computation =
module_->AddEmbeddedComputation(subbuilder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, called_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam0));
EXPECT_FALSE(analysis.ValueIsDefinedAt(subparam1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_FALSE(analysis.ValueIsDefinedAt(call));
EXPECT_EQ(analysis.GetUniqueValueAt(subparam0),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(subparam1),
analysis.GetValueDefinedAt(constant2));
EXPECT_EQ(analysis.GetUniqueValueAt(call), analysis.GetValueDefinedAt(add));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{add, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{add, 1, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
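// Nested calls where the outer computation swaps its parameters before
// calling the inner one, swapping the uses accordingly.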
TEST_P(HloDataflowAnalysisTest, NestedCalls) {
auto inner_builder = HloComputation::Builder("InnerComputation");
auto inner_param0 = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto inner_param1 = inner_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_param0, inner_param1));
HloComputation* inner_computation =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("OuterComputation");
auto outer_param0 = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto outer_param1 = outer_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto nested_call = outer_builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {outer_param1, outer_param0}, inner_computation));
HloComputation* outer_computation =
module_->AddEmbeddedComputation(outer_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto call = builder.AddInstruction(HloInstruction::CreateCall(
scalar_shape_, {constant1, constant2}, outer_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{nested_call, 1, {}},
HloUse{add, 1, {}}));
EXPECT_THAT(
analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{nested_call, 0, {}},
HloUse{add, 0, {}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
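// A simple while loop: in SSA form the modified tuple element becomes a phi
// value while the unmodified element flows through unchanged.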
TEST_P(HloDataflowAnalysisTest, SingleWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
auto body_root = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_FALSE(analysis.GetValueDefinedAt(cond_constant).live_out_of_module());
if (ssa_form) {
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, {1}).is_phi());
EXPECT_THAT(
analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{add, 0, {}}, HloUse{body_root, 0, {}},
HloUse{xla_while, 0, {0}}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1})
.live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(add).live_out_of_module());
} else {
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(xla_while, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(cond_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module());
}
}
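// Three while loops in sequence: tuple element 0 passes through all of them
// unchanged.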
TEST_P(HloDataflowAnalysisTest, SequentialWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_0, add}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
auto xla_while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
auto xla_while2 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while0, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while1, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(xla_while2, {0}),
analysis.GetValueDefinedAt(constant1));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
}
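// While loops nested three levels deep: in SSA form only the outermost body
// parameter defines a value; the inner parameters and roots alias it.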
TEST_P(HloDataflowAnalysisTest, MultiLevelNestedWhile) {
const Shape tuple_shape = ShapeUtil::MakeTupleShape({scalar_shape_});
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto level0_builder = HloComputation::Builder("level0_body");
auto level0_param = level0_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level0_element_0 = level0_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, level0_param, 0));
auto level0_root = level0_builder.AddInstruction(
HloInstruction::CreateTuple({level0_element_0}));
HloComputation* level0_body =
module_->AddEmbeddedComputation(level0_builder.Build());
auto level1_builder = HloComputation::Builder("level1_body");
auto level1_param = level1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level1_root = level1_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, level0_body, level1_param));
HloComputation* level1_body =
module_->AddEmbeddedComputation(level1_builder.Build());
auto level2_builder = HloComputation::Builder("level2_body");
auto level2_param = level2_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto level2_while = level2_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, level1_body, level2_param));
auto level2_element_0 = level2_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, level2_while, 0));
auto negate = level2_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, level2_element_0));
level2_builder.AddInstruction(HloInstruction::CreateTuple({negate}));
HloComputation* level2_body =
module_->AddEmbeddedComputation(level2_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant1}));
builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, level2_body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
if (!ssa_form) {
return;
}
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_param, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level1_root, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(level0_root, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level1_param, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level0_param, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level1_root, {0}),
HloValuesAt(level2_param, {0}));
EXPECT_EQ(HloValuesAt(level0_root, {0}),
HloValuesAt(level2_param, {0}));
}
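// A while body containing another while: checks where phi values appear in
// SSA form and which values reach each position otherwise.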
TEST_P(HloDataflowAnalysisTest, NestedWhiles) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto inner_builder = HloComputation::Builder("inner_body");
auto inner_param = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto inner_element_0 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
auto inner_element_1 = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
inner_builder.AddInstruction(
HloInstruction::CreateTuple({inner_element_0, add}));
HloComputation* inner_body =
module_->AddEmbeddedComputation(inner_builder.Build());
auto outer_builder = HloComputation::Builder("outer_body");
auto outer_param = outer_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto outer_element_0 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0));
auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, outer_element_0));
auto outer_element_1 = outer_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1));
auto outer_tuple = outer_builder.AddInstruction(
HloInstruction::CreateTuple({negate, outer_element_1}));
auto nested_while = outer_builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, condition, inner_body, outer_tuple));
HloComputation* outer_body =
module_->AddEmbeddedComputation(outer_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto entry_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, outer_body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_THAT(HloValuesAt(inner_param, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(negate)));
if (ssa_form) {
EXPECT_TRUE(analysis.ValueIsDefinedAt(inner_param, {1}));
EXPECT_TRUE(
analysis.GetValueDefinedAt(inner_param, {1}).is_phi());
EXPECT_FALSE(analysis.ValueIsDefinedAt(nested_while, {0}));
EXPECT_THAT(HloValuesAt(inner_param, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(negate)));
EXPECT_TRUE(analysis.ValueIsDefinedAt(nested_while, {1}));
EXPECT_TRUE(
analysis.GetValueDefinedAt(nested_while, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(entry_while, {0}));
EXPECT_TRUE(
analysis.GetValueDefinedAt(entry_while, {0}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(entry_while, {1}));
EXPECT_TRUE(
analysis.GetValueDefinedAt(entry_while, {1}).is_phi());
} else {
EXPECT_THAT(HloValuesAt(inner_param, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(add),
&analysis.GetValueDefinedAt(constant2)));
EXPECT_THAT(HloValuesAt(nested_while, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(negate)));
EXPECT_THAT(HloValuesAt(nested_while, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(add),
&analysis.GetValueDefinedAt(constant2)));
EXPECT_THAT(HloValuesAt(entry_while, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(negate),
&analysis.GetValueDefinedAt(constant1)));
EXPECT_THAT(HloValuesAt(entry_while, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(add),
&analysis.GetValueDefinedAt(constant2)));
}
}
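// A while body that swaps its tuple elements, with both inputs being the same
// constant: no phi value is required.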
TEST_P(HloDataflowAnalysisTest, SwizzlingWhileSharedInput) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_1, body_element_0}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant1}));
builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_FALSE(analysis.ValueIsDefinedAt(body_param, {0}));
}
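// A while body that swaps its tuple elements each iteration: in SSA form
// every position becomes a phi and neither constant is live out.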
TEST_P(HloDataflowAnalysisTest, SwizzlingWhile) {
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
auto body_element_0 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
auto body_element_1 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
body_builder.AddInstruction(
HloInstruction::CreateTuple({body_element_1, body_element_0}));
HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
if (ssa_form) {
EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, {0}));
EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, {0}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(body_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(body_param, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, {0}));
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {0}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(xla_while, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, {0}));
EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, {0}).is_phi());
EXPECT_TRUE(analysis.ValueIsDefinedAt(cond_param, {1}));
EXPECT_TRUE(analysis.GetValueDefinedAt(cond_param, {1}).is_phi());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {})
.live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {0})
.live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, {1})
.live_out_of_module());
} else {
EXPECT_THAT(HloValuesAt(xla_while, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(constant1),
&analysis.GetValueDefinedAt(constant2)));
EXPECT_THAT(HloValuesAt(xla_while, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(constant1),
&analysis.GetValueDefinedAt(constant2)));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
}
}
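// A select between two array-shaped constants defines a new value.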
TEST_P(HloDataflowAnalysisTest, ArraySelect) {
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
auto select = builder.AddInstruction(HloInstruction::CreateTernary(
scalar_shape_, HloOpcode::kSelect, pred, constant1, constant2));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_TRUE(analysis.ValueIsDefinedAt(select));
EXPECT_FALSE(analysis.GetValueDefinedAt(constant1).live_out_of_module());
EXPECT_FALSE(analysis.GetValueDefinedAt(constant2).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(select).live_out_of_module());
}
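// A bitcast defines its own value only when bitcast_defines_value is set.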
TEST_P(HloDataflowAnalysisTest, BitcastDefinesValue) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(scalar_shape_, constant));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
{
    const HloDataflowAnalysis& analysis =
        RunAnalysis(ssa_form, /*bitcast_defines_value=*/true);
EXPECT_EQ(analysis.values().size(), 2);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant));
EXPECT_TRUE(analysis.ValueIsDefinedAt(bitcast));
EXPECT_FALSE(analysis.GetValueDefinedAt(constant).live_out_of_module());
EXPECT_TRUE(analysis.GetValueDefinedAt(bitcast).live_out_of_module());
}
{
    const HloDataflowAnalysis& analysis =
        RunAnalysis(ssa_form, /*bitcast_defines_value=*/false);
EXPECT_EQ(analysis.values().size(), 1);
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant));
EXPECT_FALSE(analysis.ValueIsDefinedAt(bitcast));
EXPECT_TRUE(analysis.GetValueDefinedAt(constant).live_out_of_module());
}
}
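// Copying a tuple defines a new top-level value; the leaf elements still come
// from the parameters.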
TEST_P(HloDataflowAnalysisTest, TupleCopy) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param0));
EXPECT_TRUE(analysis.ValueIsDefinedAt(param1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(copy, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(copy, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(copy, {1}));
EXPECT_THAT(HloValuesAt(copy, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param0)));
EXPECT_THAT(HloValuesAt(copy, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param1)));
EXPECT_TRUE(
analysis.GetValueDefinedAt(copy, {}).live_out_of_module());
}
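// An optimization barrier defines no values; its operands flow through it.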
TEST_P(HloDataflowAnalysisTest, OptimizationBarrier) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
auto barrier = builder.AddInstruction(HloInstruction::CreateUnary(
tuple->shape(), HloOpcode::kOptimizationBarrier, tuple));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param0));
EXPECT_TRUE(analysis.ValueIsDefinedAt(param1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(tuple, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(barrier, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(barrier, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(barrier, {1}));
EXPECT_THAT(HloValuesAt(barrier, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param0)));
EXPECT_THAT(HloValuesAt(barrier, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param1)));
}
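// copy-start defines the tuple, destination, and context values; the source
// slot aliases the constant and copy-done forwards the destination.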
TEST_P(HloDataflowAnalysisTest, CopyStartAndCopyDone) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto copy_start = builder.AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({constant->shape(), constant->shape(),
ShapeUtil::MakeShape(U32, {})}),
constant));
auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopyDone, copy_start));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, {}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(copy_start, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(copy_start, {2}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(copy_done, {}));
EXPECT_THAT(
HloValuesAt(copy_done, {}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(copy_start, {0})));
EXPECT_TRUE(analysis.GetValueDefinedAt(copy_start, {0})
.live_out_of_module());
}
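// Async custom-call: the operand slot aliases the parameter and the result
// slot aliases the wrapped instruction's value at every async stage.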
TEST_P(HloDataflowAnalysisTest, AsyncOps) {
std::string hlo_str = R"(
HloModule module
ENTRY entry {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
ROOT async-done = f32[2,3] custom-call-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
const HloInstruction* param =
module_->entry_computation()->parameter_instruction(0);
const HloInstruction* async_start =
FindInstruction(module_.get(), "async-start");
const HloInstruction* async_update =
FindInstruction(module_.get(), "async-update");
const HloInstruction* async_done =
FindInstruction(module_.get(), "async-done");
const HloInstruction* async_wrapped_instruction =
async_start->async_wrapped_instruction();
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_start, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_start, {0, 0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_start, {1}));
EXPECT_THAT(HloValuesAt(async_start, {1}),
UnorderedElementsAre(
&analysis.GetValueDefinedAt(async_wrapped_instruction, {})));
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_start, {2}));
EXPECT_THAT(HloValuesAt(async_start, {0, 0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param, {})));
EXPECT_TRUE(analysis.GetValueDefinedAt(async_wrapped_instruction, {})
.live_out_of_module());
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_update, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_update, {0, 0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_update, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_update, {2}));
EXPECT_THAT(HloValuesAt(async_update, {0, 0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param, {})));
EXPECT_THAT(HloValuesAt(async_update, {1}),
UnorderedElementsAre(
&analysis.GetValueDefinedAt(async_wrapped_instruction, {})));
EXPECT_FALSE(analysis.ValueIsDefinedAt(async_done, {}));
EXPECT_THAT(HloValuesAt(async_done, {}),
UnorderedElementsAre(
&analysis.GetValueDefinedAt(async_wrapped_instruction, {})));
}
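// Async call: at each stage the called computation's parameters alias the
// caller's arguments and async-done forwards the callee root.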
TEST_P(HloDataflowAnalysisTest, AsyncCall) {
std::string hlo_str = R"(
HloModule AsyncCall
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_1)
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %a)
%async-update = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-update(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %b)
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_2, f32[4096]{0} %negate_3)
%async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-update)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
const HloInstruction* a = FindInstruction(module_.get(), "a");
const HloInstruction* b = FindInstruction(module_.get(), "b");
const HloInstruction* async_done =
FindInstruction(module_.get(), "async-done");
for (std::string async_name : {"async-start", "async-update", "async-done"}) {
const HloInstruction* async_op = FindInstruction(module_.get(), async_name);
const HloComputation* called_computation =
async_op->async_wrapped_instruction()->called_computations()[0];
const HloInstruction* parameter0 =
called_computation->parameter_instruction(0);
EXPECT_FALSE(analysis.ValueIsDefinedAt(parameter0));
EXPECT_THAT(HloValuesAt(parameter0),
UnorderedElementsAre(&analysis.GetValueDefinedAt(a)));
const HloInstruction* parameter1 =
called_computation->parameter_instruction(1);
EXPECT_FALSE(analysis.ValueIsDefinedAt(parameter1));
EXPECT_THAT(HloValuesAt(parameter1),
UnorderedElementsAre(&analysis.GetValueDefinedAt(b)));
const HloInstruction* root = called_computation->root_instruction();
EXPECT_TRUE(analysis.ValueIsDefinedAt(root));
EXPECT_THAT(HloValuesAt(async_done),
UnorderedElementsAre(&analysis.GetValueDefinedAt(root)));
}
}
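// An async op with a tuple-shaped result defines the result value at each
// stage.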
TEST_P(HloDataflowAnalysisTest, TupleShapedAsyncOp) {
std::string hlo_str = R"(
HloModule module
ENTRY entry {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), (f32[2,3], f32[2,3]), u32[]) custom-call-start(p0), custom_call_target="foo"
async-update = ((f32[2,3]), (f32[2,3], f32[2,3]), u32[]) custom-call-update(async-start)
ROOT async-done = (f32[2,3], f32[2,3]) custom-call-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
const HloInstruction* async_start =
FindInstruction(module_.get(), "async-start");
const HloInstruction* async_update =
FindInstruction(module_.get(), "async-update");
const HloInstruction* async_done =
FindInstruction(module_.get(), "async-done");
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_start, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_update, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(async_done));
}
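// Send defines the tuple, context, and token values; the data slot aliases
// the parameter.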
TEST_P(HloDataflowAnalysisTest, SendAndSendDone) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto send = builder.AddInstruction(
HloInstruction::CreateSend(param, token, 0));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 6);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(send, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send, {2}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(send_done));
EXPECT_THAT(HloValuesAt(send, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param)));
}
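// set-dimension-size defines a new value rather than forwarding its operand.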
TEST_P(HloDataflowAnalysisTest, SetDimensionSizeCreatesValue) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto size = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(3)));
auto sds = builder.AddInstruction(
HloInstruction::CreateSetDimensionSize(vector_shape_, param, size, 0));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
{
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_TRUE(analysis.ValueIsDefinedAt(param));
EXPECT_TRUE(analysis.ValueIsDefinedAt(sds));
EXPECT_TRUE(analysis.GetValueDefinedAt(sds).live_out_of_module());
}
}
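// Recv defines values for the whole tuple including the data; recv-done
// forwards the data slot.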
TEST_P(HloDataflowAnalysisTest, RecvAndRecvDone) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto recv = builder.AddInstruction(
HloInstruction::CreateRecv(scalar_shape_, token, 0));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form);
EXPECT_EQ(analysis.values().size(), 7);
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, {}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, {1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv, {2}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(recv_done, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(recv_done, {1}));
EXPECT_THAT(HloValuesAt(recv_done, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(recv, {0})));
EXPECT_TRUE(
analysis.GetValueDefinedAt(recv, {0}).live_out_of_module());
}
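// In a chain of elementwise ops no two distinct values interfere; a value
// only interferes with itself.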
TEST_P(HloDataflowAnalysisTest, ElementwiseChainInterference) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, negate));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kLog, exp));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
DependencyHloOrdering ordering(module_.get());
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, log));
EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, log));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, log));
EXPECT_FALSE(InstructionsMayInterfere(ordering, log, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, log, exp));
EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, exp));
}
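// Under a sequential order, entry parameters are live-in and interfere with
// values defined before their final use.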
TEST_P(HloDataflowAnalysisTest, MultipleEntryParameters_Sequential) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, vector_shape_, "param1"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param0));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param1));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
vector_shape_, HloOpcode::kAdd, negate, exp));
auto entry = module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
HloSchedule schedule(module_.get());
schedule.set_sequence(entry, {param0, negate, param1, exp, add});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(InstructionsMayInterfere(ordering, param0, param1));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param0, add));
EXPECT_TRUE(InstructionsMayInterfere(ordering, param1, param0));
EXPECT_TRUE(InstructionsMayInterfere(ordering, param1, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param1, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param1, add));
EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}
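// Interference inside a while body under a sequential order; DCE is disabled
// so the dead instructions participate.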
TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) {
auto body_builder = HloComputation::Builder(TestName());
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "body_param"));
auto constant = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto exp = body_builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kExp, constant));
auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, exp, body_param));
auto dead_constant = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto dead_negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape_, HloOpcode::kNegate, dead_constant));
HloComputation* body = module_->AddEmbeddedComputation(
body_builder.Build(add));
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "cond_param"));
auto cond_constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* condition =
module_->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape_, condition, body, param));
auto entry = module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
bool ssa_form = GetParam();
  RunAnalysis(ssa_form, /*bitcast_defines_value=*/false,
              /*run_dce=*/false);
HloSchedule schedule(module_.get());
schedule.set_sequence(entry, {param, xla_while});
schedule.set_sequence(condition, {cond_param, cond_constant});
schedule.set_sequence(
body, {constant, exp, body_param, add, dead_constant, dead_negate});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(InstructionsMayInterfere(ordering, add, dead_constant));
EXPECT_TRUE(InstructionsMayInterfere(ordering, add, dead_negate));
if (ssa_form) {
EXPECT_TRUE(InstructionsMayInterfere(ordering, body_param, constant));
EXPECT_TRUE(InstructionsMayInterfere(ordering, body_param, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, body_param));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, cond_param));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, xla_while));
EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, cond_param));
EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, body_param, xla_while));
EXPECT_FALSE(InstructionsMayInterfere(ordering, cond_param, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, cond_param, xla_while));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, xla_while));
}
}
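// A non-elementwise user (reverse) may interfere with its operand.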
TEST_P(HloDataflowAnalysisTest, NonElementwiseOperand) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, exp));
auto reverse = builder.AddInstruction(
HloInstruction::CreateReverse(vector_shape_, negate, {0}));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
DependencyHloOrdering ordering(module_.get());
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, reverse));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, negate));
EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, reverse));
}
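// Two independent users of the same parameter create overlapping live
// ranges: negate and exp interfere.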
TEST_P(HloDataflowAnalysisTest, OverlappedValues) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
vector_shape_, HloOpcode::kAdd, negate, exp));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
DependencyHloOrdering ordering(module_.get());
EXPECT_TRUE(InstructionsMayInterfere(ordering, param, negate));
EXPECT_TRUE(InstructionsMayInterfere(ordering, param, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));
EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}
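// Same graph as OverlappedValues, but an explicit sequential order removes
// the param/exp interference.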
TEST_P(HloDataflowAnalysisTest, OverlappedValuesSequentialOrder) {
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
vector_shape_, HloOpcode::kAdd, negate, exp));
auto entry = module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
HloSchedule schedule(module_.get());
schedule.set_sequence(entry, {param, negate, exp, add});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(InstructionsMayInterfere(ordering, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, add));
EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, exp));
EXPECT_TRUE(InstructionsMayInterfere(ordering, exp, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, negate, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, add));
EXPECT_FALSE(InstructionsMayInterfere(ordering, add, exp));
}
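// A value live across a call interferes with values defined inside the
// called computation.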
TEST_P(HloDataflowAnalysisTest, EmbeddedComputationInterference) {
auto embedded_builder = HloComputation::Builder(TestName() + "_embedded");
auto embedded_param = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "embedded_param"));
auto embedded_log =
embedded_builder.AddInstruction(HloInstruction::CreateUnary(
vector_shape_, HloOpcode::kLog, embedded_param));
auto embedded_computation =
module_->AddEmbeddedComputation(embedded_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vector_shape_, "param"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vector_shape_, HloOpcode::kExp, param));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(vector_shape_, {exp}, embedded_computation));
builder.AddInstruction(HloInstruction::CreateBinary(
vector_shape_, HloOpcode::kAdd, negate, call));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
RunAnalysis(GetParam());
DependencyHloOrdering ordering(module_.get());
EXPECT_FALSE(InstructionsMayInterfere(ordering, exp, embedded_log));
EXPECT_TRUE(InstructionsMayInterfere(ordering, negate, embedded_log));
}
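// GetFlattenedValueSet on a tuple returns the values of the tuple and of all
// its elements.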
TEST_P(HloDataflowAnalysisTest, GetFlattenedValueSet) {
const char* hlo_text = R"(
HloModule test_aliasing_module
ENTRY root {
param = s32[1000] parameter(0)
p0 = s32[1000] copy(param)
p1 = s32[1000] copy(param)
ROOT t = (s32[1000], s32[1000]) tuple(p0, p1)
})";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
auto entry = module_->entry_computation();
  auto& dataflow_analysis = RunAnalysis(GetParam());
  auto set = dataflow_analysis.GetFlattenedValueSet(
      entry->GetInstructionWithName("t"));
EXPECT_EQ(set.values().size(), 3);
}
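// A conditional whose branch computations are identities: the branch
// parameters alias the conditional's operands.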
TEST_P(HloDataflowAnalysisTest, ConditionalWithIdentity) {
auto true_builder = HloComputation::Builder(TestName() + "_true");
auto true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "true_param"));
HloComputation* true_computation =
module_->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder(TestName() + "_false");
auto false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "false_param"));
HloComputation* false_computation =
module_->AddEmbeddedComputation(false_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape_, pred, constant1, true_computation, constant2,
false_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());
EXPECT_TRUE(analysis.ValueIsDefinedAt(pred));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_FALSE(analysis.ValueIsDefinedAt(true_param));
EXPECT_FALSE(analysis.ValueIsDefinedAt(false_param));
EXPECT_EQ(analysis.GetUniqueValueAt(true_param),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(false_param),
analysis.GetValueDefinedAt(constant2));
EXPECT_THAT(analysis.GetValueDefinedAt(pred).GetUses(),
ElementsAre(HloUse{conditional, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
ElementsAre(HloUse{conditional, 1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
ElementsAre(HloUse{conditional, 2, {}}));
bool ssa_form = GetParam();
if (ssa_form) {
EXPECT_EQ(analysis.values().size(), 4);
EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
} else {
EXPECT_EQ(analysis.values().size(), 3);
EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
EXPECT_THAT(HloValuesAt(conditional),
UnorderedElementsAre(&analysis.GetValueDefinedAt(constant1),
&analysis.GetValueDefinedAt(constant2)));
}
}
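
// Conditional whose branches receive a tuple operand: the branch parameters
// and their get-tuple-elements alias the values of the tuple operand.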
TEST_P(HloDataflowAnalysisTest, ConditionalTakingTupleOperand) {
auto true_builder = HloComputation::Builder(TestName() + "_true");
auto true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "true_param"));
auto true_x = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, true_param, 0));
auto true_y = true_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, true_param, 1));
auto add = true_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kAdd, true_x, true_y));
HloComputation* true_computation =
module_->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder(TestName() + "_false");
auto false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape_, "false_param"));
auto false_x = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, false_param, 0));
auto false_y = false_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, false_param, 1));
auto sub = false_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape_, HloOpcode::kSubtract, false_x, false_y));
HloComputation* false_computation =
module_->AddEmbeddedComputation(false_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
auto tuple_operand = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape_, pred, tuple_operand, true_computation, tuple_operand,
false_computation));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());
EXPECT_TRUE(analysis.ValueIsDefinedAt(pred));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple_operand));
EXPECT_TRUE(analysis.ValueIsDefinedAt(add));
EXPECT_TRUE(analysis.ValueIsDefinedAt(sub));
EXPECT_FALSE(analysis.ValueIsDefinedAt(true_param));
EXPECT_FALSE(analysis.ValueIsDefinedAt(false_param));
EXPECT_FALSE(analysis.ValueIsDefinedAt(true_x));
EXPECT_FALSE(analysis.ValueIsDefinedAt(true_y));
EXPECT_FALSE(analysis.ValueIsDefinedAt(false_x));
EXPECT_FALSE(analysis.ValueIsDefinedAt(false_y));
EXPECT_EQ(analysis.GetUniqueValueAt(true_param),
analysis.GetValueDefinedAt(tuple_operand));
EXPECT_EQ(analysis.GetUniqueValueAt(false_param),
analysis.GetValueDefinedAt(tuple_operand));
EXPECT_EQ(analysis.GetUniqueValueAt(true_x),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(true_y),
analysis.GetValueDefinedAt(constant2));
EXPECT_EQ(analysis.GetUniqueValueAt(false_x),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(false_y),
analysis.GetValueDefinedAt(constant2));
EXPECT_THAT(analysis.GetValueDefinedAt(pred).GetUses(),
ElementsAre(HloUse{conditional, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant1).GetUses(),
UnorderedElementsAre(HloUse{conditional, 1, {0}},
HloUse{conditional, 2, {0}},
HloUse{add, 0, {}}, HloUse{sub, 0, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(constant2).GetUses(),
UnorderedElementsAre(HloUse{conditional, 1, {1}},
HloUse{conditional, 2, {1}},
HloUse{add, 1, {}}, HloUse{sub, 1, {}}));
EXPECT_THAT(analysis.GetValueDefinedAt(tuple_operand).GetUses(),
UnorderedElementsAre(
HloUse{conditional, 1, {}}, HloUse{conditional, 2, {}},
HloUse{true_x, 0, {}}, HloUse{true_y, 0, {}},
HloUse{false_x, 0, {}}, HloUse{false_y, 0, {}}));
bool ssa_form = GetParam();
if (ssa_form) {
EXPECT_EQ(analysis.values().size(), 7);
EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
} else {
EXPECT_EQ(analysis.values().size(), 6);
EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
EXPECT_THAT(HloValuesAt(conditional),
UnorderedElementsAre(&analysis.GetValueDefinedAt(add),
&analysis.GetValueDefinedAt(sub)));
}
}
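
// A conditional nested inside another conditional; values must propagate
// through both levels of branch computations.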
TEST_P(HloDataflowAnalysisTest, NestedConditionals) {
auto computation1 = module_->AddEmbeddedComputation(
CreateR0F32UnaryOpComputation(HloOpcode::kCeil));
auto computation2 = module_->AddEmbeddedComputation(
CreateR0F32UnaryOpComputation(HloOpcode::kFloor));
auto computation3 = module_->AddEmbeddedComputation(
CreateR0F32UnaryOpComputation(HloOpcode::kNegate));
const Shape scalar_bool_shape = ShapeUtil::MakeShape(PRED, {});
const Shape tuple_param_shape = ShapeUtil::MakeTupleShape(
{scalar_bool_shape, scalar_shape_, scalar_shape_});
auto inner_builder =
HloComputation::Builder(TestName() + "_inner_conditional");
auto param_cond = inner_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_param_shape, "param_cond"));
auto pred_cond = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_bool_shape, param_cond, 0));
auto true_operand_cond = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param_cond, 1));
auto false_operand_cond = inner_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape_, param_cond, 2));
auto inner_conditional =
inner_builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape_, pred_cond, true_operand_cond, computation1,
false_operand_cond, computation2));
auto inner_conditional_computation =
module_->AddEmbeddedComputation(inner_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto pred1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto pred2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.2f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.3f)));
auto tuple_operand = builder.AddInstruction(
HloInstruction::CreateTuple({pred2, constant1, constant2}));
auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape_, pred1, tuple_operand, inner_conditional_computation,
constant3, computation3));
module_->AddEntryComputation(builder.Build());
SCOPED_TRACE(module_->ToString());
const HloDataflowAnalysis& analysis = RunAnalysis(GetParam());
EXPECT_TRUE(analysis.ValueIsDefinedAt(pred1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(pred2));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant1));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant2));
EXPECT_TRUE(analysis.ValueIsDefinedAt(constant3));
EXPECT_TRUE(analysis.ValueIsDefinedAt(tuple_operand));
EXPECT_TRUE(analysis.ValueIsDefinedAt(computation1->root_instruction()));
EXPECT_TRUE(analysis.ValueIsDefinedAt(computation2->root_instruction()));
EXPECT_TRUE(analysis.ValueIsDefinedAt(computation3->root_instruction()));
auto computation1_param = computation1->parameter_instruction(0);
auto computation2_param = computation2->parameter_instruction(0);
auto computation3_param = computation3->parameter_instruction(0);
EXPECT_FALSE(analysis.ValueIsDefinedAt(computation1_param));
EXPECT_FALSE(analysis.ValueIsDefinedAt(computation2_param));
EXPECT_FALSE(analysis.ValueIsDefinedAt(computation3_param));
EXPECT_EQ(analysis.GetUniqueValueAt(computation1_param),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(computation2_param),
analysis.GetValueDefinedAt(constant2));
EXPECT_EQ(analysis.GetUniqueValueAt(computation3_param),
analysis.GetValueDefinedAt(constant3));
EXPECT_FALSE(analysis.ValueIsDefinedAt(param_cond));
EXPECT_FALSE(analysis.ValueIsDefinedAt(pred_cond));
EXPECT_FALSE(analysis.ValueIsDefinedAt(true_operand_cond));
EXPECT_FALSE(analysis.ValueIsDefinedAt(false_operand_cond));
EXPECT_EQ(analysis.GetUniqueValueAt(param_cond),
analysis.GetValueDefinedAt(tuple_operand));
EXPECT_EQ(analysis.GetUniqueValueAt(pred_cond),
analysis.GetValueDefinedAt(pred2));
EXPECT_EQ(analysis.GetUniqueValueAt(true_operand_cond),
analysis.GetValueDefinedAt(constant1));
EXPECT_EQ(analysis.GetUniqueValueAt(false_operand_cond),
analysis.GetValueDefinedAt(constant2));
bool ssa_form = GetParam();
if (ssa_form) {
EXPECT_EQ(analysis.values().size(), 11);
EXPECT_TRUE(analysis.ValueIsDefinedAt(inner_conditional));
EXPECT_TRUE(analysis.ValueIsDefinedAt(conditional));
} else {
EXPECT_EQ(analysis.values().size(), 9);
EXPECT_FALSE(analysis.ValueIsDefinedAt(inner_conditional));
EXPECT_FALSE(analysis.ValueIsDefinedAt(conditional));
EXPECT_THAT(
HloValuesAt(inner_conditional),
UnorderedElementsAre(
&analysis.GetValueDefinedAt(computation1->root_instruction()),
&analysis.GetValueDefinedAt(computation2->root_instruction())));
EXPECT_THAT(
HloValuesAt(conditional),
UnorderedElementsAre(
&analysis.GetValueDefinedAt(computation1->root_instruction()),
&analysis.GetValueDefinedAt(computation2->root_instruction()),
&analysis.GetValueDefinedAt(computation3->root_instruction())));
}
}
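
// add-dependency forwards the value of its data operand, so no new value is
// defined at the root.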
TEST_P(HloDataflowAnalysisTest, AddDependency) {
std::string module_string = R"(
HloModule AddDependency
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
%p = f32[3] parameter(0)
%token0 = token[] after-all()
ROOT %add_dep = f32[3] add-dependency(f32[3] %p, token[] %token0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string, GetModuleConfigForTest()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> analysis,
HloDataflowAnalysis::Run(*module));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kAddDependency);
EXPECT_EQ(analysis->values().size(), 2);
EXPECT_FALSE(analysis->ValueIsDefinedAt(root));
}
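
// all-reduce-start defines a new value which all-reduce-done forwards.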
TEST_F(HloDataflowAnalysisTest, AllReduceStartAndDone) {
const char* hlo_text = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2] parameter(0)
start = f32[2] all-reduce-start(p0), to_apply=add
ROOT done = f32[2] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> analysis,
HloDataflowAnalysis::Run(*module));
HloInstruction* done = module->entry_computation()->root_instruction();
HloInstruction* start = done->mutable_operand(0);
HloInstruction* param0 = start->mutable_operand(0);
EXPECT_TRUE(analysis->ValueIsDefinedAt(start, {}));
EXPECT_FALSE(analysis->ValueIsDefinedAt(done));
EXPECT_THAT(analysis->GetValueDefinedAt(param0).GetUses(),
UnorderedElementsAre(HloUse{start, 0, {}}));
EXPECT_THAT(analysis->GetValueDefinedAt(start).GetUses(),
UnorderedElementsAre(HloUse{done, 0, {}}));
}
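
// A variadic all-reduce-start defines values for its result tuple and each
// element; all-reduce-done forwards them.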
TEST_F(HloDataflowAnalysisTest, AllReduceStartAndDoneTwoOperands) {
const char* hlo_text = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2] parameter(0)
p1 = f32[2] parameter(1)
start = (f32[2], f32[2]) all-reduce-start(p0, p1), to_apply=add
ROOT done = (f32[2], f32[2]) all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> analysis,
HloDataflowAnalysis::Run(*module));
HloInstruction* done = module->entry_computation()->root_instruction();
HloInstruction* start = done->mutable_operand(0);
HloInstruction* param0 = start->mutable_operand(0);
HloInstruction* param1 = start->mutable_operand(1);
EXPECT_TRUE(analysis->ValueIsDefinedAt(start, {}));
EXPECT_TRUE(analysis->ValueIsDefinedAt(start, {0}));
EXPECT_TRUE(analysis->ValueIsDefinedAt(start, {1}));
EXPECT_FALSE(analysis->ValueIsDefinedAt(done));
EXPECT_THAT(analysis->GetValueDefinedAt(param0).GetUses(),
UnorderedElementsAre(HloUse{start, 0, {}}));
EXPECT_THAT(analysis->GetValueDefinedAt(param1).GetUses(),
UnorderedElementsAre(HloUse{start, 1, {}}));
EXPECT_THAT(analysis->GetValueDefinedAt(start, {}).GetUses(),
UnorderedElementsAre(HloUse{done, 0, {}}));
}
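
// all-gather-start forwards its operands into the {0} subtree of its result
// and defines fresh values in the {1} subtree, which all-gather-done then
// forwards.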
TEST_F(HloDataflowAnalysisTest, AllGatherStartAndDoneWithTuple) {
const char* hlo_text = R"(
HloModule test
ENTRY entry {
p0 = f32[2] parameter(0)
p1 = bf16[2] parameter(1)
start = ((f32[2], bf16[2]), (f32[4], bf16[4])) all-gather-start(p0, p1), dimensions={0}
ROOT done = (f32[4], bf16[4]) all-gather-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
  const HloDataflowAnalysis& analysis = RunAnalysis(/*ssa_form=*/false);
absl::Status status = analysis.Verify();
EXPECT_TRUE(status.ok()) << status;
HloInstruction* done = module_->entry_computation()->root_instruction();
HloInstruction* start = done->mutable_operand(0);
HloInstruction* param0 = start->mutable_operand(0);
HloInstruction* param1 = start->mutable_operand(1);
EXPECT_TRUE(analysis.ValueIsDefinedAt(start, {}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(start, {0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(start, {1}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(start, {0, 0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(start, {0, 1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(start, {1, 0}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(start, {1, 1}));
EXPECT_TRUE(analysis.ValueIsDefinedAt(done, {}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(done, {0}));
EXPECT_FALSE(analysis.ValueIsDefinedAt(done, {1}));
EXPECT_THAT(
analysis.GetValueDefinedAt(param0).GetUses(),
UnorderedElementsAre(HloUse{start, 0, {}}, HloUse{done, 0, {0, 0}}));
EXPECT_THAT(
analysis.GetValueDefinedAt(param1).GetUses(),
UnorderedElementsAre(HloUse{start, 1, {}}, HloUse{done, 0, {0, 1}}));
EXPECT_THAT(HloValuesAt(start, {0, 0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param0, {})));
EXPECT_THAT(HloValuesAt(start, {0, 1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(param1, {})));
EXPECT_THAT(HloValuesAt(done, {0}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(start, {1, 0})));
EXPECT_THAT(HloValuesAt(done, {1}),
UnorderedElementsAre(&analysis.GetValueDefinedAt(start, {1, 1})));
}
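
// Run the parameterized tests with ssa_form = false and true.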
INSTANTIATE_TEST_SUITE_P(HloDataflowAnalysisInstantiation,
HloDataflowAnalysisTest,
::testing::Values(false, true));
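
// Convenience wrapper that runs dataflow analysis in non-SSA form with an
// optional backend-specific buffer-sharing predicate.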
std::unique_ptr<HloDataflowAnalysis> RunAnalysis(
const HloModule& module,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
  return HloDataflowAnalysis::Run(module, /*ssa_form=*/false,
                                  /*bitcast_defines_value=*/false,
                                  can_share_buffer)
.value();
}
using DoesNotUseOperandBufferTest = HloTestBase;
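
// A get-tuple-element reads only the top-level tuple buffer (the pointer
// table), not the element buffers it forwards.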
TEST_F(DoesNotUseOperandBufferTest, GetTupleElement) {
auto builder = HloComputation::Builder(TestName());
Shape elem_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1));
builder.AddInstruction(
HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {0}, gte0));
EXPECT_TRUE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {1}, gte1));
EXPECT_FALSE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {}, gte0));
EXPECT_FALSE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {}, gte1));
}
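
// The fused dynamic-update-slice reads only tuple element {1}; element {0}
// is not used by the fusion.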
TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
auto starts = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update,
std::initializer_list<HloInstruction*>({starts})));
builder.AddInstruction(
HloInstruction::CreateTuple({gte0, dynamic_update_slice}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{dynamic_update_slice, starts, update, gte1},
HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {0}, fusion));
EXPECT_FALSE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {1}, fusion));
}
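
// Like FusedDynamicUpdateSlice above, but the fusion's operand tuple is a
// permutation of another tuple, so uses must be tracked through the
// aliasing.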
TEST_F(DoesNotUseOperandBufferTest, IndirectUses) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
auto t0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple_param, 0));
auto t1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple_param, 1));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({t1, t0}));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
auto starts = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update,
std::initializer_list<HloInstruction*>({starts})));
builder.AddInstruction(
HloInstruction::CreateTuple({gte0, dynamic_update_slice}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{dynamic_update_slice, starts, update, gte1},
HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {0}, fusion));
EXPECT_FALSE(dataflow_analysis->DoesNotUseOperandBuffer(tuple, {1}, fusion));
EXPECT_TRUE(
dataflow_analysis->DoesNotUseOperandBuffer(tuple_param, {1}, fusion));
EXPECT_FALSE(
dataflow_analysis->DoesNotUseOperandBuffer(tuple_param, {0}, fusion));
}
using CanShareOperandBufferWithUserTest = HloTestBase;
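
// An elementwise op may reuse the buffer of an operand with the same shape.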
TEST_F(CanShareOperandBufferWithUserTest, ElementWiseSameShape) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {8});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kLog, exp));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(param, {}, exp, {}));
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(exp, {}, log, {}));
}
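
// A loop fusion whose root is non-elementwise (reverse) must not alias its
// operand's buffer.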
TEST_F(CanShareOperandBufferWithUserTest,
NonElementwiseLoopFusionCantAliasOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "param0"));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(data_shape, HloOpcode::kNegate, param0));
auto reverse = builder.AddInstruction(
HloInstruction::CreateReverse(data_shape, neg, {0, 1}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{reverse, neg}, HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(param0, {}, fusion, {}));
}
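
// A multi-output fusion of plain copies may alias either parameter buffer
// with either output element.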
TEST_F(CanShareOperandBufferWithUserTest,
MultiOutputFusionCanAliasOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
  Shape in_shape = ShapeUtil::MakeShape(F32, {8});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, in_shape, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, in_shape, "param1"));
auto copy0 = builder.AddInstruction(
HloInstruction::CreateUnary(in_shape, HloOpcode::kCopy, param0));
auto copy1 = builder.AddInstruction(
HloInstruction::CreateUnary(in_shape, HloOpcode::kCopy, param1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({copy1, copy0}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{tuple, copy1, copy0}, HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {1}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {1}));
}
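
// A purely elementwise loop fusion (negate followed by exp) may share its
// operand's buffer.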
TEST_F(CanShareOperandBufferWithUserTest,
ElementwiseLoopFusionCantAliasOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(data_shape, HloOpcode::kNegate, operand));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(data_shape, HloOpcode::kExp, neg));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{exp, neg}, HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(operand, {},
fusion, {}));
}
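
// A dynamic-update-slice fed by a dynamic-slice of the same operand at the
// same indices is effectively in-place, so the operand buffer may be shared.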
TEST_F(CanShareOperandBufferWithUserTest,
CanShareOperandWhenDynamicUpdateSliceIsFedByDynamicSliceWithSameIndex) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
Shape slice_shape = ShapeUtil::MakeShape(F32, {1, 2});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "param0"));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(0)));
auto ds = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape, param, {zero, zero}, {1, 2}));
auto dus = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, param, ds, {zero, zero}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{dus, ds, zero}, HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(param, {}, fusion, {}));
}
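
// HLO-text variant of the previous test, with multi-dimensional dynamic
// indices.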
TEST_F(CanShareOperandBufferWithUserTest, DUSWithSliceWithSameIndices) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10,20,30] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
slice = f32[1,1,30] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,1,30}
ROOT dus = f32[10,20,30] dynamic-update-slice(p0, slice, p1, p2, p3)
}
ENTRY test {
p0 = f32[10,20,30] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT fusion = f32[10,20,30] fusion(p0, p1, p2, p3), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
auto* fusion = module->entry_computation()->root_instruction();
auto* param = module->entry_computation()->parameter_instruction(0);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(param, {}, fusion, {}));
}
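
// The compare produces PRED elements, which differ in size from the f32
// inputs, so the buffers cannot be shared.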
TEST_F(CanShareOperandBufferWithUserTest, ElementWiseDifferentShape) {
auto builder = HloComputation::Builder(TestName());
Shape in_shape = ShapeUtil::MakeShape(F32, {8});
Shape out_shape = ShapeUtil::MakeShape(PRED, {8});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, in_shape, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, in_shape, "param1"));
auto result = builder.AddInstruction(HloInstruction::CreateCompare(
out_shape, param0, param1, ComparisonDirection::kEq));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(param0, {}, result, {}));
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(param1, {}, result, {}));
}
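
// A kCopy may share its operand's buffer.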
TEST_F(CanShareOperandBufferWithUserTest, CopyShares) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {8});
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(param, {}, exp, {}));
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(exp, {}, copy, {}));
}
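
// The fused DUS updates tuple element {1} in place, so only that element's
// buffer may be shared with the fusion output.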
TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSlice) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
auto starts = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, gte1, update,
std::initializer_list<HloInstruction*>({starts})));
builder.AddInstruction(
HloInstruction::CreateTuple({gte0, dynamic_update_slice}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{dynamic_update_slice, starts, update, gte1},
HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(tuple, {0}, fusion, {}));
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(tuple, {1}, fusion, {}));
}
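
// Converts wrapped around the DUS do not prevent sharing with the operand
// being updated.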
TEST_F(CanShareOperandBufferWithUserTest,
FusedDynamicUpdateSliceWithConvertCanShare) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
Shape data_shape_bf16 = ShapeUtil::MakeShape(BF16, {8});
auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
auto convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(data_shape_bf16, gte1));
auto starts = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto update = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
auto dynamic_update_slice =
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape_bf16, convert1, update,
std::initializer_list<HloInstruction*>({starts})));
auto convert2 = builder.AddInstruction(
HloInstruction::CreateConvert(data_shape, dynamic_update_slice));
builder.AddInstruction(HloInstruction::CreateTuple({gte0, convert2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{convert2, dynamic_update_slice, starts, update, convert1},
HloInstruction::FusionKind::kLoop);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(gte1, {}, fusion, {}));
}
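
// DUS may share the buffer of the operand being updated, but not those of
// the update or the start indices.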
TEST_F(CanShareOperandBufferWithUserTest, DynamicUpdateSliceCanShare) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {1, 8});
Shape update_shape = ShapeUtil::MakeShape(F32, {1, 4});
Shape starts_shape = ShapeUtil::MakeShape(S32, {2});
auto data = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "data"));
auto update = builder.AddInstruction(
HloInstruction::CreateParameter(1, update_shape, "update"));
auto start = builder.AddInstruction(
HloInstruction::CreateParameter(2, starts_shape, "start"));
auto dus = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
data_shape, data, update, {start}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(data, {}, dus, {}));
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(update, {}, dus, {}));
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(start, {}, dus, {}));
}
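
// Scatter may share the buffer of the operand it updates, but not the
// indices or updates buffers.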
TEST_F(CanShareOperandBufferWithUserTest, ScatterCanShare) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
auto computation = module->entry_computation();
auto dataflow_analysis = RunAnalysis(*module);
HloInstruction* operand_param = computation->parameter_instruction(0);
HloInstruction* indices_param = computation->parameter_instruction(1);
HloInstruction* updates_param = computation->parameter_instruction(2);
HloInstruction* scatter = computation->root_instruction();
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(
operand_param, {}, scatter, {}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
indices_param, {}, scatter, {}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
updates_param, {}, scatter, {}));
}
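
// A variadic scatter may share each operand's buffer only with the
// corresponding output element.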
TEST_F(CanShareOperandBufferWithUserTest, MultioutputScatterCanShare) {
const char* hlo_text = R"(
HloModule MultioutputScatter
update {
lhs0 = s32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = s32[] parameter(2)
rhs1 = f32[] parameter(3)
ROOT tuple = tuple(rhs0, rhs1)
}
ENTRY main {
operand0 = s32[3,3] parameter(0)
operand1 = f32[3,3] parameter(1)
indices = s32[2] parameter(2)
updates0 = s32[2,3] parameter(3)
updates1 = f32[2,3] parameter(4)
ROOT scatter = (s32[3,3], f32[3,3])
scatter(operand0, operand1, indices, updates0, updates1),
to_apply=update,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
auto computation = module->entry_computation();
auto dataflow_analysis = RunAnalysis(*module);
HloInstruction* operand0_param = computation->parameter_instruction(0);
HloInstruction* operand1_param = computation->parameter_instruction(1);
HloInstruction* indices_param = computation->parameter_instruction(2);
HloInstruction* updates0_param = computation->parameter_instruction(3);
HloInstruction* updates1_param = computation->parameter_instruction(4);
HloInstruction* scatter = computation->root_instruction();
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(
operand0_param, {}, scatter, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
operand0_param, {}, scatter, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
operand1_param, {}, scatter, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(
operand1_param, {}, scatter, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
indices_param, {}, scatter, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
indices_param, {}, scatter, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
updates0_param, {}, scatter, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
updates0_param, {}, scatter, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
updates1_param, {}, scatter, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
updates1_param, {}, scatter, {1}));
}
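
// triangular-solve may share the right-hand side's buffer (same shape as
// the result) but not the matrix operand's.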
TEST_F(CanShareOperandBufferWithUserTest, TriangularSolveCanShare) {
const char* hlo_text = R"(
HloModule TensorFlowTriangularSolve
ENTRY main {
a = f32[4,4]{1,0} parameter(0)
b = f32[3,4]{1,0} parameter(1)
ROOT triangular-solve = f32[3,4]{1,0} triangular-solve(a, b), lower=true,
transpose_a=NO_TRANSPOSE
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
auto computation = module->entry_computation();
auto dataflow_analysis = RunAnalysis(*module);
HloInstruction* lhs_param = computation->parameter_instruction(0);
HloInstruction* rhs_param = computation->parameter_instruction(1);
HloInstruction* triangular_solve = computation->root_instruction();
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(
lhs_param, {}, triangular_solve, {}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(
rhs_param, {}, triangular_solve, {}));
}
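
// A single-operand sort may share the keys buffer.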
TEST_F(CanShareOperandBufferWithUserTest, SortCanShare) {
auto builder = HloComputation::Builder(TestName());
auto module = CreateNewVerifiedModule();
Shape keys_shape = ShapeUtil::MakeShape(F32, {8});
auto keys = builder.AddInstruction(
HloInstruction::CreateParameter(0, keys_shape, "keys"));
  TF_ASSERT_OK_AND_ASSIGN(
      auto* sort, MakeSortHlo(keys_shape, {keys}, /*dimension_to_sort=*/-1,
                              /*is_stable=*/false, &builder, module.get()));
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(keys, {}, sort, {}));
}
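
// A key-value sort may share each operand's buffer with the matching tuple
// output only.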
TEST_F(CanShareOperandBufferWithUserTest, SortCanShareWithTupleUser) {
auto builder = HloComputation::Builder(TestName());
auto module = CreateNewVerifiedModule();
Shape keys_shape = ShapeUtil::MakeShape(F32, {8});
Shape values_shape = ShapeUtil::MakeShape(F32, {8});
auto keys = builder.AddInstruction(
HloInstruction::CreateParameter(0, keys_shape, "keys"));
auto values = builder.AddInstruction(
HloInstruction::CreateParameter(1, values_shape, "values"));
  TF_ASSERT_OK_AND_ASSIGN(
      auto* sort,
      MakeSortHlo(ShapeUtil::MakeTupleShape({keys_shape, values_shape}),
                  {keys, values}, /*dimension_to_sort=*/0, /*is_stable=*/false,
                  &builder, module.get()));
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(keys, {}, sort, {0}));
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(values, {}, sort, {1}));
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(keys, {}, sort, {1}));
EXPECT_FALSE(
dataflow_analysis->CanShareOperandBufferWithUser(values, {}, sort, {0}));
}
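
// In an output fusion of dot and add, the elementwise add operand may share
// the fusion's output buffer.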
TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(data_shape, a, b, dot_dnums, precision_config));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kAdd, dot, add_operand));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{add, dot}, HloInstruction::FusionKind::kOutput);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(add_operand, {},
fusion, {}));
}
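
// An output fusion whose root consumes a reverse of the operand must not
// alias that operand's buffer.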
TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto reverse = builder.AddInstruction(
HloInstruction::CreateReverse(data_shape, operand, {0, 1}));
auto two = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, reverse, two));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{add, two, reverse}, HloInstruction::FusionKind::kOutput);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(operand, {},
fusion, {}));
}
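
// A caller-supplied can_share_buffer predicate overrides the default: here
// it permits sharing only for loop fusions, so this input fusion may not
// share.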
TEST_F(CanShareOperandBufferWithUserTest, FusionCanShareBufferCustomized) {
auto builder = HloComputation::Builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kMultiply, operand, operand));
auto two = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, mul, two));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{add, two, mul}, HloInstruction::FusionKind::kInput);
  auto dataflow_analysis = RunAnalysis(
      *module,
      /*can_share_buffer=*/[](const HloInstruction* fusion,
                              const HloInstruction*, const ShapeIndex&) {
        return fusion->IsLoopFusion();
      });
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(operand, {},
fusion, {}));
}
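
// A while op may share the buffer of its init operand.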
TEST_F(CanShareOperandBufferWithUserTest, WhileCanShare) {
auto module = CreateNewVerifiedModule();
Shape data_shape = ShapeUtil::MakeShape(F32, {8});
Shape pred_scalar_shape = ShapeUtil::MakeShape(PRED, {});
auto b = HloComputation::Builder(TestName() + ".And");
auto p0 = b.AddInstruction(
HloInstruction::CreateParameter(0, pred_scalar_shape, "p0"));
auto p1 = b.AddInstruction(
HloInstruction::CreateParameter(1, pred_scalar_shape, "p1"));
b.AddInstruction(
HloInstruction::CreateBinary(pred_scalar_shape, HloOpcode::kAnd, p0, p1));
auto and_computation = module->AddEmbeddedComputation(b.Build());
auto make_cond = [&data_shape, &and_computation]() {
auto builder = HloComputation::Builder(TestName() + ".Cond");
auto data = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "data"));
auto compare = builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {8}), data, data, ComparisonDirection::kEq));
auto true_value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
builder.AddInstruction(
HloInstruction::CreateReduce(ShapeUtil::MakeShape(PRED, {}), compare,
true_value, {0}, and_computation));
return builder.Build();
};
auto make_body = [&data_shape]() {
auto builder = HloComputation::Builder(TestName() + ".Body");
auto data = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "data"));
builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, data, data));
return builder.Build();
};
HloComputation* cond_computation =
module->AddEmbeddedComputation(make_cond());
HloComputation* body_computation =
module->AddEmbeddedComputation(make_body());
auto builder = HloComputation::Builder(TestName());
auto data = builder.AddInstruction(
HloInstruction::CreateParameter(0, data_shape, "data"));
auto whil = builder.AddInstruction(HloInstruction::CreateWhile(
data_shape, cond_computation, body_computation, data));
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(data, {}, whil, {}));
}
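
// A call whose callee's root is an elementwise fusion may share the call
// operand's buffer.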
TEST_F(CanShareOperandBufferWithUserTest, CallToComputationWithFusionRoot) {
Shape shape = ShapeUtil::MakeShape(F32, {8});
auto sub_builder = HloComputation::Builder(TestName() + "_sub");
auto sub_param = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "sub_param"));
auto one = sub_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto ones = sub_builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, one, {}));
auto add = sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sub_param, ones));
auto module = CreateNewVerifiedModule();
auto sub_computation = module->AddEmbeddedComputation(sub_builder.Build());
sub_computation->CreateFusionInstruction({add, ones},
HloInstruction::FusionKind::kLoop);
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto reverse =
builder.AddInstruction(HloInstruction::CreateReverse(shape, param, {0}));
auto call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {reverse}, sub_computation));
module->AddEntryComputation(builder.Build());
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(
dataflow_analysis->CanShareOperandBufferWithUser(reverse, {}, call, {}));
}
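
// Slices that exactly undo a concatenate map each fusion output back to its
// elementwise inputs, which may then share buffers with the matching output.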
TEST_F(CanShareOperandBufferWithUserTest, ConcatSliceWithElementwise) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
add0 = f32[10, 20] add(p0, p1)
sub0 = f32[10, 10] subtract(p2, p3)
reshape0 = f32[200] reshape(add0)
reshape1 = f32[100] reshape(sub0)
concat0 = f32[300] concatenate(reshape0, reshape1), dimensions={0}
slice0 = f32[200] slice(concat0), slice={[0:200]}
slice1 = f32[100] slice(concat0), slice={[200:300]}
ROOT tuple = (f32[200], f32[100]) tuple(slice0, slice1)
}
ENTRY test {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
ROOT fusion = (f32[200], f32[100]) fusion(p0, p1, p2, p3), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
auto* fusion = module->entry_computation()->root_instruction();
auto* param0 = module->entry_computation()->parameter_instruction(0);
auto* param1 = module->entry_computation()->parameter_instruction(1);
auto* param2 = module->entry_computation()->parameter_instruction(2);
auto* param3 = module->entry_computation()->parameter_instruction(3);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param2, {},
fusion, {1}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param3, {},
fusion, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {1}));
}
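
// p0 feeds the concatenate both directly and through add0, so it may not
// share with either output; only p1 may share, with the add0 slice ({1}).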
TEST_F(CanShareOperandBufferWithUserTest, ConcatSliceNegativeTest) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
add0 = f32[100] add(p0, p1)
concat0 = f32[200] concatenate(p0, add0), dimensions={0}
slice0 = f32[100] slice(concat0), slice={[0:100]}
slice1 = f32[100] slice(concat0), slice={[100:200]}
ROOT tuple = (f32[100], f32[100]) tuple(slice0, slice1)
}
ENTRY test {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT fusion = (f32[100], f32[100]) fusion(p0, p1),
kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
auto* fusion = module->entry_computation()->root_instruction();
auto* param0 = module->entry_computation()->parameter_instruction(0);
auto* param1 = module->entry_computation()->parameter_instruction(1);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {0}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {1}));
}
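
// With two concatenates both consuming p0 directly, p0 may not share with
// any output; p1 may share with the slices of add0 ({1}) and sub0 ({3}).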
TEST_F(CanShareOperandBufferWithUserTest, MultipleConcatenates) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
add0 = f32[100] add(p0, p1)
sub0 = f32[100] subtract(p1, p1)
concat0 = f32[200] concatenate(p0, add0), dimensions={0}
slice0 = f32[100] slice(concat0), slice={[0:100]}
slice1 = f32[100] slice(concat0), slice={[100:200]}
concat1 = f32[200] concatenate(p0, sub0), dimensions={0}
slice2 = f32[100] slice(concat1), slice={[0:100]}
slice3 = f32[100] slice(concat1), slice={[100:200]}
ROOT tuple = (f32[100], f32[100], f32[100], f32[100])
tuple(slice0, slice1, slice2, slice3)
}
ENTRY test {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT fusion = (f32[100], f32[100], f32[100], f32[100])
fusion(p0, p1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
auto* fusion = module->entry_computation()->root_instruction();
auto* param0 = module->entry_computation()->parameter_instruction(0);
auto* param1 = module->entry_computation()->parameter_instruction(1);
auto dataflow_analysis = RunAnalysis(*module);
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {1}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {2}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param0, {},
fusion, {3}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {1}));
EXPECT_TRUE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {3}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {0}));
EXPECT_FALSE(dataflow_analysis->CanShareOperandBufferWithUser(param1, {},
fusion, {2}));
}
using GetInPlaceInputOutputPairsTest = HloTestBase;
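
// A bare dynamic-update-slice updates operand 0 in place.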
TEST_F(GetInPlaceInputOutputPairsTest, DUS) {
const char* kModule = R"(
HloModule test
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT dus = f32[10] dynamic-update-slice(p0, p1, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* dus = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(dus);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
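
// The in-place pair of a fused DUS propagates to the fusion instruction.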
TEST_F(GetInPlaceInputOutputPairsTest, DUSFusion) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT dus = f32[10] dynamic-update-slice(p0, p1, p2)
}
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT fusion = f32[10] fusion(p0, p1, p2), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
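
// Both the explicit output_to_operand_aliasing (output {0} from operand 1)
// and the fused DUS (output {1} from operand 0) are reported.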
TEST_F(GetInPlaceInputOutputPairsTest, DUSFusionWithOutputOperandAliasing) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
dus = f32[10] dynamic-update-slice(p0, p1, p2)
ROOT tuple = (f32[5], f32[10]) tuple(p1, dus)
}
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT fusion = (f32[5], f32[10]) fusion(p0, p1, p2), kind=kLoop, output_to_operand_aliasing={{0}: (1, {})}, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {1}});
expected_pairs.push_back({HloOperandIndex{1, {}}, {0}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
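
// A fusion containing no in-place op reports no input/output pairs.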
TEST_F(GetInPlaceInputOutputPairsTest, NonDUSFusion) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT add = f32[10] add(p0, p1)
}
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT fusion = f32[10] fusion(p0, p1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
EXPECT_THAT(in_place_pairs, IsEmpty());
}
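
// Explicit output_to_operand_aliasing alone creates an in-place pair, even
// without a DUS inside the fusion.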
TEST_F(GetInPlaceInputOutputPairsTest, NonDUSFusionWithOutputOperandAliasing) {
const char* kModule = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT add = f32[10] add(p0, p1)
}
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT fusion = f32[10] fusion(p0, p1), kind=kLoop, output_to_operand_aliasing={{}: (0, {})}, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
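
// In-place pairs are traced through a fusion nested inside another fusion.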
TEST_F(GetInPlaceInputOutputPairsTest, NestedDUSFusion) {
const char* kModule = R"(
HloModule test
fused_computation1 {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT dus = f32[10] dynamic-update-slice(p0, p1, p2)
}
fused_computation2 {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT fusion = f32[10] fusion(p0, p1, p2), kind=kLoop, calls=fused_computation1
}
ENTRY test {
p0 = f32[10] parameter(0)
p1 = f32[5] parameter(1)
p2 = s32[] parameter(2)
ROOT fusion = f32[10] fusion(p0, p1, p2), kind=kLoop, calls=fused_computation2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
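
// The inner fusion's in-place pair (operand 1 index {1} -> output {1}) maps
// through the tuple plumbing to the outer pair (operand 1 index {0} ->
// output {2}).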
TEST_F(GetInPlaceInputOutputPairsTest, NestedMultiOutputDUSFusion) {
const char* kModule = R"(
HloModule test
fused_computation1 {
p0 = s32[] parameter(0)
p1 = (f32[5],f32[10]) parameter(1)
gte0 = f32[5] get-tuple-element(p1), index=0
gte1 = f32[10] get-tuple-element(p1), index=1
dus = f32[10] dynamic-update-slice(gte1, gte0, p0)
negate = f32[5] negate(gte0)
ROOT tuple = (f32[5],f32[10]) tuple(negate, dus)
}
fused_computation2 {
p0 = f32[5] parameter(0)
p1 = (f32[10],s32[]) parameter(1)
gte0 = f32[10] get-tuple-element(p1), index=0
gte1 = s32[] get-tuple-element(p1), index=1
in_tuple = (f32[5],f32[10]) tuple(p0, gte0)
inner_fusion = (f32[5],f32[10]) fusion(gte1, in_tuple), kind=kLoop, calls=fused_computation1
fusion_gte0 = f32[5] get-tuple-element(inner_fusion), index=0
fusion_gte1 = f32[10] get-tuple-element(inner_fusion), index=1
negate = f32[5] negate(p0)
ROOT tuple = (f32[5],f32[5],f32[10]) tuple(negate, fusion_gte0, fusion_gte1)
}
ENTRY test {
p0 = f32[5] parameter(0)
p1 = (f32[10],s32[]) parameter(1)
ROOT fusion = (f32[5],f32[5],f32[10]) fusion(p0, p1), kind=kLoop, calls=fused_computation2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
HloInstruction* inner_fusion = FindInstruction(module.get(), "inner_fusion");
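  // The inner fusion's dynamic-update-slice writes into element {1} of its
  // tuple operand, so operand 1 at shape index {1} should alias output {1}.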
auto inner_in_place_pairs =
HloDataflowAnalysis::GetInPlaceInputOutputPairs(inner_fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> inner_expected_pairs;
inner_expected_pairs.push_back({HloOperandIndex{1, {1}}, {1}});
EXPECT_EQ(inner_in_place_pairs, inner_expected_pairs);
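  // At the outer fusion, the aliased buffer is element {0} of operand 1; it
  // reaches the inner DUS through in_tuple and surfaces at output index {2}.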
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{1, {0}}, {2}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
TEST_F(GetInPlaceInputOutputPairsTest, NestedLoopWithAliasingInDUSFusion) {
const char* kModule = R"(
HloModule test
copy_fusion {
input = s8[8,256,1,256] parameter(0)
ROOT copy.3 = s8[8,256,1,256] copy(input)
}
fused_computation.0 {
p0 = (s8[8,256,1,256],s8[1,256,1,256]) parameter(0)
gte0 = s8[8,256,1,256] get-tuple-element(p0), index=0
gte1 = s8[1,256,1,256] get-tuple-element(p0), index=1
fusion = s8[8,256,1,256] fusion(gte0), kind=kLoop, output_to_operand_aliasing={{}: (0, {})}, calls=copy_fusion
p1 = s8[1,256,1,256] parameter(1)
added = s8[1,256,1,256] add(gte1, p1)
p2 = s32[] parameter(2)
c0 = s32[] constant(0)
ROOT dynamic-update-slice.0 = s8[8,256,1,256] dynamic-update-slice(fusion, added, p2, c0, c0, c0)
}
ENTRY test {
p0 = (s8[8,256,1,256],s8[1,256,1,256]) parameter(0)
p1 = s8[1,256,1,256] parameter(1)
p2 = s32[] parameter(2)
ROOT fusion = s8[8,256,1,256] fusion(p0, p1, p2), kind=kLoop, calls=fused_computation.0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {0}}, {}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
TEST_F(GetInPlaceInputOutputPairsTest, DUSLoopFusionWithCollective) {
const char* kModule = R"(
HloModule LoopFusionAllReduce
fused_computation.1 {
p0 = bf16[2,8192,6144]{2,1,0:T(8,128)(2,1)} parameter(0)
ROOT slice = bf16[2,2048,6144]{2,1,0:T(8,128)(2,1)} slice(p0), slice={[0:2], [6144:8192], [0:6144]}
}
fused_computation.2 {
p0 = bf16[2,8192]{1,0:T(2,128)(2,1)} parameter(0)
ROOT slice = bf16[2,2048]{1,0:T(2,128)(2,1)} slice(p0), slice={[0:2], [6144:8192]}
}
sum {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
fused_computation {
p0 = bf16[1,2,8192,6144]{3,2,1,0:T(8,128)(2,1)} parameter(0)
p1 = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)} parameter(1)
p2 = bf16[2,8192,6144]{2,1,0:T(8,128)(2,1)} parameter(2)
p3 = bf16[2,8192]{1,0:T(2,128)(2,1)} parameter(3)
fusion.1 = bf16[2,2048,6144]{2,1,0:T(8,128)(2,1)} fusion(p2), kind=kLoop, calls=fused_computation.1
bitcast = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)} bitcast(fusion.1)
fusion.2 = bf16[2,2048]{1,0:T(2,128)(2,1)} fusion(p3), kind=kLoop, calls=fused_computation.2
broadcast = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)} broadcast(fusion.2), dimensions={1,2}
multiply = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)S(1)} multiply(bitcast, broadcast)
all-reduce = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)} all-reduce(p1), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum
c0 = u32[] constant(0)
c1 = u32[] constant(4096)
dynamic-update-slice = bf16[1,2,8192,6144]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(p0, all-reduce, c0, c0, c1, c0)
ROOT tuple = (bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)S(1)}, bf16[1,2,8192,6144]{3,2,1,0:T(8,128)(2,1)}) tuple(multiply, dynamic-update-slice)
}
ENTRY entry {
p0 = bf16[2,8192,6144]{2,1,0:T(8,128)(2,1)} parameter(0)
p1 = bf16[2,8192]{1,0:T(2,128)(2,1)} parameter(1)
p2 = bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)} parameter(2)
p3 = bf16[1,2,8192,6144]{3,2,1,0:T(8,128)(2,1)} parameter(3)
ROOT fusion = (bf16[1,2,2048,6144]{3,2,1,0:T(8,128)(2,1)S(1)}, bf16[1,2,8192,6144]{3,2,1,0:T(8,128)(2,1)}) fusion(p3, p2, p0, p1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {1}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
TEST_F(GetInPlaceInputOutputPairsTest, DUSOutputFusionWithCollective) {
const char* kModule = R"(
HloModule OutputFusionAllReduce
fused_computation.0 {
p0 = bf16[4096,9216]{1,0:T(8,128)(2,1)} parameter(0)
ROOT slice = bf16[1024,9216]{1,0:T(8,128)(2,1)} slice(p0), slice={[3072:4096], [0:9216]}
}
fused_computation.1 {
p0 = s8[9216,6144]{1,0:T(8,128)(4,1)S(1)} parameter(0)
ROOT bitcast = s8[9216,6144]{1,0:T(8,128)(4,1)} bitcast(p0)
}
add {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT add = bf16[] add(x, y)
}
fused_computation {
p0 = bf16[4096,6144]{1,0:T(8,128)(2,1)} parameter(0)
p1 = bf16[1024,6144]{1,0:T(8,128)(2,1)S(1)} parameter(1)
p2 = bf16[4096,9216]{1,0:T(8,128)(2,1)} parameter(2)
p3 = s8[9216,6144]{1,0:T(8,128)(4,1)S(1)} parameter(3)
fusion1 = bf16[1024,9216]{1,0:T(8,128)(2,1)} fusion(p2), kind=kLoop, calls=fused_computation.0
fusion2 = s8[9216,6144]{1,0:T(8,128)(4,1)} fusion(p3), kind=kLoop, calls=fused_computation.1
convolution = bf16[1024,6144]{1,0:T(8,128)(2,1)S(1)} convolution(fusion1, fusion2), dim_labels=bf_io->bf
all-reduce = bf16[1024,6144]{1,0:T(8,128)(2,1)} all-reduce(p1), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
c1 = u32[] constant(2048)
c0 = u32[] constant(0)
dynamic-update-slice = bf16[4096,6144]{1,0:T(8,128)(2,1)} dynamic-update-slice(p0, all-reduce, c1, c0)
ROOT tuple = (bf16[1024,6144]{1,0:T(8,128)(2,1)S(1)}, bf16[4096,6144]{1,0:T(8,128)(2,1)}) tuple(convolution, dynamic-update-slice)
}
ENTRY entry {
p0 = bf16[4096,9216]{1,0:T(8,128)(2,1)} parameter(0)
p1 = s8[9216,6144]{1,0:T(8,128)(4,1)S(1)} parameter(1)
p2 = bf16[1024,6144]{1,0:T(8,128)(2,1)S(1)} parameter(2)
p3 = bf16[4096,6144]{1,0:T(8,128)(2,1)} parameter(3)
ROOT fusion = (bf16[1024,6144]{1,0:T(8,128)(2,1)S(1)}, bf16[4096,6144]{1,0:T(8,128)(2,1)}) fusion(p3, p2, p0, p1), kind=kOutput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModule));
HloInstruction* fusion = module->entry_computation()->root_instruction();
auto in_place_pairs = HloDataflowAnalysis::GetInPlaceInputOutputPairs(fusion);
std::vector<std::pair<HloOperandIndex, ShapeIndex>> expected_pairs;
expected_pairs.push_back({HloOperandIndex{0, {}}, {1}});
EXPECT_EQ(in_place_pairs, expected_pairs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dataflow_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dataflow_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
48af6870-55d0-4586-aed0-fbd18837805e | cpp | tensorflow/tensorflow | despecializer | third_party/xla/xla/service/despecializer.cc | third_party/xla/xla/service/despecializer_test.cc | #include "xla/service/despecializer.h"
#include <iterator>
#include <utility>
#include <vector>
#include "xla/service/defuser.h"
#include "xla/service/float_normalization.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/sub_byte_normalization.h"
namespace xla {
Despecializer::Despecializer() : pipeline_("despecializer") {
pipeline_.AddPass<HloDescheduler>();
pipeline_.AddPass<ControlDepRemover>();
pipeline_.AddPass<Defuser>();
pipeline_.AddPass<BFloat16MixedPrecisionRemoval>();
pipeline_.AddPass<SubByteNormalization>(
SubByteNormalization::REMOVE_ELEMENT_SIZE);
}
void Despecializer::AddAssumeGatherIndicesInBoundRewriteToCopy() {
pipeline_.AddPass<AssumeGatherIndicesInBoundRewriteToCopy>();
}
void Despecializer::AddReduceWindowToReduceBroadcastDeconstruct() {
pipeline_.AddPass<DeconstructReduceWindowToReduceBroadcast>();
}
absl::StatusOr<bool> Despecializer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return pipeline_.Run(module, execution_threads);
}
absl::StatusOr<bool> AssumeGatherIndicesInBoundRewriteToCopy::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
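  // Replace every "AssumeGatherIndicesInBound" custom-call with a plain copy
  // of its operand, dropping the in-bounds assumption.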
std::vector<HloInstruction*> candidates;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall("AssumeGatherIndicesInBound")) {
candidates.push_back(instruction);
}
}
}
for (HloInstruction* gather_indices : candidates) {
auto computation = gather_indices->parent();
auto copy = computation->AddInstruction(
HloInstruction::CreateUnary(gather_indices->shape(), HloOpcode::kCopy,
gather_indices->mutable_operand(0)));
TF_CHECK_OK(computation->ReplaceInstruction(gather_indices, copy));
}
return !candidates.empty();
}
absl::StatusOr<bool> DeconstructReduceWindowToReduceBroadcast::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<std::pair<HloInstruction*, int64_t>> candidate_rw;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kReduceWindow) {
continue;
}
auto* reduce_window = CastOrNull<HloReduceWindowInstruction>(instruction);
if (reduce_window == nullptr) {
continue;
}
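      // Only reduce-windows whose output shape equals the input shape can be
      // rewritten as a full reduction followed by a broadcast.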
if (reduce_window->operand(0)->shape() != reduce_window->shape()) {
continue;
}
const Window& window = reduce_window->window();
int64_t num_stride_dilations = absl::c_count_if(
window.dimensions(), [](const WindowDimension& win_dim) {
return (
win_dim.stride() != 1 || win_dim.window_reversal() == true ||
win_dim.window_dilation() != 1 || win_dim.base_dilation() != 1);
});
if (num_stride_dilations != 0) {
continue;
}
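      // Exactly one window dimension may have size > 1; that dimension is the
      // one being reduced.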
int64_t num_dimensions_reduced = absl::c_count_if(
window.dimensions(),
[](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
if (num_dimensions_reduced != 1) {
continue;
}
auto reduce_dim = absl::c_find_if(
window.dimensions(),
[](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
if (reduce_dim == window.dimensions().end()) {
continue;
}
int64_t reduce_dim_index =
std::distance(window.dimensions().begin(), reduce_dim);
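      // A window of size 2 * N - 1 with N - 1 padding on both sides makes
      // every output element see the whole dimension, i.e. a full reduction.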
auto input_dim_size =
reduce_window->operand(0)->shape().dimensions(reduce_dim_index);
if (reduce_dim->size() != 2 * input_dim_size - 1) {
continue;
}
if (reduce_dim->padding_low() != input_dim_size - 1) {
continue;
}
if (reduce_dim->padding_high() != input_dim_size - 1) {
continue;
}
VLOG(2) << "Adding Candidate ReduceWindow:" << reduce_window->ToString();
candidate_rw.push_back(std::make_pair(reduce_window, reduce_dim_index));
}
}
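  // Rewrite each candidate as a reduce over the single reduced dimension
  // followed by a broadcast back to the original shape.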
for (const auto& rw : candidate_rw) {
auto reduce_window = rw.first;
auto reduce_dim_index = rw.second;
if (reduce_window == nullptr || reduce_dim_index < 0 ||
reduce_dim_index >= reduce_window->operand(0)->shape().rank()) {
continue;
}
std::vector<int64_t> reduce_instr_dimensions;
std::vector<int64_t> broadcast_dimensions;
const Window& window = reduce_window->window();
for (int64_t index = 0; index < window.dimensions().size(); ++index) {
const auto& window_dimension = window.dimensions(index);
if (window_dimension.size() == 1) {
reduce_instr_dimensions.push_back(
reduce_window->operand(0)->shape().dimensions(index));
broadcast_dimensions.push_back(index);
}
}
Shape reduce_shape = ShapeUtil::MakeShape(
reduce_window->shape().element_type(), reduce_instr_dimensions);
auto reduce_instr =
reduce_window->AddInstruction(HloInstruction::CreateReduce(
reduce_shape, reduce_window->mutable_operand(0),
reduce_window->mutable_operand(1), {reduce_dim_index},
reduce_window->called_computations()[0]));
auto broadcast_instr =
reduce_window->AddInstruction(HloInstruction::CreateBroadcast(
reduce_window->shape(), reduce_instr, broadcast_dimensions));
VLOG(2) << "reduce_window:" << reduce_window->ToString();
VLOG(2) << "reduce:" << reduce_instr->ToString();
VLOG(2) << "broadcast:" << broadcast_instr->ToString();
TF_CHECK_OK(reduce_window->parent()->ReplaceInstruction(reduce_window,
broadcast_instr));
changed = true;
}
return changed;
}
} | #include "xla/service/despecializer.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class DespecializerTest : public HloTestBase {
protected:
Despecializer despecializer_;
};
TEST_F(DespecializerTest, ValidRW1) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
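  // Dimension 3 has size 128; a window of 255 = 2 * 128 - 1 padded 127 on
  // both sides is a full reduction, so the pass should emit reduce + broadcast.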
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 3);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 1);
EXPECT_EQ(bcast->dimensions()[2], 2);
}
TEST_F(DespecializerTest, ValidRW2) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x15x1 pad=0_0x0_0x7_7x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 2);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 1);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW3) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,128,32,8]{1,3,2,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,128,32,8]{1,3,2,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x255x1x1 pad=0_0x127_127x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 1);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 0);
EXPECT_EQ(bcast->dimensions()[1], 2);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW4) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[8,32,32,128]{3,0,1,2} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[8,32,32,128]{3,0,1,2} reduce-window(param_0.938,constant.381.clone.1), window={size=15x1x1x1 pad=7_7x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 0);
EXPECT_EQ(bcast->dimensions().size(), 3);
EXPECT_EQ(bcast->dimensions()[0], 1);
EXPECT_EQ(bcast->dimensions()[1], 2);
EXPECT_EQ(bcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW5) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x32 pad=0_0x0_0x0_0x0_31}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRW6) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32]{1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32]{1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=63x1 pad=31_31x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
auto bcast = m->entry_computation()->root_instruction();
auto reduce = bcast->operand(0);
EXPECT_TRUE(bcast != nullptr && reduce != nullptr);
EXPECT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_EQ(reduce->dimensions().size(), 1);
EXPECT_EQ(reduce->dimensions(0), 0);
EXPECT_EQ(bcast->dimensions().size(), 1);
EXPECT_EQ(bcast->dimensions()[0], 1);
}
TEST_F(DespecializerTest, ValidRWMultiple) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=63x1x1x255 pad=31_31x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWStrideDilation) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127 stride=2x1x1x1 lhs_dilate=2x1x1x1}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWShape) {
const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
  ROOT %add = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,2,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x7x1 pad=0_0x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_text).value();
VLOG(2) << despecializer_.Run(m.get()).value();
despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
EXPECT_TRUE(despecializer_.Run(m.get()).value());
EXPECT_EQ(m->entry_computation()->root_instruction()->opcode(),
HloOpcode::kReduceWindow);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/despecializer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/despecializer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b7497017-5529-44a5-b7ab-7a0f9672552b | cpp | tensorflow/tensorflow | bfloat16_conversion_folding | third_party/xla/xla/service/bfloat16_conversion_folding.cc | third_party/xla/xla/service/bfloat16_conversion_folding_test.cc | #include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
class BFloat16ConversionFoldingVisitor : public DfsHloVisitorWithDefault {
public:
explicit BFloat16ConversionFoldingVisitor(
HloComputation* computation, const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding)
: computation_(computation),
bfloat16_support_(bfloat16_support),
bfloat16_conversion_folding_(bfloat16_conversion_folding) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleAllReduce(HloInstruction* crs) override;
static bool Run(HloComputation* computation,
const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding) {
BFloat16ConversionFoldingVisitor visitor(computation, bfloat16_support,
bfloat16_conversion_folding);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
private:
absl::Status TryFoldBF16Conversions(HloInstruction* hlo);
absl::Status FoldOutputConversions(HloInstruction* hlo);
absl::Status FoldOperandConversion(HloInstruction* hlo,
int64_t operand_index);
HloComputation* computation_;
const FloatSupport* bfloat16_support_;
BFloat16ConversionFolding* bfloat16_conversion_folding_;
bool changed_ = false;
};
absl::Status BFloat16ConversionFoldingVisitor::FoldOutputConversions(
HloInstruction* hlo) {
std::vector<HloInstruction*> materialized_users = hlo->users();
hlo->mutable_shape()->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(hlo->mutable_shape());
for (auto user : materialized_users) {
CHECK_EQ(user->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(hlo));
changed_ = true;
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::FoldOperandConversion(
HloInstruction* hlo, int64_t operand_index) {
auto operand = hlo->mutable_operand(operand_index);
CHECK_EQ(operand->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWith(operand_index, operand->mutable_operand(0)));
changed_ = true;
return absl::OkStatus();
}
namespace {
bool AllUsersAreF32ToBF16Converts(const HloInstruction* hlo) {
if (hlo->user_count() == 0 || hlo->shape().element_type() != F32) {
return false;
}
for (const auto user : hlo->users()) {
if (user->opcode() == HloOpcode::kConvert &&
user->shape().element_type() == BF16) {
continue;
}
return false;
}
return true;
}
}
absl::Status BFloat16ConversionFoldingVisitor::TryFoldBF16Conversions(
HloInstruction* hlo) {
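  // Collect operands that are BF16->F32 converts and could consume BF16
  // directly, provided the backend supports a low-precision operand there.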
std::vector<int64_t> bf16_to_f32_operands;
bool has_other_f32_operands = false;
for (int64_t i = 0; i < hlo->operands().size(); ++i) {
auto operand = hlo->operand(i);
if (operand->shape().element_type() == F32) {
if (operand->opcode() == HloOpcode::kConvert &&
operand->operand(0)->shape().element_type() == BF16 &&
bfloat16_support_->SupportsLowPrecisionOperand(*hlo, i)) {
bf16_to_f32_operands.push_back(i);
} else {
has_other_f32_operands = true;
}
continue;
}
}
const bool fold_output_conversion =
AllUsersAreF32ToBF16Converts(hlo) &&
bfloat16_support_->SupportsLowPrecisionOutput(*hlo);
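  // Without mixed-precision support, folding is all-or-nothing: bail out if
  // any F32 operand or the F32 output would remain unconverted.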
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
if (has_other_f32_operands ||
(!fold_output_conversion && hlo->shape().element_type() == F32)) {
return absl::OkStatus();
}
}
if (fold_output_conversion) {
TF_RETURN_IF_ERROR(FoldOutputConversions(hlo));
}
for (int64_t i : bf16_to_f32_operands) {
TF_RETURN_IF_ERROR(FoldOperandConversion(hlo, i));
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::DefaultAction(
HloInstruction* hlo) {
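  // Skip ops that pass values through, change types explicitly, call other
  // computations, run asynchronously, operate in place, or have side effects.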
if (hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kFusion ||
hlo->opcode() == HloOpcode::kBitcastConvert ||
hlo->opcode() == HloOpcode::kConvert ||
hlo->opcode() == HloOpcode::kCall ||
hlo->opcode() == HloOpcode::kCustomCall ||
hlo->opcode() == HloOpcode::kWhile ||
hlo->opcode() == HloOpcode::kConditional ||
hlo->opcode() == HloOpcode::kAsyncStart ||
hlo->opcode() == HloOpcode::kAsyncDone ||
HloDataflowAnalysis::IsInPlaceOperation(hlo->opcode()) ||
hlo->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
if (hlo == computation_->root_instruction() &&
!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
return absl::OkStatus();
}
return TryFoldBF16Conversions(hlo);
}
absl::Status BFloat16ConversionFoldingVisitor::HandleAllReduce(
HloInstruction* crs) {
if (crs->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DefaultAction(crs));
if (!bfloat16_support_->SupportsMixedPrecisions(*crs)) {
return absl::OkStatus();
}
if (!crs->shape().IsTuple()) {
return absl::OkStatus();
}
if (crs == computation_->root_instruction()) {
return absl::OkStatus();
}
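  // For a tuple-shaped all-reduce, fold each tuple element independently when
  // all of its get-tuple-element users feed only F32->BF16 converts.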
std::vector<std::vector<HloInstruction*>> per_tuple_element_gtes(
crs->operand_count());
for (auto user : crs->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return absl::OkStatus();
}
per_tuple_element_gtes[user->tuple_index()].push_back(user);
}
for (int64_t i = 0; i < crs->operand_count(); ++i) {
auto all_gte_users_are_bf16_convert = [&per_tuple_element_gtes, i]() {
if (per_tuple_element_gtes[i].empty()) {
return false;
}
for (auto gte : per_tuple_element_gtes[i]) {
if (!AllUsersAreF32ToBF16Converts(gte)) {
return false;
}
}
return true;
};
if (!all_gte_users_are_bf16_convert()) {
continue;
}
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i})
->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i}));
for (auto gte : per_tuple_element_gtes[i]) {
TF_RETURN_IF_ERROR(FoldOutputConversions(gte));
}
}
return absl::OkStatus();
}
absl::StatusOr<bool> BFloat16ConversionFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (BFloat16ConversionFoldingVisitor::Run(comp, bfloat16_support_, this)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
};
class BFloat16ConversionFoldingTest : public HloTestBase {
protected:
BFloat16ConversionFoldingTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool FoldConversions(HloModule* module) {
TestBFloat16Support bfloat16_support_;
BFloat16ConversionFolding fold(&bfloat16_support_);
absl::StatusOr<bool> result = fold.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
};
TEST_F(BFloat16ConversionFoldingTest, FoldIfSupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, convert1, c));
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
EXPECT_EQ(add1->operand(0), add0);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldIfUnsupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kMultiply, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* mul1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kMultiply, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(mul0->shape().element_type(), F32);
EXPECT_EQ(mul1->shape().element_type(), F32);
EXPECT_EQ(mul1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldUnsupportedMixedPrecision) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* sub0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kSubtract, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* sub1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kSubtract, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(sub0->shape().element_type(), F32);
EXPECT_EQ(sub1->shape().element_type(), F32);
EXPECT_EQ(sub1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldTuple) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, b));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({a, convert0}));
HloInstruction* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 0));
HloInstruction* convert1 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert1);
EXPECT_EQ(gte->shape().element_type(), F32);
EXPECT_EQ(tuple->operand(1), convert0);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldAsyncOp) {
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
auto module = CreateNewVerifiedModule();
auto async_computation_builder = HloComputation::Builder("async_computation");
HloInstruction* async_a = async_computation_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "async_a"));
HloInstruction* async_b = async_computation_builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "async_b"));
HloInstruction* add =
async_computation_builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kAdd, async_a, async_b));
HloComputation* async_computation =
module->AddEmbeddedComputation(async_computation_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, b));
HloInstruction* async_start =
builder.AddInstruction(HloInstruction::CreateAsyncStart(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), f32_shape,
ShapeUtil::MakeScalarShape(U32)}),
{a, convert0}, async_computation));
HloInstruction* async_done = builder.AddInstruction(
HloInstruction::CreateAsyncDone(f32_shape, async_start));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(bf16_shape, async_done));
HloComputation* computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(async_computation->root_instruction(), add);
EXPECT_EQ(computation->root_instruction(), convert1);
EXPECT_EQ(async_done->shape().element_type(), F32);
EXPECT_EQ(async_start->operand(1), convert0);
}
TEST_F(BFloat16ConversionFoldingTest, FoldAllReduceTupleOutput) {
auto builder = HloComputation::Builder(TestName());
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("add");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, x, y));
HloComputation* sum = module->AddEmbeddedComputation(sum_builder.Build());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* convert_a =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, a));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), {convert_a, b}, sum,
CollectiveDeviceList(),
      /*constrain_layout=*/false,
      /*channel_id=*/std::nullopt, /*use_global_device_ids=*/false));
HloInstruction* gte_a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 0));
HloInstruction* gte_b = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 1));
HloInstruction* convert_gte_b =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte_b));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({gte_a, convert_gte_b}));
auto computation = module->AddEntryComputation(builder.Build());
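  // Only tuple element 1 should fold to BF16: gte_b's sole user is a BF16
  // convert, while gte_a feeds the tuple directly. The BF16->F32 convert on
  // operand 0 should also fold away, feeding `a` straight into the all-reduce.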
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_EQ(tuple->operand(0), gte_a);
EXPECT_EQ(tuple->operand(1), gte_b);
EXPECT_EQ(gte_a->shape().element_type(), F32);
EXPECT_EQ(gte_b->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0), a);
EXPECT_EQ(crs->operand(1), b);
EXPECT_EQ(a->shape().element_type(), BF16);
EXPECT_EQ(b->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {0}).element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), BF16);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_conversion_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_conversion_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
47a357da-1a32-44a7-86cd-d191e313508d | cpp | tensorflow/tensorflow | profile_guided_latency_estimator | third_party/xla/xla/service/profile_guided_latency_estimator.cc | third_party/xla/xla/service/profile_guided_latency_estimator_test.cc | #include "xla/service/profile_guided_latency_estimator.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
void HandleMissingInstructionCost(ProfileStatisticsAggregator* aggregator,
const HloInstruction* instruction) {
if (aggregator != nullptr) {
aggregator->HandleMissingInstructionCost(*instruction);
}
}
void HandleFoundInstructionCost(ProfileStatisticsAggregator* aggregator,
const HloInstruction* instruction) {
if (aggregator != nullptr) {
aggregator->HandleFoundInstructionCost(*instruction);
}
}
void HandleMissingInstructionLatency(ProfileStatisticsAggregator* aggregator,
const HloGraphNode& from,
const HloGraphNode& to) {
if (aggregator != nullptr) {
aggregator->HandleMissingInstructionLatency(from.GetInstr(), to.GetInstr());
}
}
void HandleFoundInstructionLatency(ProfileStatisticsAggregator* aggregator,
const HloGraphNode& from,
const HloGraphNode& to) {
if (aggregator != nullptr) {
aggregator->HandleFoundInstructionLatency(from.GetInstr(), to.GetInstr());
}
}
}
LatencyEstimator::TimeCost ProfileGuidedLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& target) const {
static constexpr HloGraphNode::TimeCost kLowLatency = 1.0;
const HloOpcode from_op = from.GetInstr().opcode();
if (!config_.schedule_send_recvs &&
(from_op == HloOpcode::kSend || from_op == HloOpcode::kRecv)) {
return kLowLatency;
}
auto it = instr_map_.find(from.GetInstr().name());
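  // Fall back to the wrapped instruction's name for async-start/done
  // wrappers, since profiles are typically keyed by the wrapped op.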
if (it == instr_map_.end() &&
(from.GetInstr().opcode() == HloOpcode::kAsyncStart ||
from.GetInstr().opcode() == HloOpcode::kAsyncDone)) {
absl::string_view wrapped_inst_name =
from.GetInstr().async_wrapped_instruction()->name();
VLOG(2) << "PGLE found async wrapped instruction: " << wrapped_inst_name
<< " in " << from.GetInstr().name();
it = instr_map_.find(wrapped_inst_name);
}
if (it == instr_map_.end()) {
VLOG(1)
<< "PGLE did NOT find wrapped instruction name or async start. From: "
<< from.GetInstr().name();
HandleMissingInstructionLatency(aggregator_.get(), from, target);
return latency_estimator_->GetLatencyBetween(from, target);
}
auto it2 = it->second.latencies.find(target.GetInstr().name());
if (it2 == it->second.latencies.end() &&
(target.GetInstr().opcode() == HloOpcode::kAsyncStart ||
target.GetInstr().opcode() == HloOpcode::kAsyncDone)) {
it2 = it->second.latencies.find(
target.GetInstr().async_wrapped_instruction()->name());
}
if (it2 != it->second.latencies.end()) {
VLOG(2) << "PGLE found latency between " << from.GetInstr().name()
<< " and " << target.GetInstr().name() << " in latency info";
HandleFoundInstructionLatency(aggregator_.get(), from, target);
return it2->second * CyclesPerMicrosecond();
}
if (it->second.cost.has_value() &&
(IsAsyncPair(from, target) || IsP2pPair(from, target))) {
VLOG(2) << "PGLE found latency for async op " << from.GetInstr().name()
<< " and (assumed)" << target.GetInstr().name()
<< " in instruction costs";
HandleFoundInstructionLatency(aggregator_.get(), from, target);
return *it->second.cost * CyclesPerMicrosecond();
}
VLOG(1) << "PGLE did not find relevant profiling info for '"
<< from.GetInstr().name() << "', and '" << target.GetInstr().name()
<< "'.";
HandleMissingInstructionLatency(aggregator_.get(), from, target);
return latency_estimator_->GetLatencyBetween(from, target);
}
LatencyEstimator::TimeCost ProfileGuidedLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
  if (hlo_query::IsAsyncCollectiveStartOp(instr, /*include_send_recv=*/true) ||
      hlo_query::IsAsyncCollectiveDoneOp(instr, /*include_send_recv=*/true)) {
static constexpr TimeCost kLowCost = 1.0;
return kLowCost;
}
if (auto it = instr_map_.find(instr->name());
it != instr_map_.end() && it->second.cost.has_value()) {
VLOG(2) << "PGLE found cost for: " << instr->name();
HandleFoundInstructionCost(aggregator_.get(), instr);
return *it->second.cost;
}
VLOG(1) << "PGLE missed cost for: " << instr->name();
HandleMissingInstructionCost(aggregator_.get(), instr);
return latency_estimator_->NodeCost(instr);
}
ProfileStatisticsAggregator::Statistics
ProfileStatisticsAggregator::GetStats() {
return {
found_instructions_count_,
missing_instructions_,
};
}
absl::Status ProfileGuidedLatencyEstimator::CheckAccuracy(
const HloModule& module) {
if (aggregator_ == nullptr) {
return absl::FailedPreconditionError(
"Failing because `aggregator_` was not provided when constructing "
"PGLE.");
}
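  // Query every node cost and user-edge latency in entry and while-body
  // computations so the aggregator records which profile entries are missing.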
for (const auto& comp : module.computations()) {
if (!comp->IsEntryComputation() && !comp->IsWhileBodyComputation()) {
continue;
}
for (const HloInstruction* instr : comp->MakeInstructionPostOrder()) {
NodeCost(instr);
HloGraphNode from(instr, -1);
for (const HloInstruction* user : instr->users()) {
HloGraphNode to(user, -1);
GetLatencyBetween(from, to);
}
}
}
ProfileStatisticsAggregator::Statistics stats = aggregator_->GetStats();
size_t missing_instructions_count = stats.missing_instructions.size();
if (missing_instructions_count > 0) {
LOG(ERROR) << "Found " << stats.found_instructions_count
<< " instructions from the profile.";
LOG(ERROR) << "Missing " << missing_instructions_count
<< " instructions from the profile.";
for (const HloInstruction* instr : stats.missing_instructions) {
LOG(ERROR) << " " << instr->name();
}
return absl::InvalidArgumentError(
absl::StrCat("Found ", missing_instructions_count,
" missing instructions. Discarding the profile."));
}
return absl::OkStatus();
}
ProfileGuidedLatencyEstimator::ProfileGuidedLatencyEstimator(
const SchedulerConfig& config,
std::unique_ptr<LatencyEstimator> latency_estimator,
const tensorflow::profiler::ProfiledInstructionsProto& proto,
std::unique_ptr<ProfileStatisticsAggregator> aggregator)
: config_(config),
latency_estimator_(std::move(latency_estimator)),
aggregator_(std::move(aggregator)) {
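  // Profiles store costs and latencies in microseconds; convert them to the
  // wrapped estimator's cycle units up front.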
const int cycles_per_microsecond = latency_estimator_->CyclesPerMicrosecond();
for (const auto& instr_cost : proto.costs()) {
instr_map_[instr_cost.name()] =
ProfileInfo{instr_cost.cost_us() * cycles_per_microsecond};
}
for (const auto& latency : proto.latencies()) {
auto it = instr_map_.insert(std::make_pair(latency.source(), ProfileInfo{}))
.first;
it->second.latencies[latency.target()] =
latency.latency_us() * cycles_per_microsecond;
}
}
} | #include "xla/service/profile_guided_latency_estimator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
int GetIndex(absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
}
SchedulerConfig GetDefaultSchedConfig() {
SchedulerConfig sched_cfg;
return sched_cfg;
}
absl::StatusOr<bool> RunScheduler(
HloModule* module, const SchedulerConfig& sched_config,
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>()) {
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
auto async_tracker = std::make_unique<AsyncTracker>(sched_config);
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
bool value, LatencyHidingScheduler(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
}
class LatencyHidingSchedulerTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
return ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest());
}
};
TEST_P(LatencyHidingSchedulerTest, TestProfileGuidedLatencyEstimator) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
cp1s = (f32[1024,2048,2048]{2,1,0}, f32[1024,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
cp2s = (f32[2048,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p3), source_target_pairs={{1,0},{0,3},{3,2}}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
cp1d = f32[1024,2048,2048]{2,1,0} collective-permute-done(cp1s)
cp2d = f32[2048,2048,2048]{2,1,0} collective-permute-done(cp2s)
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(c0, cp1d, cp2d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
std::string profiled_instructions_text_proto;
if (GetParam()) {
profiled_instructions_text_proto = R"pb(
costs { name: "c0" cost_us: 10.0 }
latencies { source: "cp1s" target: "cp1d" latency_us: 40.0 }
latencies { source: "cp2s" target: "cp2d" latency_us: 80.0 }
)pb";
} else {
profiled_instructions_text_proto = R"pb(
costs { name: "c0" cost_us: 10.0 }
costs { name: "cp1s" cost_us: 40.0 }
costs { name: "cp2s" cost_us: 80.0 }
)pb";
}
tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
profiled_instructions_text_proto, &profiled_instructions_proto));
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
sched_config, std::make_unique<ApproximateLatencyEstimator>(),
profiled_instructions_proto);
EXPECT_TRUE(
RunScheduler(hlo_module.get(), sched_config, std::move(latency_estimator))
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
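  // The profile gives cp2 twice the duration of cp1 (80us vs 40us), so the
  // scheduler should start cp2s earlier to hide more of its latency.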
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
GetIndex(new_instruction_sequence, "cp1s"));
}
INSTANTIATE_TEST_SUITE_P(LatencyHidingSchedulerTest, LatencyHidingSchedulerTest,
::testing::Bool());
using ProfileGuidedLatencyEstimatorTest = HloTestBase;
TEST_F(ProfileGuidedLatencyEstimatorTest,
TestProfileGuidedLatencyEstimatorWithAsyncInstruction) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
add.1 {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
reduce-scatter-start = ((f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}), (f32[4,64,256]{2,1,0}, f32[4,64,256]{2,1,0})) reduce-scatter-start(p0, p1), channel_id=1, replica_groups={}, dimensions={0}, to_apply=add.1
reduce-scatter-done = (f32[4,64,256]{2,1,0}, f32[4,64,256]{2,1,0}) reduce-scatter-done(reduce-scatter-start)
ROOT gte = f32[4,64,256]{2,1,0} get-tuple-element(reduce-scatter-done), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_TRUE(hlo_module->has_entry_computation());
std::string profiled_instructions_text_proto = R"pb(
costs { name: "reduce-scatter" cost_us: 120.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
profiled_instructions_text_proto, &profiled_instructions_proto));
auto sched_config = GetDefaultSchedConfig();
auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
sched_config, std::make_unique<ApproximateLatencyEstimator>(),
profiled_instructions_proto);
HloInstruction* rs_start =
FindInstruction(hlo_module.get(), "reduce-scatter-start");
HloInstruction* rs_done =
FindInstruction(hlo_module.get(), "reduce-scatter-done");
HloGraphNode rs_start_node = HloGraphNode(rs_start, 0);
HloGraphNode rs_done_node = HloGraphNode(rs_done, 1);
double latency =
latency_estimator->GetLatencyBetween(rs_start_node, rs_done_node);
EXPECT_EQ(latency, 120.0);
}
TEST_F(ProfileGuidedLatencyEstimatorTest,
TestProfileGuidedLatencyEstimatorWithP2pInstruction) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
after-all.1 = token[] after-all()
send.7.0 = (f32[16,64,256]{2,1,0}, u32[], token[]) send(p0, after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_source_target_pairs="{{0,1}}"}
send-done.7.0 = token[] send-done(send.7.0), channel_id=1
recv.7.0 = (f32[16,64,256]{2,1,0}, u32[], token[]) recv(after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_source_target_pairs="{{0,1}}"}
recv-done.7.0 = (f32[16,64,256]{2,1,0}, token[]) recv-done(recv.7.0), channel_id=1
ROOT recv-data = f32[16,64,256]{2,1,0} get-tuple-element(recv-done.7.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_TRUE(hlo_module->has_entry_computation());
std::string profiled_instructions_text_proto = R"pb(
costs { name: "send.7.0" cost_us: 110.0 }
costs { name: "recv.7.0" cost_us: 100.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
profiled_instructions_text_proto, &profiled_instructions_proto));
auto sched_config = GetDefaultSchedConfig();
sched_config.schedule_send_recvs = true;
auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
sched_config, std::make_unique<ApproximateLatencyEstimator>(),
profiled_instructions_proto);
HloInstruction* send_start = FindInstruction(hlo_module.get(), "send.7.0");
HloInstruction* send_done =
FindInstruction(hlo_module.get(), "send-done.7.0");
HloInstruction* recv_start = FindInstruction(hlo_module.get(), "recv.7.0");
HloInstruction* recv_done =
FindInstruction(hlo_module.get(), "recv-done.7.0");
HloGraphNode send_start_node = HloGraphNode(send_start, 0);
HloGraphNode send_done_node = HloGraphNode(send_done, 1);
HloGraphNode recv_start_node = HloGraphNode(recv_start, 2);
HloGraphNode recv_done_node = HloGraphNode(recv_done, 3);
double send_latency =
latency_estimator->GetLatencyBetween(send_start_node, send_done_node);
double recv_latency =
latency_estimator->GetLatencyBetween(recv_start_node, recv_done_node);
EXPECT_EQ(send_latency, 110.0);
EXPECT_EQ(recv_latency, 100.0);
}
TEST_F(ProfileGuidedLatencyEstimatorTest,
ProfileGuidedLatencyEstimatorCheckAccuracyFailsIfMissingAggregator) {
std::string kFdoProfile = "";
absl::string_view kHloModule = R"(
HloModule module
ENTRY main {
p0 = f32[1] parameter(0)
ROOT add0 = f32[1] add(p0,p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
tensorflow::profiler::ProfiledInstructionsProto fdo_profile;
ASSERT_TRUE(
tsl::protobuf::TextFormat::ParseFromString(kFdoProfile, &fdo_profile));
auto sched_config = GetDefaultSchedConfig();
auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
sched_config, std::make_unique<ApproximateLatencyEstimator>(),
fdo_profile);
EXPECT_THAT(latency_estimator->CheckAccuracy(*hlo_module),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/profile_guided_latency_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/profile_guided_latency_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6347815-cf44-490a-9deb-925367e87a81 | cpp | tensorflow/tensorflow | value_range | third_party/xla/xla/service/value_range.cc | third_party/xla/xla/service/value_range_test.cc | #include "xla/service/value_range.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "tsl/platform/logging.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
std::optional<int64_t> Range::GetSingleSignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetSignedValue();
}
std::optional<int64_t> Range::GetSingleUnsignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetUnsignedValue();
}
std::string Range::ToString() const {
if (IsEmpty()) {
return std::string("Empty");
}
return absl::StrCat("min: ", min_.ToString(), " max: ", max_.ToString());
}
Range RecursivelyIdentifyRange(
const HloInstruction* instr,
const absl::flat_hash_map<const HloInstruction*, Range>&
predefined_ranges) {
if ((!instr->shape().IsInteger() && instr->shape().element_type() != PRED) ||
instr->shape().dimensions_size() != 0) {
return Range{};
}
VLOG(5) << "Computing Range for " << instr->ToString();
auto it = predefined_ranges.find(instr);
if (it != predefined_ranges.end()) {
VLOG(5) << "Found range! " << it->second.max().GetSignedValue() << " "
<< it->second.min().GetSignedValue();
return it->second;
}
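  // No precomputed range was supplied for this instruction, so derive one
  // structurally; opcodes outside the handled set conservatively yield an
  // empty Range.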
switch (instr->opcode()) {
case HloOpcode::kCompare: {
VLOG(5) << "Handling Compare";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (instr->comparison_direction() != ComparisonDirection::kLt) {
return Range{};
}
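      // Disjoint operand ranges make the `<` statically decidable, e.g.
      // lhs=[0,5], rhs=[10,12] is always true, while lhs=[10,12], rhs=[0,5]
      // is always false; overlapping ranges stay unknown.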
if (lhs.max().lt(rhs.min())) {
        return Range{ConstantValue::GetOne(1, false),
                     ConstantValue::GetOne(1, false),
                     /*is_linear=*/true};
}
if (!lhs.min().lt(rhs.max())) {
        return Range{
            ConstantValue::GetZero(1, false),
            ConstantValue::GetZero(1, false),
            /*is_linear=*/true};
}
VLOG(5) << "Compare failed";
VLOG(5) << "rhs max " << rhs.max().GetSignedValue() << " rhs min "
<< rhs.min().GetSignedValue() << " lhs max "
<< lhs.max().GetSignedValue() << " lhs min "
<< lhs.min().GetSignedValue();
return Range{};
}
case HloOpcode::kConstant: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Constant";
const int64_t bitwidth =
primitive_util::BitWidth(instr->shape().element_type());
const bool is_signed =
primitive_util::IsSignedIntegralType(instr->shape().element_type());
if (is_signed) {
const int64_t value = *instr->literal().GetFirstInteger();
        return Range{ConstantValue::GetSigned(value, bitwidth),
                     ConstantValue::GetSigned(value, bitwidth),
                     /*is_linear=*/true};
}
const uint64_t value = *instr->literal().GetFirstInteger();
      return Range{ConstantValue::GetUnsigned(value, bitwidth),
                   ConstantValue::GetUnsigned(value, bitwidth),
                   /*is_linear=*/true};
}
case HloOpcode::kAdd: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Add";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
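      // Interval arithmetic: [a,b] + [c,d] = [a+c, b+d] at the operands'
      // fixed bit width; a max that compares below the min means the
      // addition wrapped around, so no sound range can be reported.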
ConstantValue min = lhs.min().add(rhs.min());
ConstantValue max = lhs.max().add(rhs.max());
if (max.lt(min)) {
VLOG(5) << "Add wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
case HloOpcode::kSelect: {
VLOG(5) << "Handling Select";
const HloInstruction* cmp = instr->operand(0);
Range cmp_range = RecursivelyIdentifyRange(cmp, predefined_ranges);
if (cmp_range.IsEmpty() || !cmp_range.IsSingleValue()) {
VLOG(5) << "Select failed";
return Range{};
}
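      // A single-valued predicate folds the select: 0 picks the on-false
      // operand, any other value the on-true operand.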
if (cmp_range.GetSingleSignedValue() == 0) {
return RecursivelyIdentifyRange(instr->operand(2), predefined_ranges);
}
return RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
}
case HloOpcode::kSubtract: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Subtract";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
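      // Interval arithmetic: [a,b] - [c,d] = [a-d, b-c], again rejecting
      // results that wrapped around.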
ConstantValue min = lhs.min().sub(rhs.max());
ConstantValue max = lhs.max().sub(rhs.min());
if (max.lt(min)) {
VLOG(5) << "Subtract wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
default:
break;
}
VLOG(5) << "Unsupported instruction: " << instr->ToString();
return Range{};
}
} | #include "xla/service/value_range.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ValueRangeTest : public HloTestBase {};
TEST_F(ValueRangeTest, AddedValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 124);
EXPECT_EQ(range.max().GetSignedValue(), 129);
}
TEST_F(ValueRangeTest, AddedValueUnsigned) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = u16[] constant(32768)
p0 = u16[] parameter(0)
ROOT %a = u16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, false),
ConstantValue::GetUnsigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetUnsignedValue(), 32768);
EXPECT_EQ(range.max().GetUnsignedValue(), 32773);
}
TEST_F(ValueRangeTest, SubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), -124);
EXPECT_EQ(range.max().GetSignedValue(), -119);
}
TEST_F(ValueRangeTest, SelectValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(p0, c0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), -119);
EXPECT_EQ(range.min().GetSignedValue(), -124);
}
TEST_F(ValueRangeTest, SelectValue2) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(c0, p0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(1);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), 129);
EXPECT_EQ(range.min().GetSignedValue(), 124);
}
TEST_F(ValueRangeTest, AddSubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
c1 = s32[] constant(12)
c2 = s32[] constant(5)
p0 = s32[] parameter(0)
sub = s32[] subtract(p0, c0)
a = s32[] add(sub, c1)
sub2 = s32[] subtract(c2, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(1)->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 112);
EXPECT_EQ(range.max().GetSignedValue(), 117);
}
TEST_F(ValueRangeTest, SubtractWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetSigned(-32768, 16),
                ConstantValue::GetZero(16, true), /*is_linear=*/true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
TEST_F(ValueRangeTest, AddWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(16, true),
                ConstantValue::GetSigned(32760, 16), /*is_linear=*/true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/value_range.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/value_range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5df53855-dc84-4c12-a642-74b1e73fc48e | cpp | tensorflow/tensorflow | transfer_manager | third_party/xla/xla/service/transfer_manager.cc | third_party/xla/xla/tests/transfer_manager_test.cc | #include "xla/service/transfer_manager.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/const_init.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/literal.h"
#include "xla/service/compiler.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::Mutex TransferManager::platform_transfer_manager_mutex_(
absl::kConstInit);
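// Lazily-initialized global registry of per-platform transfer managers,
// keyed by platform id and guarded by platform_transfer_manager_mutex_.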
absl::flat_hash_map<se::Platform::Id, TransferManager::State>*
TransferManager::GetPlatformTransferManagers() {
static auto* r =
new absl::flat_hash_map<se::Platform::Id, TransferManager::State>;
return r;
}
absl::StatusOr<Literal> TransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata) {
Literal literal(device_buffer.on_host_shape());
TF_RETURN_IF_ERROR(TransferLiteralFromDevice(stream, device_buffer, &literal,
transfer_metadata));
return std::move(literal);
}
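// The synchronous helpers below run the transfer on a borrowed substream
// that first waits on `stream`, so the copy is ordered after queued work
// without the host-side wait ever blocking `stream` itself.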
absl::Status TransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
const MutableBorrowingLiteral& literal,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
absl::Status ret;
tsl::Notification n;
TransferLiteralFromDevice(
substream, device_buffer, literal,
[&](absl::Status status) {
ret = status;
n.Notify();
},
transfer_metadata);
n.WaitForNotification();
return ret;
}
absl::Status TransferManager::TransferLiteralToDevice(
se::Stream* stream, const LiteralSlice& literal,
const ShapedBuffer& device_buffer,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
TF_RETURN_IF_ERROR(TransferLiteralToDeviceAsync(
substream, literal, device_buffer, transfer_metadata));
return substream->BlockHostUntilDone();
}
absl::StatusOr<Literal> TransferManager::TransferArrayFromDevice(
se::Stream* stream, const Shape& shape, const se::DeviceMemoryBase& source,
const TransferMetadata* transfer_metadata) {
TF_RET_CHECK(shape.IsArray());
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
HostShapeToDeviceShape(shape), shape));
Literal literal(shape);
ShapedBuffer shaped_buffer(shape, stream->parent()->device_ordinal());
shaped_buffer.set_buffer(source, {});
TF_RETURN_IF_ERROR(TransferLiteralFromDevice(stream, shaped_buffer, &literal,
transfer_metadata));
return std::move(literal);
}
absl::Status TransferManager::TransferArrayToDevice(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata) {
TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
TF_RETURN_IF_ERROR(substream->WaitFor(stream));
absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
TF_RETURN_IF_ERROR(
TransferArrayToDeviceAsync(substream, literal, dest, transfer_metadata));
return substream->BlockHostUntilDone();
}
absl::Status TransferManager::TransferArrayToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
const se::DeviceMemoryBase& dest,
const TransferMetadata* transfer_metadata) {
TF_RET_CHECK(literal.shape().IsArray());
ShapedBuffer shaped_buffer(HostShapeToDeviceShape(literal.shape()),
stream->parent()->device_ordinal());
shaped_buffer.set_buffer(dest, {});
return TransferLiteralToDeviceAsync(stream, literal, shaped_buffer,
transfer_metadata);
}
absl::Status TransferManager::ReadDynamicShapes(
se::Stream* stream, const ShapedBuffer* device_buffer,
Shape* device_shape) {
DCHECK(device_shape->is_dynamic());
Shape original_device_shape = *device_shape;
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
TF_ASSIGN_OR_RETURN(
auto compiler, Compiler::GetForPlatform(stream->parent()->GetPlatform()));
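  // A dynamic array buffer stores its actual dimension sizes as a trailing
  // block of int32 metadata appended after the statically-shaped payload;
  // read that block back and patch the matching dimensions of device_shape.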
TF_RETURN_IF_ERROR(device_buffer->buffers().ForEachElementWithStatus(
[&](const ShapeIndex& index,
const se::DeviceMemoryBase& buffer) -> absl::Status {
const Shape& buffer_shape =
ShapeUtil::GetSubshape(*device_shape, index);
if (buffer_shape.IsTuple()) {
return absl::OkStatus();
}
Shape& device_sub_shape =
*ShapeUtil::GetMutableSubshape(device_shape, index);
if (device_sub_shape.is_static()) {
return absl::OkStatus();
}
auto shape_size_fn = compiler->ShapeSizeBytesFunction();
Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);
const int64_t offset = shape_size_fn(buffer_shape_static);
int64_t metadata_size = shape_size_fn(buffer_shape) - offset;
if (metadata_size == 0) {
return InvalidArgument("Dynamic shape metadata size should not be 0");
}
auto buffer_8 = se::DeviceMemory<uint8_t>(buffer);
auto metadata_buffer = buffer_8.GetSlice(offset, metadata_size);
TF_ASSIGN_OR_RETURN(
auto metadata,
TransferArrayFromDevice(
stream,
ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}),
metadata_buffer));
for (int64_t i = 0; i < metadata.element_count(); ++i) {
device_sub_shape.mutable_dimensions()[i] = metadata.Get<int32_t>({i});
}
return absl::OkStatus();
}));
device_shape->clear_dynamic_dimensions();
TF_RET_CHECK(ShapeUtil::DynamicShapeIsCompatible(*device_shape,
original_device_shape));
return absl::OkStatus();
}
void TransferManager::RegisterTransferManager(
se::Platform::Id platform_id,
TransferManagerCreationFunction creation_function) {
absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
auto* managers = GetPlatformTransferManagers();
CHECK(managers->find(platform_id) == managers->end());
(*managers)[platform_id].creation_function = creation_function;
}
absl::StatusOr<TransferManager*> TransferManager::GetForPlatform(
const se::Platform* platform) {
absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
auto* managers = GetPlatformTransferManagers();
auto it = managers->find(platform->id());
if (it == managers->end()) {
return NotFound(
"could not find registered transfer manager for platform %s -- check "
"target linkage",
platform->Name());
}
if (it->second.manager == nullptr) {
it->second.manager = (*it->second.creation_function)();
}
return it->second.manager.get();
}
absl::Status TransferManager::WriteTupleIndexTables(
se::Stream* stream, const ShapedBuffer& device_buffer) {
TF_RETURN_IF_ERROR(WriteTupleIndexTablesAsync(stream, device_buffer));
return stream->BlockHostUntilDone();
}
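// On-device tuples are represented by index tables: arrays of pointers to
// the element buffers. One table is written for each non-empty tuple
// subshape of the buffer.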
absl::Status TransferManager::WriteTupleIndexTablesAsync(
se::Stream* stream, const ShapedBuffer& device_buffer) {
VLOG(2) << "Writing tuple index tables for " << device_buffer;
return ShapeUtil::ForEachSubshapeWithStatus(
device_buffer.on_device_shape(),
[&](const Shape& device_subshape,
const ShapeIndex& index) -> absl::Status {
if (device_subshape.IsTuple() &&
ShapeUtil::TupleElementCount(device_subshape) > 0) {
se::DeviceMemoryBase device_memory = device_buffer.buffer(index);
TF_RET_CHECK(GetByteSizeRequirement(device_subshape) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
ShapeIndex element_index = index;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(device_subshape);
++i) {
element_index.push_back(i);
elements.push_back(device_buffer.buffer(element_index));
element_index.pop_back();
}
return WriteSingleTupleIndexTable(stream, elements, device_subshape,
&device_memory);
}
return absl::OkStatus();
});
}
absl::Status TransferManager::WriteRootTupleIndexTable(
se::Stream* stream, const ShapedBuffer& device_buffer) {
TF_RET_CHECK(device_buffer.on_device_shape().IsTuple());
if (ShapeUtil::TupleElementCount(device_buffer.on_device_shape()) == 0) {
return absl::OkStatus();
}
se::DeviceMemoryBase device_memory = device_buffer.buffer({});
TF_RET_CHECK(GetByteSizeRequirement(device_buffer.on_device_shape()) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
elements.reserve(
ShapeUtil::TupleElementCount(device_buffer.on_device_shape()));
for (int64_t i = 0;
i < ShapeUtil::TupleElementCount(device_buffer.on_device_shape()); ++i) {
elements.push_back(device_buffer.buffer({i}));
}
return WriteSingleTupleIndexTable(
stream, elements, device_buffer.on_device_shape(), &device_memory);
}
absl::Status TransferManager::WriteRootTupleIndexTable(
se::Stream* stream, const ShapeTree<MaybeOwningDeviceMemory>& buffer_tree) {
TF_RET_CHECK(buffer_tree.shape().IsTuple());
if (ShapeUtil::TupleElementCount(buffer_tree.shape()) == 0) {
return absl::OkStatus();
}
se::DeviceMemoryBase device_memory =
buffer_tree.element({}).AsDeviceMemoryBase();
TF_RET_CHECK(GetByteSizeRequirement(buffer_tree.shape()) ==
device_memory.size());
std::vector<se::DeviceMemoryBase> elements;
elements.reserve(ShapeUtil::TupleElementCount(buffer_tree.shape()));
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(buffer_tree.shape());
++i) {
elements.push_back(buffer_tree.element({i}).AsDeviceMemoryBase());
}
return WriteSingleTupleIndexTable(stream, elements, buffer_tree.shape(),
&device_memory);
}
absl::StatusOr<ScopedShapedBuffer> TransferManager::AllocateScopedShapedBuffer(
const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
int device_ordinal, int physical_device_ordinal,
DeviceShapeRepresentationFn shape_representation_fn) {
if (!LayoutUtil::HasLayout(on_host_shape)) {
return InvalidArgument("Shape must have a layout: %s",
ShapeUtil::HumanStringWithLayout(on_host_shape));
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));
Shape on_device_shape = (shape_representation_fn == nullptr)
? HostShapeToDeviceShape(on_host_shape)
: shape_representation_fn(on_host_shape);
TF_RET_CHECK(LayoutUtil::HasLayout(on_device_shape));
ScopedShapedBuffer shaped_buffer(std::move(on_device_shape), allocator,
device_ordinal, physical_device_ordinal);
for (auto& pair : shaped_buffer.buffers()) {
const ShapeIndex& index = pair.first;
se::DeviceMemoryBase& memory_base = pair.second;
const Shape& subshape =
ShapeUtil::GetSubshape(shaped_buffer.on_device_shape(), index);
TF_ASSIGN_OR_RETURN(auto memory,
allocator->Allocate(shaped_buffer.device_ordinal(),
GetByteSizeRequirement(subshape),
                                             /*retry_on_failure=*/true,
LayoutUtil::MemorySpace(subshape)));
memory_base = memory.Release();
}
return std::move(shaped_buffer);
}
absl::StatusOr<Shape> TransferManager::ChooseCompactLayoutForShape(
const Shape& host_shape) const {
return LayoutUtil::GetWithDefaultLayout(host_shape);
}
xla::Shape TransferManager::ChooseGoodInfeedLayout(const Shape& shape) const {
return LayoutUtil::GetWithDefaultLayout(shape);
}
} | #include <memory>
#include <string>
#include <functional>
#include <numeric>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/generic_transfer_manager.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/stream_pool.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/local_client_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class TransferManagerTest : public LocalClientTestBase {
protected:
TransferManagerTest()
: shape_size_fn_([this](const Shape& shape) {
return transfer_manager_->GetByteSizeRequirement(shape);
}) {
stream_ptr_ = local_client_->mutable_backend()
->BorrowStream(stream_executor_)
.value();
stream_ = stream_ptr_.get();
}
~TransferManagerTest() override = default;
ScopedShapedBuffer AllocateDeviceBuffer(const Shape& shape) {
return transfer_manager_
->AllocateScopedShapedBuffer(
shape, GetOrCreateAllocator(local_client_->platform()),
            /*device_ordinal=*/0)
.value();
}
protected:
StreamPool::Ptr stream_ptr_;
se::Stream* stream_;
private:
std::function<int64_t(const Shape&)> shape_size_fn_;
};
XLA_TEST_F(TransferManagerTest, TransferR0U32) {
Literal literal = LiteralUtil::CreateR0<uint32_t>(42);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR0Equal<uint32_t>(42, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1F32) {
Literal literal =
LiteralUtil::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f});
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>({1.25f, 2.5f, -17.0f, -20.125f},
result);
}
XLA_TEST_F(TransferManagerTest, TransferR1F32AwkwardSizes) {
constexpr int kMaxR1Size = (1 << 11);
for (int i = 0; i < kMaxR1Size; ++i) {
std::vector<float> inputs(i);
std::iota(inputs.begin(), inputs.end(), 0);
Literal literal = LiteralUtil::CreateR1<float>(inputs);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>(inputs, result);
}
}
XLA_TEST_F(TransferManagerTest, TransferR1LargeF32) {
std::vector<float> test_vector(1024 * 1024);
std::iota(test_vector.begin(), test_vector.end(), 0);
Literal literal = LiteralUtil::CreateR1<float>(test_vector);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR1Equal<float>(test_vector, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1LargeUnalignedF32) {
std::vector<float> test_vector(1025);
std::iota(test_vector.begin(), test_vector.end(), 0);
Shape shape = ShapeUtil::MakeShape(F32, {1024});
BorrowingLiteral literal(reinterpret_cast<const char*>(&test_vector[1]),
shape);
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
std::vector<float> expected_output(1024);
std::iota(expected_output.begin(), expected_output.end(), 1);
LiteralTestUtil::ExpectR1Equal<float>(expected_output, result);
}
XLA_TEST_F(TransferManagerTest, TransferR1U8) {
const char* test_string = "0123456789abcdef";
Literal literal = LiteralUtil::CreateR1U8(test_string);
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_EQ(result.GetR1U8AsString(), test_string);
}
XLA_TEST_F(TransferManagerTest, TransferR2F32) {
Literal literal =
LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
const Shape& shape = literal.shape();
auto device_buffer = AllocateDeviceBuffer(shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
LiteralTestUtil::ExpectR2Equal<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
XLA_TEST_F(TransferManagerTest,
TransferR2F32AndChangeLayoutTransferringToDevice) {
Literal literal = LiteralUtil::CreateR2WithLayout<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, LayoutUtil::MakeLayout({0, 1}));
const Shape ondevice_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 3}, {1, 0});
auto device_buffer = AllocateDeviceBuffer(ondevice_shape);
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_FALSE(
LayoutUtil::Equal(result.shape().layout(), literal.shape().layout()));
LiteralTestUtil::ExpectR2Equal<float>(
{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
XLA_TEST_F(TransferManagerTest, TransferTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferEmptyTuple) {
Literal literal = LiteralUtil::MakeTuple({});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferNestedTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferComplexValue) {
Literal literal = LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferComplexValueInTuple) {
Literal literal = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR1<complex64>(
{complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)}),
LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6}),
LiteralUtil::CreateR0<complex64>(complex64(0.3f, -0.4f))});
auto device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
XLA_TEST_F(TransferManagerTest, TransferTokenFromDevice) {
auto device_buffer = AllocateDeviceBuffer(ShapeUtil::MakeTokenShape());
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateToken(), result));
}
XLA_TEST_F(TransferManagerTest, OVERSIZE_ON_GRM(MultiStreamRoundTripSoak)) {
const int64_t kIterationCount = 5000;
Literal literal1 = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(123.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
Literal literal2 = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR0<float>(456.0f),
LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR2<float>({{5.0f, 7.0f}, {9.0f, 4.0f}}),
LiteralUtil::CreateR1<float>({44.0f, -11.0f, 3333333.3f})}),
LiteralUtil::CreateR1<float>({-98.0f, 153.0f})});
auto device_buffer1 = AllocateDeviceBuffer(literal1.shape());
auto device_buffer2 = AllocateDeviceBuffer(literal2.shape());
auto stream1 = stream_;
auto stream2 = stream_->GetOrCreateSubStream().value();
Literal result1, result2;
for (int i = 0; i < kIterationCount; ++i) {
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream1, literal1,
device_buffer1));
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream2, literal2,
device_buffer2));
TF_ASSERT_OK_AND_ASSIGN(
Literal this_result1,
transfer_manager_->TransferLiteralFromDevice(stream1, device_buffer1));
TF_ASSERT_OK_AND_ASSIGN(
Literal this_result2,
transfer_manager_->TransferLiteralFromDevice(stream2, device_buffer2));
result1 = std::move(this_result1);
result2 = std::move(this_result2);
}
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, result1));
EXPECT_TRUE(LiteralTestUtil::Equal(literal2, result2));
}
XLA_TEST_F(TransferManagerTest, DISABLED_ON_TPU(TransferDynamicShape)) {
TF_ASSERT_OK_AND_ASSIGN(
Shape s, ParseShape("(s64[], s32[<=1048576,3], f32[<=1048576,48])"));
Literal literal(s);
  literal.SetDynamicSize(/*dim_index=*/0, /*shape_index=*/{1}, 1048574);
  literal.SetDynamicSize(/*dim_index=*/0, /*shape_index=*/{2}, 1048575);
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {0})
.Populate<int64_t>(
[](absl::Span<const int64_t> indices) { return 42; }));
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {1})
.Populate<int32_t>([](absl::Span<const int64_t> indices) {
return indices[0] + indices[1];
}));
ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {2})
.Populate<float>([](absl::Span<const int64_t> indices) {
return indices[0] + indices[1];
}));
ScopedShapedBuffer device_buffer = AllocateDeviceBuffer(literal.shape());
ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
EXPECT_EQ(literal.GetDynamicSize(0, {1}),
result.GetDynamicSize(0, {1}));
EXPECT_EQ(literal.GetDynamicSize(0, {2}),
result.GetDynamicSize(0, {2}));
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
class TransferDeviceToHostBenchmark : public TransferManagerTest {
public:
using TransferManagerTest::TransferManagerTest;
~TransferDeviceToHostBenchmark() override {}
void Run(::testing::benchmark::State& state, int num_tuple_elements,
int array_size) {
SetUp();
std::vector<Literal> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal.shape());
TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
for (auto s : state) {
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
}
TearDown();
}
void TestBody() override {}
};
class TransferHostToDeviceBenchmark : public TransferManagerTest {
public:
using TransferManagerTest::TransferManagerTest;
~TransferHostToDeviceBenchmark() override {}
void Run(::testing::benchmark::State& state, int num_tuple_elements,
int array_size) {
SetUp();
std::vector<Literal> tuple_elements;
for (int i = 0; i < num_tuple_elements; ++i) {
tuple_elements.push_back(
LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
}
Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
auto device_buffer = AllocateDeviceBuffer(literal.shape());
for (auto s : state) {
TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
device_buffer));
}
TearDown();
}
void TestBody() override {}
};
void BM_TransferDeviceToHost(::testing::benchmark::State& state) {
const int num_tuple_elements = state.range(0);
const int array_size = state.range(1);
TransferDeviceToHostBenchmark bm;
bm.Run(state, num_tuple_elements, array_size);
}
void BM_TransferHostToDevice(::testing::benchmark::State& state) {
const int num_tuple_elements = state.range(0);
const int array_size = state.range(1);
TransferHostToDeviceBenchmark bm;
bm.Run(state, num_tuple_elements, array_size);
}
BENCHMARK(BM_TransferHostToDevice)
->ArgPair(1, 256)
->ArgPair(1, 257)
->ArgPair(100, 256)
->ArgPair(100, 257);
BENCHMARK(BM_TransferDeviceToHost)
->ArgPair(1, 256)
->ArgPair(1, 257)
->ArgPair(100, 256)
->ArgPair(100, 257);
}
}
// main must be defined outside the namespaces so it keeps external linkage.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  tsl::testing::RunBenchmarks();
  return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transfer_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/transfer_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7652dcbf-c7f3-49b3-a1bf-6885ef98c58c | cpp | tensorflow/tensorflow | batchnorm_expander | third_party/xla/xla/service/batchnorm_expander.cc | third_party/xla/xla/service/batchnorm_expander_test.cc | #include "xla/service/batchnorm_expander.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::optional;
class BatchNormExpanderVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
absl::Status HandleBatchNormInference(HloInstruction* batch_norm) override;
absl::Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
static bool Run(HloComputation* computation, bool rewrite_training_op,
bool rewrite_inference_op, bool rewrite_grad_op);
~BatchNormExpanderVisitor() override = default;
private:
explicit BatchNormExpanderVisitor(HloComputation* computation,
bool rewrite_training_op,
bool rewrite_inference_op,
bool rewrite_grad_op)
: computation_(computation),
rewrite_training_op_(rewrite_training_op),
rewrite_inference_op_(rewrite_inference_op),
rewrite_grad_op_(rewrite_grad_op) {}
HloComputation* GetOrCreateScalarAddComputation(
PrimitiveType primitive_type) {
HloComputation::Builder b("scalar_add_computation");
Shape shape = ShapeUtil::MakeShape(primitive_type, {});
auto scalar_lhs = b.AddInstruction(
HloInstruction::CreateParameter(0, shape, "scalar_lhs"));
auto scalar_rhs = b.AddInstruction(
HloInstruction::CreateParameter(1, shape, "scalar_rhs"));
auto scalar_op = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, scalar_lhs, scalar_rhs));
return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
}
std::unique_ptr<HloInstruction> Rsqrt(HloInstruction* operand) {
return HloInstruction::CreateUnary(operand->shape(), HloOpcode::kRsqrt,
operand);
}
std::unique_ptr<HloInstruction> Mean(
HloInstruction* element_count, HloInstruction* operand,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
add_instruction) {
auto broadcast = add_instruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(operand->shape()), element_count, {}));
return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kDivide,
operand, broadcast);
}
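  // Computes the number of elements contributing to each feature, i.e. the
  // product of all non-feature dimension sizes; GetDimensionSize is used so
  // dynamic dimensions are counted at their runtime extent.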
std::unique_ptr<HloInstruction> DynamicElementCountPerFeature(
HloInstruction* operand, int64_t feature_index,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
add_instruction) {
auto elements_per_feature_s32 = add_instruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
for (int64_t i = 0; i < operand->shape().rank(); ++i) {
if (i == feature_index) {
continue;
}
auto dynamic_dimension_size =
add_instruction(HloInstruction::CreateGetDimensionSize(
ShapeUtil::MakeShape(S32, {}), operand, i));
elements_per_feature_s32 = add_instruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
dynamic_dimension_size, elements_per_feature_s32));
}
return HloInstruction::CreateConvert(
ShapeUtil::MakeShape(operand->shape().element_type(), {}),
elements_per_feature_s32);
}
HloComputation* computation_;
bool rewrite_training_op_;
bool rewrite_inference_op_;
bool rewrite_grad_op_;
};
}
bool BatchNormExpanderVisitor::Run(HloComputation* computation,
bool rewrite_training_op,
bool rewrite_inference_op,
bool rewrite_grad_op) {
BatchNormExpanderVisitor visitor(
computation,
rewrite_training_op,
rewrite_inference_op,
rewrite_grad_op);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed();
}
absl::Status BatchNormExpanderVisitor::HandleBatchNormTraining(
HloInstruction* batch_norm) {
if (!rewrite_training_op_) {
return absl::OkStatus();
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
int64_t instruction_count_before = computation_->instruction_count();
HloInstruction* operand = batch_norm->mutable_operand(0);
const Shape operand_shape = operand->shape();
PrimitiveType ptype = operand_shape.element_type();
int64_t feature_index = batch_norm->feature_index();
HloInstruction* scale = batch_norm->mutable_operand(1);
HloInstruction* offset = batch_norm->mutable_operand(2);
const Shape feature_shape = scale->shape();
auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
auto epsilon = add(HloInstruction::CreateBroadcast(
scalar_broadcast_shape,
add(HloInstruction::CreateConstant(std::move(epsilon_literal))), {}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = operand_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
auto elements_per_feature =
add(DynamicElementCountPerFeature(operand, feature_index, add));
auto feature_broadcast = [&](HloInstruction* inst) -> HloInstruction* {
Shape feature_broadcast_shape = scalar_broadcast_shape;
feature_broadcast_shape.set_dynamic_dimension(
feature_index, inst->shape().is_dynamic_dimension(0));
return add(HloInstruction::CreateBroadcast(feature_broadcast_shape, inst,
{feature_index}));
};
auto scale_broadcasted = feature_broadcast(scale);
auto offset_broadcasted = feature_broadcast(offset);
HloComputation* add_reduce_computation =
GetOrCreateScalarAddComputation(ptype);
auto operand_squared =
add_binary(operand_shape, HloOpcode::kMultiply, operand, operand);
auto sum = add(HloInstruction::CreateReduce(feature_shape, operand, zero,
dimensions_without_feature,
add_reduce_computation));
auto squared_sum = add(HloInstruction::CreateReduce(
feature_shape, operand_squared, zero, dimensions_without_feature,
add_reduce_computation));
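  // Per-feature moments from the two reductions above:
  //   mean   = sum / N
  //   Var[X] = E[X^2] - (E[X])^2 = squared_sum / N - mean^2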
auto mean = add(Mean(elements_per_feature, sum, add));
auto mean_broadcasted = feature_broadcast(mean);
auto square_mean = add(Mean(elements_per_feature, squared_sum, add));
auto mean_square =
add_binary(feature_shape, HloOpcode::kMultiply, mean, mean);
auto var =
add_binary(feature_shape, HloOpcode::kSubtract, square_mean, mean_square);
auto var_broadcasted = feature_broadcast(var);
auto var_add_epsilon = add_binary(var_broadcasted->shape(), HloOpcode::kAdd,
var_broadcasted, epsilon);
auto rsqrt_var_add_epsilon = add(Rsqrt(var_add_epsilon));
auto operand_minus_mean = add_binary(operand_shape, HloOpcode::kSubtract,
operand, mean_broadcasted);
auto normalized = add_binary(operand_shape, HloOpcode::kMultiply,
operand_minus_mean, rsqrt_var_add_epsilon);
auto scaled_normalized = add_binary(operand_shape, HloOpcode::kMultiply,
normalized, scale_broadcasted);
auto shifted_normalized = add_binary(operand_shape, HloOpcode::kAdd,
scaled_normalized, offset_broadcasted);
auto tuple = HloInstruction::CreateTuple({shifted_normalized, mean, var});
if (batch_norm->has_sharding()) {
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
const HloSharding& sharding = batch_norm->sharding();
HloSharding operand_sharding =
sharding.GetAsShapeTree(batch_norm->shape()).element({0});
optional<int64_t> unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
inst->set_sharding(operand_sharding);
} else {
inst->set_sharding(default_sharding);
}
}
tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
return absl::OkStatus();
}
absl::Status BatchNormExpanderVisitor::HandleBatchNormInference(
HloInstruction* batch_norm) {
if (!rewrite_inference_op_) {
return absl::OkStatus();
}
HloInstruction* operand = batch_norm->mutable_operand(0);
const Shape operand_shape = operand->shape();
int64_t feature_index = batch_norm->feature_index();
PrimitiveType ptype = operand_shape.element_type();
HloInstruction* scale = batch_norm->mutable_operand(1);
HloInstruction* offset = batch_norm->mutable_operand(2);
HloInstruction* mean = batch_norm->mutable_operand(3);
HloInstruction* var = batch_norm->mutable_operand(4);
const Shape feature_shape = scale->shape();
Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(feature_shape);
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
auto epsilon = computation_->AddInstruction(HloInstruction::CreateBroadcast(
scalar_broadcast_shape,
computation_->AddInstruction(
HloInstruction::CreateConstant(std::move(epsilon_literal))),
{}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = operand_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
auto feature_broadcast = [&](HloInstruction* a) {
Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
broadcast_shape.set_dynamic_dimension(feature_index,
a->shape().is_dynamic_dimension(0));
return add(
HloInstruction::CreateBroadcast(broadcast_shape, a, {feature_index}));
};
int64_t instruction_count_before = computation_->instruction_count();
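  // Fold the normalization into a single affine transform of the activation:
  //   y = x * (scale * rsqrt(var + eps)) +
  //       (offset - mean * scale * rsqrt(var + eps))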
auto true_scale = add_binary(
feature_shape, HloOpcode::kMultiply, scale,
add(Rsqrt(add_binary(feature_shape, HloOpcode::kAdd, var, epsilon))));
auto true_shift = add_binary(
feature_shape, HloOpcode::kSubtract, offset,
add_binary(feature_shape, HloOpcode::kMultiply, mean, true_scale));
auto shifted_normalized =
add_binary(operand_shape, HloOpcode::kAdd,
add_binary(operand_shape, HloOpcode::kMultiply, operand,
feature_broadcast(true_scale)),
feature_broadcast(true_shift));
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
if (batch_norm->has_sharding()) {
const HloSharding& sharding = batch_norm->sharding();
optional<int64_t> unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
inst->set_sharding(sharding);
} else {
inst->set_sharding(default_sharding);
}
}
shifted_normalized->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceInstruction(batch_norm, shifted_normalized));
return absl::OkStatus();
}
absl::Status BatchNormExpanderVisitor::HandleBatchNormGrad(
HloInstruction* batch_norm) {
if (!rewrite_grad_op_) {
return absl::OkStatus();
}
std::vector<HloInstruction*> added_instructions;
auto add = [&](std::unique_ptr<HloInstruction> inst) {
HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
added_inst->set_metadata(batch_norm->metadata());
added_instructions.push_back(added_inst);
return added_inst;
};
auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
HloInstruction* a, HloInstruction* b) {
return add(HloInstruction::CreateBinary(shape, opcode, a, b));
};
int64_t instruction_count_before = computation_->instruction_count();
HloInstruction* activation = batch_norm->mutable_operand(0);
const Shape activation_shape = activation->shape();
PrimitiveType ptype = activation_shape.element_type();
HloInstruction* scale = batch_norm->mutable_operand(1);
const Shape feature_shape = scale->shape();
HloInstruction* mean = batch_norm->mutable_operand(2);
HloInstruction* variance = batch_norm->mutable_operand(3);
HloInstruction* grad_output = batch_norm->mutable_operand(4);
int64_t feature_index = batch_norm->feature_index();
auto elements_per_feature =
add(DynamicElementCountPerFeature(activation, feature_index, add));
auto zero_literal = LiteralUtil::CreateR0(0.0f);
TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
auto epsilon_scalar =
add(HloInstruction::CreateConstant(std::move(epsilon_literal)));
auto epsilon_activation = add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(activation_shape), epsilon_scalar, {}));
auto epsilon_feature = add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(feature_shape), epsilon_scalar, {}));
std::vector<int64_t> dimensions_without_feature;
const int64_t rank = activation_shape.rank();
dimensions_without_feature.reserve(rank - 1);
for (int64_t i = 0; i < rank; ++i) {
if (i != feature_index) {
dimensions_without_feature.push_back(i);
}
}
auto activation_broadcast = [&](HloInstruction* hlo) -> HloInstruction* {
Shape broadcast_shape = ShapeUtil::MakeStaticShape(activation_shape);
broadcast_shape.set_dynamic_dimension(feature_index,
hlo->shape().is_dynamic_dimension(0));
return add(
HloInstruction::CreateBroadcast(broadcast_shape, hlo, {feature_index}));
};
auto scale_broadcasted = activation_broadcast(scale);
auto variance_broadcasted = activation_broadcast(variance);
auto mean_broadcasted = activation_broadcast(mean);
auto rsqrt_var_add_epsilon_broadcasted =
add(Rsqrt(add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
variance_broadcasted, epsilon_activation)));
auto rsqrt_var_add_epsilon = add(Rsqrt(
add_binary(feature_shape, HloOpcode::kAdd, variance, epsilon_feature)));
auto activation_minus_mean = add_binary(
activation_shape, HloOpcode::kSubtract, activation, mean_broadcasted);
auto grad_output_times_activation_minus_mean =
add_binary(activation_shape, HloOpcode::kMultiply, grad_output,
activation_minus_mean);
HloComputation* add_reduce_computation =
GetOrCreateScalarAddComputation(ptype);
auto sum_grad_output_times_activation_minus_mean =
add(HloInstruction::CreateReduce(
feature_shape, grad_output_times_activation_minus_mean, zero,
dimensions_without_feature, add_reduce_computation));
auto grad_beta = add(HloInstruction::CreateReduce(
feature_shape, grad_output, zero, dimensions_without_feature,
add_reduce_computation));
auto grad_scale = add_binary(feature_shape, HloOpcode::kMultiply,
sum_grad_output_times_activation_minus_mean,
rsqrt_var_add_epsilon);
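  // grad_scale = sum(dY * (X - mean)) * rsqrt(var + eps), grad_beta = sum(dY).
  // i1..i6 below assemble the standard batch-norm input gradient
  //   dX = scale * rsqrt(var + eps) / N *
  //        (N * dY - sum(dY) - (X - mean) * sum(dY * (X - mean)) / (var + eps))
  // with N the (possibly dynamic) per-feature element count.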
auto i2 = activation_broadcast(grad_beta);
auto i3 = activation_broadcast(sum_grad_output_times_activation_minus_mean);
auto i4 = add_binary(activation_shape, HloOpcode::kMultiply, i3,
activation_minus_mean);
auto i5 =
add_binary(activation_shape, HloOpcode::kDivide, i4,
add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
variance_broadcasted, epsilon_activation));
Shape scale_times_rsqrt_var_add_epsilon_shape = scale_broadcasted->shape();
for (int64_t i = 0; i < rsqrt_var_add_epsilon_broadcasted->shape().rank();
++i) {
if (rsqrt_var_add_epsilon_broadcasted->shape().is_dynamic_dimension(i)) {
scale_times_rsqrt_var_add_epsilon_shape.set_dynamic_dimension(i, true);
}
}
auto scale_times_rsqrt_var_add_epsilon =
add_binary(scale_times_rsqrt_var_add_epsilon_shape, HloOpcode::kMultiply,
scale_broadcasted, rsqrt_var_add_epsilon_broadcasted);
scale_times_rsqrt_var_add_epsilon =
add(Mean(elements_per_feature, scale_times_rsqrt_var_add_epsilon, add));
auto i1 = add_binary(grad_output->shape(), HloOpcode::kMultiply, grad_output,
add(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(activation_shape),
elements_per_feature, {})));
auto i6 = add_binary(
activation_shape, HloOpcode::kSubtract,
add_binary(activation_shape, HloOpcode::kSubtract, i1, i2), i5);
auto grad_activation = add_binary(activation_shape, HloOpcode::kMultiply,
scale_times_rsqrt_var_add_epsilon, i6);
auto tuple =
HloInstruction::CreateTuple({grad_activation, grad_scale, grad_beta});
if (batch_norm->has_sharding()) {
const HloSharding& sharding = batch_norm->sharding();
int64_t instruction_count_after = computation_->instruction_count();
CHECK_EQ(instruction_count_after,
instruction_count_before + added_instructions.size());
HloSharding activation_sharding =
sharding.GetAsShapeTree(batch_norm->shape()).element({0});
auto unique_device = batch_norm->sharding_unique_device();
HloSharding default_sharding =
unique_device.has_value()
? HloSharding::AssignDevice(unique_device.value())
: HloSharding::Replicate();
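    // Re-attach sharding to the instructions created by this expansion:
    // activation-shaped values take the batch-norm output's activation
    // sharding, everything else falls back to a maximal or replicated
    // default.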
for (HloInstruction* inst : added_instructions) {
if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
inst->set_sharding(activation_sharding);
} else {
inst->set_sharding(default_sharding);
}
}
tuple->set_sharding(sharding);
}
TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
return absl::OkStatus();
}
absl::StatusOr<bool> BatchNormExpander::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "BatchNormExpander::Run(), before:\n" + module->ToString());
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
if (BatchNormExpanderVisitor::Run(computation, rewrite_training_op_,
rewrite_inference_op_,
rewrite_grad_op_)) {
changed = true;
}
}
XLA_VLOG_LINES(2, "BatchNormExpander::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/batchnorm_expander.h"
#include <memory>
#include <utility>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class BatchNormExpanderTest : public HloTestBase {
protected:
int64_t CountGetDimensionSize(const HloModule& module) {
int64_t count = 0;
for (HloComputation* comp : module.computations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == HloOpcode::kGetDimensionSize) {
count++;
}
}
}
return count;
}
};
TEST_F(BatchNormExpanderTest, BatchNormTraining) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
Shape offset_shape = ShapeUtil::MakeShape(F32, {2});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "activation"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scale_shape, "scale"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, offset_shape, "offset"));
builder.AddInstruction(HloInstruction::CreateBatchNormTraining(
ShapeUtil::MakeTupleShape({input_shape, scale_shape, offset_shape}),
param0, param1, param2,
      /*epsilon=*/0.001, /*feature_index=*/3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormTraining);
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(CountGetDimensionSize(*module), 3);
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
TEST_F(BatchNormExpanderTest, BatchNormGrad) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
Shape mean_shape = ShapeUtil::MakeShape(F32, {2});
Shape var_shape = ShapeUtil::MakeShape(F32, {2});
Shape grad_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
HloComputation::Builder builder(TestName());
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "activation"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, scale_shape, "scale"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, mean_shape, "mean"));
HloInstruction* param3 = builder.AddInstruction(
HloInstruction::CreateParameter(3, var_shape, "var"));
HloInstruction* param4 = builder.AddInstruction(
HloInstruction::CreateParameter(4, grad_output_shape, "grad_output"));
builder.AddInstruction(HloInstruction::CreateBatchNormGrad(
ShapeUtil::MakeTupleShape({input_shape, scale_shape, mean_shape}), param0,
param1, param2, param3, param4,
      /*epsilon=*/0.001, /*feature_index=*/3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormGrad);
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(CountGetDimensionSize(*module), 3);
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
TEST_F(BatchNormExpanderTest, BatchNormTrainingSharding) {
const char* module_str = R"(
HloModule module
ENTRY entry {
%param.0 = f32[8,4] parameter(0)
%param.1 = f32[4] parameter(1)
%param.2 = f32[4] parameter(2)
ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  BatchNormExpander rewriter(/*rewrite_training_op=*/true,
                             /*rewrite_inference_op=*/true,
                             /*rewrite_grad_op=*/true);
ASSERT_TRUE(rewriter.Run(m.get()).value());
for (auto* instruction : m->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
auto device = instruction->sharding_unique_device();
ASSERT_TRUE(device);
EXPECT_EQ(*device, 1);
}
}
TEST_F(BatchNormExpanderTest, Execution) {
const char* module_str = R"(
HloModule module
ENTRY entry {
%param.0 = f32[8,4] parameter(0)
%param.1 = f32[4] parameter(1)
%param.2 = f32[4] parameter(2)
ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
EXPECT_TRUE(RunAndCompare(module_str, ErrorSpec{1e-4, 1e-4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batchnorm_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batchnorm_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac7d7a37-aae2-47ed-ba55-e28f46329953 | cpp | tensorflow/tensorflow | layout_assignment | third_party/xla/xla/service/gpu/transforms/layout_assignment.cc | third_party/xla/xla/service/gpu/transforms/layout_assignment_test.cc | #include "xla/service/gpu/transforms/layout_assignment.h"
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tsl/util/env_var.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using se::dnn::DataLayout;
using se::dnn::FilterLayout;
static std::tuple<DataLayout, FilterLayout, DataLayout>
HeuristicLayoutAssignment(const HloInstruction* instr,
const se::GpuComputeCapability& gpu_version,
const se::dnn::VersionInfo& dnn_version) {
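  // Chooses (input, filter, output) cuDNN/MIOpen layouts for a convolution
  // custom call based on element type, GPU generation, and debug-option
  // overrides.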
constexpr auto kAllNCHW =
std::make_tuple(DataLayout::kBatchDepthYX, FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX);
constexpr auto kAllNCHW_VECT_C =
std::make_tuple(DataLayout::kBatchDepthYX4, FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4);
constexpr auto kAllNHWC =
std::make_tuple(DataLayout::kBatchYXDepth, FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth);
const ConvolutionDimensionNumbers& dnums =
instr->convolution_dimension_numbers();
Shape input_shape = instr->operand(0)->shape();
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
if (primitive_util::IsIntegralType(input_ty)) {
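    // int8 convolutions: use NCHW_VECT_C when the operand is already in the
    // 5-D vectorized form (two spatial dimensions plus a vector dimension),
    // otherwise NHWC.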
if (input_ty == S8 && dnums.input_spatial_dimensions_size() == 2 &&
input_shape.dimensions_size() == 5) {
VLOG(2) << "Using NCHW_VECT_C for int8_t conv " << instr->ToString();
return kAllNCHW_VECT_C;
}
VLOG(2) << "Using NHWC for int8_t conv " << instr->ToString();
return kAllNHWC;
}
if (primitive_util::IsF8Type(input_ty)) {
VLOG(2) << "Using NHWC for FP8 conv " << instr->ToString();
return kAllNHWC;
}
const DebugOptions& debug_options =
instr->GetModule()->config().debug_options();
if (debug_options.xla_gpu_force_conv_nchw()) {
VLOG(2) << "Overriding layout to NCHW for " << instr->ToString();
return kAllNCHW;
}
if (debug_options.xla_gpu_force_conv_nhwc()) {
VLOG(2) << "Overriding layout to NHWC for " << instr->ToString();
return kAllNHWC;
}
const auto* rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_version);
if (rocm_compute_capability && input_ty == F16) return kAllNHWC;
const bool isFloat16 = (input_ty == F16) || (input_ty == BF16);
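  // On NVIDIA GPUs NHWC is only used for fp16/bf16 4-D convolutions on Volta
  // or newer (tensor cores); additionally, cuDNN <= 7.3 falls back to NCHW
  // for backward-input convolutions with strided windows.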
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version)) {
const auto* cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_version);
bool is_volta =
cuda_compute_capability &&
cuda_compute_capability->IsAtLeast(se::CudaComputeCapability::VOLTA);
if (!isFloat16 || !is_volta ||
instr->shape().tuple_shapes(0).dimensions_size() != 4) {
return kAllNCHW;
}
if (std::make_tuple(dnn_version.major_version(),
dnn_version.minor_version()) <= std::make_tuple(7, 3) &&
instr->custom_call_target() == kCudnnConvBackwardInputCallTarget &&
window_util::HasStride(instr->window())) {
return kAllNCHW;
}
} else if (std::holds_alternative<se::RocmComputeCapability>(gpu_version)) {
bool is_enabled = false;
    TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_USE_ROCM_NHWC",
                                        /*default_val=*/false, &is_enabled));
auto rocm_compute_capability =
std::get<se::RocmComputeCapability>(gpu_version);
if (!isFloat16 || (!rocm_compute_capability.has_nhwc_layout_support()) ||
instr->shape().tuple_shapes(0).dimensions_size() != 4 || !is_enabled) {
return kAllNCHW;
}
}
VLOG(2) << "Using heuristic to figure out layouts for " << instr->ToString();
return kAllNHWC;
}
absl::Status GpuLayoutAssignment::AddBackendConstraintsToDnnConvCustomCall(
HloCustomCallInstruction* instr, LayoutConstraints* constraints) {
Shape lhs_shape = instr->operand(0)->shape();
Shape rhs_shape = instr->operand(1)->shape();
Shape result_shape = instr->shape().tuple_shapes(0);
Shape* input_shape;
Shape* filter_shape;
Shape* output_shape;
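  // Map the custom call's (lhs, rhs, result) buffers onto the logical
  // (input, filter, output) of the convolution; the backward-input and
  // backward-filter kinds permute these roles.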
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instr));
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
input_shape = &lhs_shape;
filter_shape = &rhs_shape;
output_shape = &result_shape;
break;
case CudnnConvKind::kBackwardInput:
input_shape = &result_shape;
filter_shape = &rhs_shape;
output_shape = &lhs_shape;
break;
case CudnnConvKind::kBackwardFilter:
input_shape = &lhs_shape;
filter_shape = &result_shape;
output_shape = &rhs_shape;
break;
}
{
DataLayout input;
FilterLayout filter;
DataLayout output;
std::tie(input, filter, output) =
HeuristicLayoutAssignment(instr, gpu_version_, dnn_version_);
TF_ASSIGN_OR_RETURN(
std::tie(*input_shape->mutable_layout(),
*filter_shape->mutable_layout(),
*output_shape->mutable_layout()),
StreamExecutorConvLayoutsToXlaLayouts(
instr->convolution_dimension_numbers(), input, filter, output));
}
TF_ASSIGN_OR_RETURN(
const LogicalBuffer* call_result_buf,
points_to_analysis_->GetBufferDefinedAt(instr, {0}));
TF_RETURN_IF_ERROR(SetOperandLayout(lhs_shape, instr, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(rhs_shape, instr, 1));
TF_RETURN_IF_ERROR(SetBufferLayout(result_shape.layout(), *call_result_buf));
if (kind == CudnnConvKind::kForwardActivation &&
instr->operand_count() == 4) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, 3));
}
if (kind == CudnnConvKind::kForwardGraph) {
for (int k = 2; k < instr->operand_count(); ++k) {
if (!ShapeUtil::IsScalar(instr->operand(k)->shape())) {
TF_RETURN_IF_ERROR(SetOperandLayout(*output_shape, instr, k));
}
}
}
if (instr->operand_count() > 2 && kind != CudnnConvKind::kForwardActivation &&
kind != CudnnConvKind::kForwardGraph) {
return Internal(
"Invalid convolution. Conv has a side input, but kind is not fused "
"conv forward or graph conv foward: %s",
instr->ToString());
}
return absl::OkStatus();
}
namespace {
void SetFortranLayout(Shape* shape) {
LayoutUtil::SetToDefaultLayout(shape);
int n = shape->mutable_layout()->minor_to_major_size();
CHECK_GE(n, 2);
std::swap(shape->mutable_layout()->mutable_minor_to_major()->at(0),
shape->mutable_layout()->mutable_minor_to_major()->at(1));
}
bool DotCanSupportShapeWithLayout(const HloInstruction* dot,
const Shape& shape) {
const DotDimensionNumbers& dot_dims = dot->dot_dimension_numbers();
return MatrixLayout::For(shape, dot_dims.lhs_batch_dimensions().size(),
dot->operand(0)->shape().rank() -
dot_dims.lhs_contracting_dimensions().size() -
dot_dims.lhs_batch_dimensions().size(),
dot_dims.rhs_batch_dimensions().size(),
dot->operand(1)->shape().rank() -
dot_dims.rhs_contracting_dimensions().size() -
dot_dims.rhs_batch_dimensions().size())
.ok();
}
}
absl::Status GpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
auto post_order = constraints->computation()->MakeInstructionPostOrder();
for (auto iterator = post_order.rbegin(); iterator != post_order.rend();
++iterator) {
HloInstruction* instruction = *iterator;
if (IsCustomCallToDnnConvolution(*instruction)) {
TF_RETURN_IF_ERROR(AddBackendConstraintsToDnnConvCustomCall(
Cast<HloCustomCallInstruction>(instruction), constraints));
}
CHECK(!IsCublasGemm(*instruction))
<< "Gemm rewriting should run after layout assignment";
if (instruction->opcode() == HloOpcode::kDot) {
const Shape& output_shape = instruction->shape();
const Shape& lhs_shape = instruction->operand(0)->shape();
const Shape& rhs_shape = instruction->operand(1)->shape();
const DotDimensionNumbers& dot_dims =
instruction->dot_dimension_numbers();
absl::Span<const int64_t> lhs_batch_dims =
dot_dims.lhs_batch_dimensions();
absl::Span<const int64_t> lhs_contracting_dims =
dot_dims.lhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> lhs_non_contracting_dims,
GetNonContractingDims(lhs_shape, lhs_batch_dims,
lhs_contracting_dims));
absl::Span<const int64_t> rhs_batch_dims =
dot_dims.rhs_batch_dimensions();
absl::Span<const int64_t> rhs_contracting_dims =
dot_dims.rhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> rhs_non_contracting_dims,
GetNonContractingDims(rhs_shape, rhs_batch_dims,
rhs_contracting_dims));
const DebugOptions& debug_options =
instruction->GetModule()->config().debug_options();
bool is_bf16_to_bf16 =
(output_shape.element_type() == PrimitiveType::BF16 &&
lhs_shape.element_type() == PrimitiveType::BF16 &&
rhs_shape.element_type() == PrimitiveType::BF16);
bool is_s8_to_s32 = (output_shape.element_type() == PrimitiveType::S32 &&
lhs_shape.element_type() == PrimitiveType::S8 &&
rhs_shape.element_type() == PrimitiveType::S8 &&
output_shape.dimensions_size() == 2 &&
lhs_shape.dimensions_size() == 2 &&
rhs_shape.dimensions_size() == 2);
bool is_fp8_to_fp8 =
(lhs_shape.element_type() == PrimitiveType::F8E4M3FN &&
rhs_shape.element_type() == PrimitiveType::F8E4M3FN);
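      // For s8->s32 and FP8 gemms (and for bf16 when
      // --xla_gpu_ensure_minor_dot_contraction_dims is set), force operand
      // layouts whose major-to-minor order is {batch, non-contracting,
      // contracting}, i.e. the contracting dimensions are minor-most.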
if (is_s8_to_s32 || is_fp8_to_fp8 ||
(is_bf16_to_bf16 &&
debug_options.xla_gpu_ensure_minor_dot_contraction_dims())) {
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 0,
{lhs_batch_dims, lhs_non_contracting_dims, lhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetOperandMajorToMinorLayout(
instruction, 1,
{rhs_batch_dims, rhs_non_contracting_dims, rhs_contracting_dims}));
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
} else {
if (!lhs_batch_dims.empty() || lhs_contracting_dims.size() > 1 ||
lhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 0, lhs_batch_dims,
lhs_contracting_dims,
lhs_non_contracting_dims));
}
if (!rhs_batch_dims.empty() || rhs_non_contracting_dims.size() > 1 ||
rhs_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotOperandLayout(instruction, 1, rhs_batch_dims,
rhs_contracting_dims,
rhs_non_contracting_dims));
}
if (!lhs_batch_dims.empty() || lhs_non_contracting_dims.size() > 1 ||
rhs_non_contracting_dims.size() > 1) {
TF_RETURN_IF_ERROR(SetDotLayout(instruction, constraints));
}
}
} else if (instruction->opcode() == HloOpcode::kTranspose) {
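      // If the transpose's operand is a dot with no other users, try to give
      // the dot an output layout under which the transpose is a pure layout
      // change, provided the dot still supports that layout.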
const HloInstruction* operand = instruction->operand(0);
if ((operand->opcode() != HloOpcode::kDot) ||
(operand->user_count() > 1)) {
continue;
}
Shape shape = operand->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(instruction->dimensions());
if (DotCanSupportShapeWithLayout(operand, shape)) {
TF_RETURN_IF_ERROR(
SetOperandLayout(shape, instruction, 0));
}
} else if (instruction->opcode() == HloOpcode::kFft) {
Shape op0_shape = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&op0_shape);
Shape output_shape = instruction->shape();
LayoutUtil::SetToDefaultLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kSort &&
instruction->operand(0)->shape().rank() > 1) {
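      // Give every sort operand and every sort output the keys' default
      // layout so that all tensors being sorted share one memory order.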
Shape keys_shape = instruction->operand(0)->shape();
Layout keys_layout =
LayoutUtil::GetDefaultLayoutForRank(keys_shape.rank());
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
Shape shape = instruction->operand(i)->shape();
*shape.mutable_layout() = keys_layout;
TF_RETURN_IF_ERROR(SetOperandLayout(shape, instruction, i));
const LogicalBuffer* output_buffer;
if (instruction->shape().IsArray()) {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {}));
} else {
TF_ASSIGN_OR_RETURN(
output_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {i}));
}
TF_RETURN_IF_ERROR(SetBufferLayout(keys_layout, *output_buffer));
}
} else if (IsCustomCallToTopK(*instruction)) {
Layout default_layout = LayoutUtil::GetDefaultLayoutForRank(
instruction->operand(0)->shape().rank());
TF_ASSIGN_OR_RETURN(
auto values_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {0}));
TF_RETURN_IF_ERROR(SetBufferLayout(default_layout, *values_buffer));
TF_ASSIGN_OR_RETURN(
auto indices_buffer,
points_to_analysis_->GetBufferDefinedAt(instruction, {1}));
TF_RETURN_IF_ERROR(SetBufferLayout(default_layout, *indices_buffer));
} else if (instruction->opcode() == HloOpcode::kTriangularSolve) {
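      // Triangular solve is lowered to library calls that expect column-major
      // (Fortran) matrices, so swap the two minor-most dimensions of each
      // operand and of the result.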
Shape op0_shape = instruction->operand(0)->shape();
Shape op1_shape = instruction->operand(1)->shape();
Shape output_shape = instruction->shape();
SetFortranLayout(&op0_shape);
SetFortranLayout(&op1_shape);
SetFortranLayout(&output_shape);
TF_RETURN_IF_ERROR(SetOperandLayout(op0_shape, instruction, 0));
TF_RETURN_IF_ERROR(SetOperandLayout(op1_shape, instruction, 1));
TF_RETURN_IF_ERROR(SetInstructionLayout(output_shape, instruction));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
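      // Collectives that split or concatenate along one dimension
      // (reduce-scatter, all-gather, all-to-all) get that dimension moved to
      // the major position so each participant's chunk is contiguous.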
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else if (instruction->opcode() == HloOpcode::kAllToAll &&
instruction->shape().IsArray()) {
auto* all_to_all = Cast<HloAllToAllInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(all_to_all->shape(),
*all_to_all->split_dimension()),
all_to_all));
} else if (instruction->opcode() == HloOpcode::kSend) {
Shape s = instruction->operand(0)->shape();
LayoutUtil::SetToDefaultLayout(&s);
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction->operand(0)));
TF_RETURN_IF_ERROR(
SetArrayOperandLayout(s.layout(), instruction->operand(0), 0));
} else if (instruction->opcode() == HloOpcode::kRecv) {
Shape s = instruction->shape();
ShapeUtil::ForEachMutableSubshape(
&s, [&](Shape* subshape, const ShapeIndex& index) {
LayoutUtil::SetToDefaultLayout(subshape);
});
TF_RETURN_IF_ERROR(SetInstructionLayout(s, instruction));
}
}
return absl::OkStatus();
}
absl::Status GpuLayoutAssignment::SetDotOperandLayout(
const HloInstruction* instruction, int64_t operand,
absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims) {
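  // Keep the operand's existing layout if it can be expressed as a
  // MatrixLayout; otherwise try the default layout; as a last resort force
  // {batch, rows, cols} in major-to-minor order.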
Shape shape = instruction->operand(operand)->shape();
if (shape.has_layout() &&
MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
LayoutUtil::SetToDefaultLayout(&shape);
if (MatrixLayout::For(shape, batch_dims, row_dims, col_dims).ok())
return SetOperandLayout(shape, instruction, operand);
return SetOperandMajorToMinorLayout(
instruction, operand,
{batch_dims, row_dims, col_dims});
}
absl::Status GpuLayoutAssignment::SetOperandMajorToMinorLayout(
const HloInstruction* instruction, int64_t operand,
std::initializer_list<absl::Span<const int64_t>> dim_groups) {
size_t size = 0;
for (auto group : dim_groups) size += group.size();
std::vector<int64_t> major_to_minor;
major_to_minor.reserve(size);
for (const auto& group : dim_groups) {
major_to_minor.insert(major_to_minor.end(), group.begin(), group.end());
}
Shape shape = instruction->operand(operand)->shape();
*shape.mutable_layout() =
LayoutUtil::MakeLayoutFromMajorToMinor(major_to_minor);
return SetOperandLayout(shape, instruction, operand);
}
absl::Status GpuLayoutAssignment::SetDotLayout(
const HloInstruction* instruction, LayoutConstraints* constraints) {
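  // Prefer an output layout that one of the dot's users already requires,
  // provided the dot supports it; otherwise fall back to the default layout.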
for (const HloInstruction* user : instruction->users()) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) != instruction) {
continue;
}
const ShapeLayout* constraint = constraints->OperandLayout(user, i);
if ((constraint != nullptr) &&
DotCanSupportShapeWithLayout(instruction, constraint->shape())) {
return SetInstructionLayout(constraint->shape(), instruction);
}
}
}
return SetInstructionLayout(
LayoutUtil::GetWithDefaultLayout(instruction->shape()), instruction);
}
bool GpuLayoutAssignment::PropagateReductionLayoutToOperand(
const HloInstruction* user) {
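  // Only propagate a reduction-friendly layout to the operand if a single
  // unnested row reduction of this size would beat the elementwise emitter.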
int64_t reduction_size = 1;
for (int64_t reduction_dim : user->dimensions()) {
reduction_size *= user->operand(0)->shape().dimensions(reduction_dim);
}
int64_t kept_dimension_size = ShapeUtil::ElementsIn(user->shape());
return IsUnnestedReductionFasterThanElemental(
{true, {1, kept_dimension_size, reduction_size}});
}
bool GpuLayoutAssignment::InstructionCanChangeLayoutInstance(
const HloInstruction* instruction) {
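  // Host memory offload annotations and TopK custom calls must keep their
  // operand and result layouts identical, so layout assignment may not
  // change layouts across them.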
const HloCustomCallInstruction* custom_call =
DynCast<HloCustomCallInstruction>(instruction);
if (custom_call != nullptr &&
(custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToHostCustomCallTarget ||
custom_call->custom_call_target() ==
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget ||
custom_call->custom_call_target() == kTopKCustomCallTarget)) {
return false;
}
return LayoutAssignment::InstructionCanChangeLayoutInstance(instruction);
}
}
} | #include "xla/service/gpu/transforms/layout_assignment.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::tsl::testing::IsOkAndHolds;
class LayoutAssignmentTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
se::GpuComputeCapability GetGpuComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
se::dnn::VersionInfo GetDnnVersion() {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor(),
se::dnn::VersionInfo{8, 3, 0});
}
};
TEST_F(LayoutAssignmentTest, Elementwise) {
Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
Shape ashape_in_row_major(ashape);
Shape ashape_in_col_major(ashape);
*ashape_in_row_major.mutable_layout() = LayoutUtil::MakeLayout({1, 0});
*ashape_in_col_major.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
for (const Shape& lhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& rhs_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
for (const Shape& result_shape_with_layout :
{ashape_in_row_major, ashape_in_col_major}) {
auto builder = HloComputation::Builder(TestName());
auto x = builder.AddInstruction(
HloInstruction::CreateParameter(0, ashape, "x"));
auto y = builder.AddInstruction(
HloInstruction::CreateParameter(1, ashape, "y"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(ashape, HloOpcode::kAdd, x, y));
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(builder.Build(add));
ComputationLayout computation_layout(
computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(lhs_shape_with_layout);
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(rhs_shape_with_layout);
*computation_layout.mutable_result_layout() =
ShapeLayout(result_shape_with_layout);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
for (const HloInstruction* operand : add->operands()) {
EXPECT_TRUE(LayoutUtil::Equal(add->shape().layout(),
operand->shape().layout()));
}
}
}
}
}
TEST_F(LayoutAssignmentTest, DotLayoutUnchangedIfValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3]{1,2,0} parameter(0)
p1 = f32[5,3,4]{1,2,0} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4]{2,1,0} dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {1, 2, 0}),
m::Op().WithShape(F32, {5, 3, 4}, {1, 2, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotLayoutSetToDefaultIfDefaultValid) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,3,2] parameter(0)
p1 = f32[5,4,3]{0,1,2} parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={1},
rhs_batch_dims={0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 3, 2}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 4, 3}, {2, 1, 0}))
.WithShape(F32, {5, 2, 4}, {2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, DotOperandLayoutSetToBatchRowsColsOtherwise) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[2,3,5]{2,1,0} parameter(0)
p1 = f32[3,4,5] parameter(1)
ROOT dot.1330.10585 = f32[5,2,4] dot(p0, p1),
lhs_batch_dims={2}, lhs_contracting_dims={1},
rhs_batch_dims={2}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {2, 3, 5}, {0, 1, 2}),
m::Op().WithShape(F32, {3, 4, 5}, {1, 0, 2}))));
}
TEST_F(LayoutAssignmentTest, DotOperandInconsistentDimLayouts) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,6,2,3] parameter(0)
p1 = f32[6,5,3,4] parameter(1)
ROOT dot.1330.10585 = f32[5,6,2,4] dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={1,0}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(F32, {5, 6, 2, 3}, {3, 2, 1, 0}),
m::Op().WithShape(F32, {6, 5, 3, 4}, {3, 2, 0, 1}))));
}
TEST_F(LayoutAssignmentTest, TransposedDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[5,2,3] parameter(0)
p1 = f32[5,3,4,6] parameter(1)
dot = f32[5,2,4,6] dot(p0, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT out = f32[2,5,4,6] transpose(dot), dimensions={1,0,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Dot(m::Op().WithShape(F32, {5, 2, 3}, {2, 1, 0}),
m::Op().WithShape(F32, {5, 3, 4, 6}, {3, 2, 1, 0}))
.WithShape(F32, {5, 2, 4, 6}, {3, 2, 0, 1}))
.WithShape(F32, {2, 5, 4, 6}, {3, 2, 1, 0})));
}
TEST_F(LayoutAssignmentTest, TransposedDotOfDotLayout) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY dot {
p0 = f32[8,50] parameter(0)
p1 = f32[2,8,4,4] parameter(1)
p2 = f32[4,38] parameter(2)
dot.1 = f32[50,2,4,4]{3,2,1,0} dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
dot.2 = f32[50,2,4,38]{3,2,1,0} dot(dot.1, p2),
lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT out = f32[2,50,38,4]{2,3,0,1} transpose(dot.2), dimensions={1,0,3,2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Dot(m::Copy(m::Dot(m::Op().WithShape(F32, {8, 50}, {1, 0}),
m::Op().WithShape(F32, {2, 8, 4, 4},
{3, 2, 0, 1}))
.WithShape(F32, {50, 2, 4, 4}, {3, 2, 1, 0}))
.WithShape(F32, {50, 2, 4, 4}, {3, 1, 0, 2}),
m::Op().WithShape(F32, {4, 38}, {1, 0}))
.WithShape(F32, {50, 2, 4, 38}, {3, 2, 1, 0}))
.WithShape(F32, {2, 50, 38, 4}, {2, 3, 0, 1})));
}
TEST_F(LayoutAssignmentTest, DotLayoutS8) {
const char* hlo_text = R"(
HloModule DotLayout
ENTRY int8_t {
p0 = s8[32,64] parameter(0)
p1 = s8[64,96] parameter(1)
ROOT out = s32[32,96] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Op().WithShape(S8, {32, 64}, {1, 0}),
m::Op().WithShape(S8, {64, 96}, {0, 1}))));
}
TEST_F(LayoutAssignmentTest, SortLayout) {
const char* hlo_text = R"(
HloModule SortLayout
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort {
keys = f32[3,2]{0,1} constant({{0,1},{0,1},{0,1}})
values = f32[2,3]{1,0} parameter(0)
transpose = f32[3,2]{1,0} transpose(values), dimensions={1,0}
ROOT sort = (f32[3,2]{1,0}, f32[3,2]{1,0}) sort(keys, transpose),
dimensions={1}, to_apply=compare
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Sort(m::Op().WithShape(F32, {3, 2}, {1, 0}),
m::Op().WithShape(F32, {3, 2}, {1, 0}))));
}
TEST_F(LayoutAssignmentTest, TopKLayout) {
const char* hlo_text = R"(
HloModule topk
compare-greater-than {
p.1.lhs.3 = s32[] parameter(2)
p.1.rhs.4 = s32[] parameter(3)
p.0.lhs.1 = f32[] parameter(0)
bitcast-convert = s32[] bitcast-convert(p.0.lhs.1)
constant = s32[] constant(0)
compare = pred[] compare(bitcast-convert, constant), direction=LT
constant.2 = s32[] constant(2147483647)
xor = s32[] xor(constant.2, bitcast-convert)
select = s32[] select(compare, xor, bitcast-convert)
p.0.rhs.2 = f32[] parameter(1)
bitcast-convert.1 = s32[] bitcast-convert(p.0.rhs.2)
compare.1 = pred[] compare(bitcast-convert.1, constant), direction=LT
xor.1 = s32[] xor(constant.2, bitcast-convert.1)
select.1 = s32[] select(compare.1, xor.1, bitcast-convert.1)
ROOT compare.5 = pred[] compare(select, select.1), direction=GT
}
ENTRY main {
Arg_0.1 = f32[2048,6]{1,0} parameter(0)
t = f32[6,2048]{0,1} transpose(Arg_0.1), dimensions={1,0}
ROOT custom-call.1 = (f32[6,8]{1,0}, s32[6,8]{1,0}) custom-call(t), custom_call_target="__gpu$TopK", api_version=API_VERSION_TYPED_FFI, called_computations={compare-greater-than}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::CustomCall(
m::Transpose(m::Copy().WithShape(F32, {2048, 6}, {0, 1}))
.WithShape(F32, {6, 2048}, {1, 0}))));
}
TEST_F(LayoutAssignmentTest, FftLayout) {
const char* hlo_text = R"(
HloModule Fft_module
ENTRY Fft {
input = c64[8,32]{0,1} parameter(0)
fft = c64[8,32] fft(input), fft_type=FFT, fft_length={32}
ROOT transpose = c64[32,8] transpose(fft), dimensions={1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ComputationLayout computation_layout(
module->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy(
m::Transpose(m::Fft(m::Op().WithShape(C64, {8, 32}, {1, 0}))
.WithShape(C64, {8, 32}, {1, 0})))));
}
TEST_F(LayoutAssignmentTest, CustomCallConstrainedAlias) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
Arg_1 = f32[2,5,5]{2,1,0} parameter(1)
Arg_2 = f32[2,5,5]{2,1,0} parameter(2)
dot.0 = f32[2,5,5]{2,1,0} dot(Arg_1, Arg_2), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={2}, operand_precision={highest,highest}
custom-call.0 = (f32[2,5,5]{1,2,0}, s8[16]{0}, s8[16]{0}) custom-call(Arg_0, dot.0), custom_call_target="dummy_call", operand_layout_constraints={f32[2,5,5]{1,2,0}, f32[2,5,5]{1,2,0}}, output_to_operand_aliasing={{0}: (1, {})}
ROOT get-tuple-element.0 = f32[2,5,5]{1,2,0} get-tuple-element(custom-call.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
auto expect_layout = [](const Shape& shape,
absl::Span<const int64_t> minor_to_major) {
const Layout expected = LayoutUtil::MakeLayout(minor_to_major);
EXPECT_TRUE(LayoutUtil::Equal(shape.layout(), expected))
<< "Expected layout " << expected << ", actual " << shape.layout();
};
expect_layout(ShapeUtil::GetSubshape(call_0->shape(), {0}), {1, 2, 0});
expect_layout(call_0->operand(0)->shape(), {1, 2, 0});
expect_layout(call_0->operand(1)->shape(), {1, 2, 0});
}
TEST_F(LayoutAssignmentTest, MoveToHostCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToHost"
ROOT custom-call.1 = f32[2,5,5]{2, 1, 0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, MoveToDeviceCustomCallConstrained) {
const char* module_str = R"(
HloModule TestModule
ENTRY entry {
Arg_0 = f32[2,5,5]{2,1,0} parameter(0)
custom-call.0 = f32[2,5,5] custom-call(Arg_0), custom_call_target="MoveToDevice"
ROOT custom-call.1 = f32[2,5,5]{2, 1, 0} custom-call(custom-call.0), custom_call_target="fixed_call", operand_layout_constraints={f32[2,5,5]{1,2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
const HloInstruction* call_0 = FindInstruction(m.get(), "custom-call.0");
const Layout input_layout = call_0->operand(0)->shape().layout();
const Layout output_layout = call_0->shape().layout();
EXPECT_TRUE(LayoutUtil::Equal(input_layout, output_layout))
<< "Expected the same input/output layouts. Input: " << input_layout
<< ". Output: " << output_layout;
}
TEST_F(LayoutAssignmentTest, ConvCuDNNF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP() << "FP8 convolutions require HOPPER or newer archiecture.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0 = f8e4m3fn[1,64,64,16]{3,2,1,0} parameter(0)
Arg_1 = f8e4m3fn[3,3,16,32]{3,2,1,0} parameter(1)
ROOT conv = f8e4m3fn[1,64,64,32]{3,2,1,0} convolution(Arg_0, Arg_1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNBF16) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv with Bfloat16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = bf16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = bf16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = bf16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, metadata={op_name="jit(conv_general_dilated)/jit(main)/conv_general_dilated[window_strides=(1, 1) padding=((1, 1), (1, 1)) lhs_dilation=(1, 1) rhs_dilation=(1, 1) dimension_numbers=ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2)) feature_group_count=1 batch_group_count=1 lhs_shape=(1, 64, 64, 16) rhs_shape=(3, 3, 16, 32) precision=None preferred_element_type=None]" source_file="/usr/local/lib/python3.8/dist-packages/flax/linen/linear.py" source_line=438}
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ConvCuDNNFP16) {
if (!GetCudaComputeCapability().IsAtLeast(se::CudaComputeCapability::VOLTA)) {
GTEST_SKIP() << "Conv with FP16 uses NHWC layout for "
"architectures with Tensor Cores.";
}
const char* hlo = R"(
HloModule jit_conv_general_dilated
ENTRY main.4 {
Arg_0.1 = f16[1,64,64,16]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = f16[3,3,16,32]{3,2,1,0} parameter(1), sharding={replicated}
ROOT convolution.3 = f16[1,64,64,32]{3,2,1,0} convolution(Arg_0.1, Arg_1.2), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayout) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,64,1024,32,128]{4,3,2,1,0} parameter(0)
negate = c64[512,64,1024,32,128]{4,3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1,3}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({3, 1, 4, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, ReduceOperandLayoutDivisorOfWarpSize) {
const char* module_str = R"(
scalar_add_computation {
scalar_lhs = c64[] parameter(0)
scalar_rhs = c64[] parameter(1)
ROOT add.1 = c64[] add(scalar_lhs, scalar_rhs)
}
ENTRY main {
param_0 = c64[512,16,1024,128]{3,2,1,0} parameter(0)
negate = c64[512,16,1024,128]{3,2,1,0} negate(param_0)
constant_7 = c64[] constant((0, 0))
ROOT reduce.2 = c64[512,1024,128]{2,1,0} reduce(negate, constant_7), dimensions={1}, to_apply=scalar_add_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
LayoutUtil::MakeLayout({1, 3, 2, 0}).minor_to_major());
}
TEST_F(LayoutAssignmentTest, AutoLayoutE4M3ContractingMinorFirst) {
const char* hlo = R"(
HloModule jit_dot_general_f8e4m3fn
ENTRY main {
p0 = f8e4m3fn[128,5120] parameter(0)
p1 = f8e4m3fn[5120,10240] parameter(1)
ROOT dot = f32[128,10240] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> m,
ParseAndReturnUnverifiedModule(
hlo, {}, HloParserOptions().set_fill_missing_layouts(false)));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape(),
      /*ignore_layouts=*/false);
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Dot(m::Parameter(0).WithShape(F8E4M3FN, {128, 5120}, {1, 0}),
m::Parameter(1).WithShape(F8E4M3FN, {5120, 10240}, {0, 1}))
.WithShape(F32, {128, 10240}, {1, 0})));
}
TEST_F(LayoutAssignmentTest, VariadicReduceSameOperandLayout) {
const char* module_str = R"(
HloModule variadic_reduce
reducer {
%Arg_0.261 = s32[] parameter(0)
%Arg_2.263 = s32[] parameter(2)
mul = s32[] multiply(%Arg_0.261, %Arg_2.263)
%Arg_1.260 = f32[] parameter(1)
%Arg_3.262 = f32[] parameter(3)
add = f32[] add(%Arg_1.260, %Arg_3.262)
ROOT %tuple = (s32[], f32[]) tuple(mul, add)
}
ENTRY main {
param_0 = f32[512,16,1024,128]{3,2,1,0} parameter(0)
param_1 = s32[128,1024,16,512]{3,2,1,0} parameter(1)
transpose = s32[512,16,1024,128]{0,1,2,3} transpose(param_1), dimensions={3,2,1,0}
zero = f32[] constant(0.0)
one = s32[] constant(1)
ROOT reduce.2 = (s32[512,1024,128]{0,1,2}, f32[512,1024,128]{2,1,0}) reduce(transpose, param_0, one, zero), dimensions={1}, to_apply=reducer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(module_str));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
GpuLayoutAssignment layout_assignment(
&computation_layout, GetGpuComputeCapability(), GetDnnVersion());
EXPECT_THAT(layout_assignment.Run(m.get()), IsOkAndHolds(true));
auto reduce = m->entry_computation()->root_instruction();
EXPECT_EQ(reduce->operand(0)->shape().layout().minor_to_major(),
reduce->operand(1)->shape().layout().minor_to_major());
}
TEST_F(LayoutAssignmentTest, SendRcvLayout) {
const char* hlo = R"(
HloModule Module
condition {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
ROOT lt = pred[] constant(1)
}
body {
p = (f32[100,100], (f32[100,100], u32[], token[])) parameter(0)
t1 = f32[100,100] get-tuple-element(p), index=0
t = (f32[100,100], u32[], token[]) get-tuple-element(p), index=1
sdone = token[] send-done(t), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
tk = token[] after-all()
rcvd = (f32[100,100]{0,1}, u32[], token[]) recv(tk), channel_id=2
zz = (f32[100,100]{0,1}, token[]) recv-done(rcvd), channel_id=2
rcvd_d = get-tuple-element(zz), index=0
snd = (f32[100,100]{0,1}, u32[], token[]) send(t1, tk), channel_id=3, frontend_attributes={
_xla_send_recv_pipeline="0"
}
a = add(t1, t1)
b = add(rcvd_d, a)
ROOT tup = tuple(b, snd)
}
ENTRY %main {
p0 = f32[100,100] parameter(0)
tk = token[] after-all()
snd = (f32[100,100]{0,1}, u32[], token[]) send(p0, tk), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
t = tuple(p0, snd)
ROOT loop = while(t), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo));
ComputationLayout computation_layout(
m->entry_computation()->ComputeProgramShape());
RunAndFilecheckHloRewrite(
hlo,
GpuLayoutAssignment{&computation_layout, GetGpuComputeCapability(),
GetDnnVersion()},
R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/layout_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/layout_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
483d3296-f26c-47df-99b2-4948f264e418 | cpp | tensorflow/tensorflow | generic_transfer_manager | third_party/xla/xla/service/generic_transfer_manager.cc | third_party/xla/xla/service/generic_transfer_manager_test.cc | #include "xla/service/generic_transfer_manager.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
GenericTransferManager::GenericTransferManager(se::Platform::Id platform_id,
size_t pointer_size)
: platform_id_(platform_id), pointer_size_(pointer_size) {}
se::Platform::Id GenericTransferManager::PlatformId() const {
return platform_id_;
}
absl::Status GenericTransferManager::WriteSingleTupleIndexTable(
se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
const Shape& shape, se::DeviceMemoryBase* region) {
TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape));
auto element_pointers = std::make_shared<std::vector<const void*>>();
element_pointers->reserve(elements.size());
for (const se::DeviceMemoryBase& element : elements) {
element_pointers->push_back(element.opaque());
}
TF_RETURN_IF_ERROR(TransferBufferToDevice(
stream, GetByteSizeRequirement(shape), element_pointers->data(), region));
TF_RETURN_IF_ERROR(
stream->DoHostCallback([element_pointers{std::move(element_pointers)}]() {
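        // Deliberately empty: the callback only keeps element_pointers alive
        // until the transfer above has completed.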
}));
return absl::OkStatus();
}
void GenericTransferManager::TransferLiteralFromDevice(
se::Stream* stream, const ShapedBuffer& device_buffer,
MutableBorrowingLiteral literal, std::function<void(absl::Status)> done,
const TransferMetadata* transfer_metadata) {
VLOG(2) << "transferring literal from device ordinal "
<< stream->parent()->device_ordinal()
<< "; device buffer: " << device_buffer;
absl::Status status = [&]() -> absl::Status {
TF_RET_CHECK(stream->parent()->device_ordinal() ==
device_buffer.physical_device_ordinal());
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
device_buffer.on_device_shape(),
[&](const Shape& subshape, const ShapeIndex& index) -> absl::Status {
if (subshape.IsArray()) {
if (PackSubbyteTypes() &&
primitive_util::IsSubByteNonPredType(subshape.element_type())) {
if (!subshape.is_static()) {
return absl::UnimplementedError(
"Int4 outputs with dynamic shapes are unsupported");
}
return TransferIntNArrayFromDevice(
stream,
device_buffer.buffer(index),
subshape.element_type(),
ShapeUtil::ElementsIn(subshape),
literal.untyped_data(index));
} else {
TF_RETURN_IF_ERROR(TransferBufferFromDevice(
stream,
device_buffer.buffer(index),
GetByteSizeRequirement(
ShapeUtil::GetSubshape(literal.shape(), index)),
literal.untyped_data(index)));
}
}
return absl::OkStatus();
}));
return absl::OkStatus();
}();
if (!status.ok()) {
done(status);
return;
}
if ((transfer_metadata != nullptr) &&
tensorflow::down_cast<const LiteralFromDeviceMetadata*>(transfer_metadata)
->callback_is_host_callback_safe) {
auto status = stream->DoHostCallback([done = std::move(done), stream] {
done(stream->ok() ? absl::OkStatus()
: Internal("`TransferLiteralFromDevice` failed"));
});
if (!status.ok()) {
LOG(ERROR) << "`DoHostCallback` failed: " << status;
}
} else {
done(stream->BlockHostUntilDone());
}
}
absl::Status GenericTransferManager::TransferLiteralToDeviceAsync(
se::Stream* stream, const LiteralSlice& literal,
const ShapedBuffer& device_buffer,
    const TransferMetadata* /*transfer_metadata*/) {
const Shape& shape = literal.shape();
VLOG(2) << "transferring literal shape to device: "
<< ShapeUtil::HumanString(shape)
<< "; device buffer: " << device_buffer;
TF_RET_CHECK(
ShapeUtil::Compatible(literal.shape(), device_buffer.on_device_shape()));
TF_RET_CHECK(stream->parent()->device_ordinal() ==
device_buffer.physical_device_ordinal());
TF_RETURN_IF_ERROR(WriteTupleIndexTablesAsync(stream, device_buffer));
return ShapeUtil::ForEachSubshapeWithStatus(
device_buffer.on_device_shape(),
[&](const Shape& device_subshape,
const ShapeIndex& index) -> absl::Status {
if (device_subshape.IsArray()) {
int64_t size = GetByteSizeRequirement(device_subshape);
se::DeviceMemoryBase device_memory = device_buffer.buffer(index);
TF_RET_CHECK(size == device_memory.size());
auto TransferBuffer = [&](const void* source) {
if (PackSubbyteTypes() && primitive_util::IsSubByteNonPredType(
device_subshape.element_type())) {
if (!device_subshape.is_static()) {
return absl::UnimplementedError(absl::StrCat(
primitive_util::LowercasePrimitiveTypeName(
device_subshape.element_type()),
" inputs with dynamic shapes are unsupported"));
}
return TransferIntNArrayToDevice(
stream, device_subshape.element_type(),
ShapeUtil::ElementsIn(device_subshape),
source,
&device_memory);
} else {
return TransferBufferToDevice(stream, size,
source,
&device_memory);
}
};
LiteralSlice subliteral(literal, index);
if (device_subshape.layout() == subliteral.shape().layout()) {
return TransferBuffer(subliteral.untyped_data());
} else {
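            // The literal's layout differs from the device layout: relayout
            // into a host-side temporary and keep it alive, via the host
            // callback below, until the asynchronous copy has finished.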
auto relaid_out = std::make_shared<Literal>(
subliteral.Relayout(device_subshape.layout()));
TF_RETURN_IF_ERROR(TransferBuffer(relaid_out->untyped_data()));
TF_RETURN_IF_ERROR(stream->DoHostCallback(
[keep_alive = std::move(relaid_out)] {}));
}
}
return absl::OkStatus();
});
}
absl::Status GenericTransferManager::TransferLiteralToInfeed(
se::StreamExecutor* executor, const LiteralSlice& literal) {
return Unimplemented("Generic transfer to Infeed");
}
absl::Status GenericTransferManager::TransferLiteralFromOutfeed(
se::StreamExecutor* executor, MutableBorrowingLiteral literal) {
return Unimplemented("Generic transfer from Outfeed");
}
absl::Status GenericTransferManager::ResetDevices(
    absl::Span<se::StreamExecutor* const> /*executors*/) {
return Unimplemented(
"Device reset is not yet supported on this platform (b/30481585)");
}
absl::Status GenericTransferManager::TransferBufferFromDevice(
se::Stream* stream, const se::DeviceMemoryBase& source, int64_t size,
void* destination) {
if (source.size() < size) {
return absl::FailedPreconditionError(absl::StrFormat(
"Source allocation on device not large enough for data transfer: "
"%d < %d",
source.size(), size));
}
return stream->Memcpy(destination, source, size);
}
absl::Status GenericTransferManager::TransferBufferToDevice(
se::Stream* stream, int64_t size, const void* source,
se::DeviceMemoryBase* destination) {
if (destination->size() < size) {
return absl::FailedPreconditionError(absl::StrFormat(
"Destination allocation on device not large enough for data transfer: "
"%d < %d",
destination->size(), size));
}
return stream->Memcpy(destination, source, size);
}
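// Reads a bit-packed sub-byte array (several elements per byte) from the
// device, then unpacks it on the host into one element per byte at
// `destination` once the device-to-host copy has finished.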
absl::Status GenericTransferManager::TransferIntNArrayFromDevice(
se::Stream* stream, const se::DeviceMemoryBase& source,
PrimitiveType element_type, int64_t num_elements, void* destination) {
int bit_width = primitive_util::BitWidth(element_type);
int64_t elements_per_byte = 8 / bit_width;
int64_t packed_size = CeilOfRatio(num_elements, elements_per_byte);
auto packed_dst_data = std::make_unique<std::vector<char>>(packed_size);
TF_RETURN_IF_ERROR(TransferBufferFromDevice(stream, source, packed_size,
packed_dst_data->data()));
TF_RETURN_IF_ERROR(
stream->DoHostCallback([destination, bit_width, num_elements,
packed_dst_data = std::move(packed_dst_data)]() {
UnpackIntN(
bit_width, *packed_dst_data,
absl::MakeSpan(static_cast<char*>(destination), num_elements));
}));
return absl::OkStatus();
}
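// Bit-packs a host array of sub-byte elements (one element per byte at
// `source`) into a temporary buffer, enqueues the copy to the device, and
// keeps the packed buffer alive via a host callback until the copy is done.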
absl::Status GenericTransferManager::TransferIntNArrayToDevice(
se::Stream* stream, PrimitiveType element_type, int64_t num_elements,
const void* source, se::DeviceMemoryBase* destination) {
int bit_width = primitive_util::BitWidth(element_type);
int64_t elements_per_byte = 8 / bit_width;
auto packed_src_data = std::make_unique<std::vector<char>>(
CeilOfRatio(num_elements, elements_per_byte));
PackIntN(bit_width,
absl::MakeSpan(static_cast<const char*>(source), num_elements),
absl::MakeSpan(*packed_src_data));
TF_RETURN_IF_ERROR(TransferBufferToDevice(
stream, packed_src_data->size(), packed_src_data->data(), destination));
return stream->DoHostCallback([keep_alive = std::move(packed_src_data)] {});
}
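// Tuples and static shapes need exactly ShapeUtil::ByteSizeOf; dynamic array
// shapes additionally reserve one int32 per dimension to hold the runtime
// dimension sizes.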
int64_t GenericTransferManager::GetByteSizeRequirement(
const Shape& shape) const {
if (shape.IsTuple() || shape.is_static()) {
return ShapeUtil::ByteSizeOf(shape, pointer_size_);
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return ShapeUtil::ByteSizeOf(shape, pointer_size_) + metadata_size;
}
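// Like the base implementation, but annotates layouts of sub-byte non-PRED
// element types with their bit width so the data is stored packed on device.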
Shape GenericTransferManager::HostShapeToDeviceShape(
const Shape& host_shape) const {
Shape device_shape = TransferManager::HostShapeToDeviceShape(host_shape);
if (PackSubbyteTypes() &&
primitive_util::IsSubByteNonPredType(device_shape.element_type())) {
device_shape.mutable_layout()->set_element_size_in_bits(
primitive_util::BitWidth(device_shape.element_type()));
}
return device_shape;
}
absl::StatusOr<Shape> GenericTransferManager::ChooseCompactLayoutForShape(
const Shape& host_shape) const {
Shape compact_shape = LayoutUtil::GetWithDefaultLayout(host_shape);
if (PackSubbyteTypes() &&
primitive_util::IsSubByteNonPredType(compact_shape.element_type())) {
compact_shape.mutable_layout()->set_element_size_in_bits(
primitive_util::BitWidth(compact_shape.element_type()));
}
return compact_shape;
}
} | #include "xla/service/generic_transfer_manager.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/host/host_platform_id.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
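// Test-only subclass that lets each test toggle sub-byte packing on or off.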
class PackingTransferManager : public GenericTransferManager {
public:
using GenericTransferManager::GenericTransferManager;
bool pack_subbyte_types_ = true;
private:
bool PackSubbyteTypes() const override { return pack_subbyte_types_; }
};
class GenericTransferManagerTest : public ::testing::Test {
public:
GenericTransferManagerTest()
: transfer_manager_(se::host::kHostPlatformId,
                          /*pointer_size=*/sizeof(void*)) {}
void SetUp() override {
TF_ASSERT_OK_AND_ASSIGN(
se::Platform * platform,
se::PlatformManager::PlatformWithId(se::host::kHostPlatformId));
TF_ASSERT_OK_AND_ASSIGN(stream_executor_, platform->ExecutorForDevice(0));
TF_ASSERT_OK_AND_ASSIGN(stream_, stream_executor_->CreateStream());
allocator_ =
std::make_unique<se::StreamExecutorMemoryAllocator>(stream_executor_);
}
ScopedShapedBuffer AllocateBuffer(const Shape& shape) {
auto buffer =
transfer_manager_.AllocateScopedShapedBuffer(shape, allocator_.get(),
                                                     /*device_ordinal=*/0);
return std::move(buffer.value());
}
PackingTransferManager transfer_manager_;
se::StreamExecutor* stream_executor_;
std::unique_ptr<se::Stream> stream_;
std::unique_ptr<se::DeviceMemoryAllocator> allocator_;
};
TEST_F(GenericTransferManagerTest, TransferLiteralToDevice) {
ScopedShapedBuffer buffer = AllocateBuffer(ShapeUtil::MakeShape(U16, {2, 2}));
Literal literal = LiteralUtil::CreateR2<uint16_t>({{1, 2}, {3, 4}});
TF_ASSERT_OK(transfer_manager_.TransferLiteralToDevice(stream_.get(), literal,
buffer));
se::DeviceMemoryBase device_mem = buffer.buffers().element({});
uint16_t* device_ptr = static_cast<uint16_t*>(device_mem.opaque());
std::vector<uint16_t> expected = {1, 2, 3, 4};
EXPECT_EQ(absl::Span<uint16_t>(device_ptr, expected.size()), expected);
}
MATCHER_P2(MaskedValuesEqual, mask, expected, "") {
if (arg.size() != expected.size()) {
*result_listener << "argument sizes do not match";
return false;
}
for (size_t i = 0; i < expected.size(); ++i) {
const auto v1 = arg[i] & mask;
const auto v2 = expected[i] & mask;
if (v1 != v2) {
*result_listener << "mismatch at position " << i << ", " << v1 << " vs "
<< v2;
return false;
}
}
return true;
}
TEST_F(GenericTransferManagerTest, TransferLiteralToDeviceInt4) {
Literal literal =
LiteralUtil::CreateR2<s4>({{s4{1}, s4{-2}}, {s4{-3}, s4{4}}});
for (bool pack : {false, true}) {
SCOPED_TRACE(absl::StrCat("pack=", pack));
transfer_manager_.pack_subbyte_types_ = pack;
ScopedShapedBuffer buffer =
AllocateBuffer(ShapeUtil::MakeShape(S4, {2, 2}));
TF_ASSERT_OK(transfer_manager_.TransferLiteralToDevice(stream_.get(),
literal, buffer));
se::DeviceMemoryBase device_mem = buffer.buffers().element({});
ASSERT_EQ(device_mem.size(), pack ? 2 : 4);
int8_t* device_ptr = static_cast<int8_t*>(device_mem.opaque());
std::vector<int8_t> expected =
pack ? std::vector<int8_t>{static_cast<int8_t>(0x1e),
static_cast<int8_t>(0xd4)}
: std::vector<int8_t>{1, -2, -3, 4};
EXPECT_THAT(absl::Span<int8_t>(device_ptr, expected.size()),
MaskedValuesEqual(pack ? 0xFF : 0x0F, expected));
}
}
TEST_F(GenericTransferManagerTest, TransferLiteralFromDevice) {
ScopedShapedBuffer buffer = AllocateBuffer(ShapeUtil::MakeShape(U16, {2, 2}));
se::DeviceMemoryBase device_mem = buffer.buffers().element({});
uint16_t* device_ptr = static_cast<uint16_t*>(device_mem.opaque());
for (int i = 0; i < 4; i++) {
device_ptr[i] = i + 1;
}
TF_ASSERT_OK_AND_ASSIGN(
Literal literal,
transfer_manager_.TransferManager::TransferLiteralFromDevice(
stream_.get(), buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(
literal, LiteralUtil::CreateR2<uint16_t>({{1, 2}, {3, 4}})));
}
TEST_F(GenericTransferManagerTest, TransferLiteralFromDeviceInt4) {
for (bool pack : {false, true}) {
SCOPED_TRACE(absl::StrCat("pack=", pack));
transfer_manager_.pack_subbyte_types_ = pack;
ScopedShapedBuffer buffer =
AllocateBuffer(ShapeUtil::MakeShape(S4, {2, 2}));
se::DeviceMemoryBase device_mem = buffer.buffers().element({});
uint8_t* device_ptr = static_cast<uint8_t*>(device_mem.opaque());
if (pack) {
ASSERT_EQ(device_mem.size(), 2);
device_ptr[0] = 0x1e;
device_ptr[1] = 0xd4;
} else {
ASSERT_EQ(device_mem.size(), 4);
device_ptr[0] = 1;
device_ptr[1] = -2;
device_ptr[2] = -3;
device_ptr[3] = 4;
}
TF_ASSERT_OK_AND_ASSIGN(
Literal literal,
transfer_manager_.TransferManager::TransferLiteralFromDevice(
stream_.get(), buffer));
EXPECT_TRUE(LiteralTestUtil::Equal(
literal,
LiteralUtil::CreateR2<s4>({{s4{1}, s4{-2}}, {s4{-3}, s4{4}}})));
}
}
TEST_F(GenericTransferManagerTest, ChooseCompactLayoutForShape) {
auto shape = ShapeUtil::MakeShape(S4, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(auto compact_shape,
transfer_manager_.ChooseCompactLayoutForShape(shape));
EXPECT_TRUE(Shape::Equal().IgnoreLayout()(compact_shape, shape));
EXPECT_EQ(compact_shape.layout().element_size_in_bits(), 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/generic_transfer_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/generic_transfer_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9234e0ca-6ac8-40ec-8b17-74532a22dca1 | cpp | tensorflow/tensorflow | hlo_creation_utils | third_party/xla/xla/service/hlo_creation_utils.cc | third_party/xla/xla/service/hlo_creation_utils_test.cc | #include "xla/service/hlo_creation_utils.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
absl::StatusOr<HloInstruction*> MakeUnaryHlo(HloOpcode opcode,
HloInstruction* operand,
const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape unary_op_shape,
ShapeInference::InferUnaryOpShape(opcode, operand));
return computation->AddInstruction(
HloInstruction::CreateUnary(unary_op_shape, opcode, operand), metadata);
}
HloInstruction* MakeCopyHlo(HloInstruction* from, const Shape& to) {
return from->AddInstruction(
HloInstruction::CreateUnary(to, HloOpcode::kCopy, from));
}
absl::StatusOr<HloInstruction*> MakeBinaryHlo(
HloOpcode opcode, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(Shape binary_op_shape,
ShapeInference::InferBinaryOpShape(opcode, lhs, rhs));
return computation->AddInstruction(
HloInstruction::CreateBinary(binary_op_shape, opcode, lhs, rhs), metadata,
frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeCompareHlo(
ComparisonDirection direction, HloInstruction* lhs, HloInstruction* rhs,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(
Shape binary_op_shape,
ShapeInference::InferBinaryOpShape(HloOpcode::kCompare, lhs, rhs));
return computation->AddInstruction(
HloInstruction::CreateCompare(binary_op_shape, lhs, rhs, direction),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakePadHlo(
HloInstruction* operand, HloInstruction* padding_value,
const PaddingConfig& padding_config, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, padding_value->parent());
TF_ASSIGN_OR_RETURN(
Shape pad_shape,
ShapeInference::InferPadShape(operand->shape(), padding_value->shape(),
padding_config));
return computation->AddInstruction(
HloInstruction::CreatePad(pad_shape, operand, padding_value,
padding_config),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeSliceHlo(
HloInstruction* operand, absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices, absl::Span<const int64_t> strides,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape slice_shape, ShapeInference::InferSliceShape(
operand->shape(), start_indices,
limit_indices, strides));
return computation->AddInstruction(
HloInstruction::CreateSlice(slice_shape, operand, start_indices,
limit_indices, strides),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeConvolveHlo(
HloInstruction* lhs, HloInstruction* rhs, int64_t feature_group_count,
int64_t batch_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config,
std::optional<PrimitiveType> preferred_element_type,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(
Shape convolve_shape,
ShapeInference::InferConvolveShape(
lhs->shape(), rhs->shape(), feature_group_count, batch_group_count,
window, dimension_numbers, preferred_element_type));
return computation->AddInstruction(
HloInstruction::CreateConvolve(
convolve_shape, lhs, rhs, feature_group_count, batch_group_count,
window, dimension_numbers, precision_config),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeTransposeHlo(
HloInstruction* operand, absl::Span<const int64_t> dimensions) {
TF_ASSIGN_OR_RETURN(
Shape transpose_shape,
ShapeInference::InferTransposeShape(operand->shape(), dimensions));
return operand->AddInstruction(
HloInstruction::CreateTranspose(transpose_shape, operand, dimensions));
}
absl::StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
HloInstruction* operand) {
return operand->AddInstruction(
HloInstruction::CreateReshape(result_shape, operand));
}
absl::StatusOr<HloInstruction*> MakeReshapeHlo(
absl::Span<const int64_t> result_shape_dim_bounds,
HloInstruction* operand) {
Shape new_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_dim_bounds);
return MakeReshapeHlo(new_shape, operand);
}
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, absl::Span<HloInstruction* const> start_indices,
absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
if (start_indices.empty() || slice_sizes.empty()) {
return operand;
}
HloComputation* computation = operand->parent();
std::vector<Shape> scalar_start_indices_shapes(
start_indices.size(),
ShapeUtil::MakeShape(start_indices[0]->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_slice_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(), scalar_start_indices_shapes, slice_sizes));
return computation->AddInstruction(
HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
start_indices, slice_sizes),
metadata);
}
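// Overload taking a rank-1 `start_indices` operand. Dynamic-slice requires
// one scalar index per dimension, so the vector is first decomposed into
// scalar slice+reshape pairs.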
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, HloInstruction* start_indices,
absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, start_indices->parent());
int64_t rank = start_indices->shape().dimensions(0);
std::vector<HloInstruction*> scalar_start_indices;
for (int i = 0; i < rank; ++i) {
auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
start_indices, {i}, {i + 1}, {1}));
scalar_start_indices.push_back(
computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
slice)));
}
std::vector<Shape> scalar_start_indices_shapes(
rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_slice_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(), scalar_start_indices_shapes, slice_sizes));
return computation->AddInstruction(
HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
scalar_start_indices, slice_sizes),
metadata);
}
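// As with dynamic-slice above, the rank-1 `start_indices` operand is split
// into per-dimension scalars before building the dynamic-update-slice.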
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
HloInstruction* start_indices, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, update->parent());
CHECK_EQ(computation, start_indices->parent());
int64_t rank = start_indices->shape().dimensions(0);
std::vector<HloInstruction*> scalar_start_indices;
for (int i = 0; i < rank; ++i) {
auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
start_indices, {i}, {i + 1}, {1}));
scalar_start_indices.push_back(
computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
slice)));
}
std::vector<Shape> scalar_start_indices_shapes(
rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
TF_ASSIGN_OR_RETURN(
Shape dynamic_update_slice_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(), scalar_start_indices_shapes));
return computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(
dynamic_update_slice_shape, operand, update, scalar_start_indices),
metadata);
}
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
HloInstruction* operand, HloInstruction* update,
absl::Span<HloInstruction* const> start_indices,
const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, update->parent());
std::vector<Shape> scalar_start_indices_shapes;
scalar_start_indices_shapes.reserve(start_indices.size());
for (auto start_index : start_indices) {
scalar_start_indices_shapes.push_back(start_index->shape());
}
TF_ASSIGN_OR_RETURN(
Shape dynamic_update_slice_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(), scalar_start_indices_shapes));
return computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(dynamic_update_slice_shape,
operand, update, start_indices),
metadata);
}
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
absl::Span<const int64_t> result_shape_bounds, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
Shape broadcast_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_bounds);
return MakeBroadcastHlo(operand, broadcast_dimensions, broadcast_shape,
metadata, frontend_attributes);
}
HloInstruction* MakeBroadcastHlo(
HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
const Shape& shape, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
HloComputation* computation = operand->parent();
return computation->AddInstruction(
HloInstruction::CreateBroadcast(shape, operand, broadcast_dimensions),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeGetTupleElementHlo(
HloInstruction* operand, int64_t index, const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(
Shape gte_shape,
ShapeInference::InferGetTupleElementShape(operand->shape(), index));
return computation->AddInstruction(
HloInstruction::CreateGetTupleElement(gte_shape, operand, index),
metadata);
}
absl::StatusOr<HloInstruction*> MakeConcatHlo(
absl::Span<HloInstruction* const> operands, int64_t dimension,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
CHECK_GT(operands.size(), 0);
HloComputation* computation = operands[0]->parent();
CHECK(absl::c_all_of(operands, [&](HloInstruction* instr) {
return instr->parent() == computation;
}));
std::vector<const Shape*> operand_shapes;
absl::c_transform(operands, std::back_inserter(operand_shapes),
[](HloInstruction* instr) { return &instr->shape(); });
TF_ASSIGN_OR_RETURN(Shape concat_shape, ShapeInference::InferConcatOpShape(
operand_shapes, dimension));
return computation->AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, operands, dimension),
metadata, frontend_attributes);
}
HloInstruction* MakeConvertToHlo(HloInstruction* hlo, PrimitiveType type,
const OpMetadata* metadata) {
if (hlo->shape().element_type() == type) {
return hlo;
}
Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
if (primitive_util::IsSubByteNonPredType(shape.element_type())) {
shape.mutable_layout()->set_element_size_in_bits(
primitive_util::BitWidth(shape.element_type()));
} else {
shape.mutable_layout()->set_element_size_in_bits(0);
}
hlo = hlo->parent()->AddInstruction(HloInstruction::CreateConvert(shape, hlo),
metadata);
CHECK_EQ(hlo->shape().element_type(), type);
return hlo;
}
HloInstruction* MakeBitcastHlo(HloInstruction* hlo, const Shape& shape,
const OpMetadata* metadata) {
return hlo->parent()->AddInstruction(
HloInstruction::CreateBitcast(shape, hlo), metadata);
}
HloInstruction* MakeBitcastConvertToHlo(HloInstruction* hlo, PrimitiveType type,
const OpMetadata* metadata) {
if (hlo->shape().element_type() == type) {
return hlo;
}
Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
if (type == PRED || hlo->shape().element_type() == PRED) {
return MakeConvertToHlo(hlo, type);
}
hlo = hlo->parent()->AddInstruction(
HloInstruction::CreateBitcastConvert(shape, hlo), metadata);
CHECK_EQ(hlo->shape().element_type(), type);
return hlo;
}
HloInstruction* MakeIotaHlo(HloComputation* computation, const Shape& shape,
int64_t iota_dimension) {
return computation->AddInstruction(
HloInstruction::CreateIota(shape, iota_dimension));
}
absl::StatusOr<HloInstruction*> MakeDotHlo(
HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config,
std::optional<PrimitiveType> preferred_element_type,
std::vector<SparsityDescriptor> sparsity,
absl::Span<HloInstruction* const> sparse_meta, const OpMetadata* metadata) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(Shape dot_shape,
ShapeInference::InferDotOpShape(
lhs->shape(), rhs->shape(), dim_numbers,
preferred_element_type, absl::MakeSpan(sparsity)));
return computation->AddInstruction(
HloInstruction::CreateDot(dot_shape, lhs, rhs, dim_numbers,
precision_config, sparsity, sparse_meta),
metadata);
}
absl::StatusOr<HloInstruction*> MakeMapHlo(
absl::Span<HloInstruction* const> operands, HloComputation* map_computation,
const OpMetadata* metadata) {
CHECK(!operands.empty()) << "Map Hlo requires at least one operand.";
HloComputation* computation = operands.front()->parent();
std::vector<const Shape*> operand_shapes;
int64_t max_operand_rank = 0;
for (const HloInstruction* operand : operands) {
CHECK_EQ(computation, operand->parent());
operand_shapes.push_back(&operand->shape());
max_operand_rank = std::max(max_operand_rank, operand->shape().rank());
}
std::vector<int64_t> map_dims(max_operand_rank);
std::iota(map_dims.begin(), map_dims.end(), 0);
TF_ASSIGN_OR_RETURN(
Shape map_shape,
ShapeInference::InferMapShape(
operand_shapes, map_computation->ComputeProgramShape(), map_dims));
return computation->AddInstruction(
HloInstruction::CreateMap(map_shape, operands, map_computation),
metadata);
}
HloInstruction* MakeReducePrecisionHlo(HloInstruction* operand,
int exponent_bits, int mantissa_bits,
const OpMetadata* metadata) {
return operand->parent()->AddInstruction(
HloInstruction::CreateReducePrecision(operand->shape(), operand,
exponent_bits, mantissa_bits),
metadata);
}
namespace {
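// Builds a scalar computation `(lhs, rhs) -> lhs <binary_opcode> rhs` over
// `dtype` and adds it to `module`, for use as a reduce/reduce-window body.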
static HloComputation* MakeBinaryScalarComputation(HloOpcode binary_opcode,
PrimitiveType dtype,
HloInstruction* ctx,
HloModule* module) {
CHECK_NE(ctx, nullptr);
HloComputation::Builder b(
absl::StrCat(ctx->name(), ".reduce_sub_computation"));
const Shape scalar_shape = ShapeUtil::MakeShape(dtype, {});
HloInstruction* lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
HloInstruction* rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
b.AddInstruction(
HloInstruction::CreateBinary(scalar_shape, binary_opcode, lhs, rhs));
CHECK_NE(module, nullptr);
return module->AddEmbeddedComputation(b.Build());
}
}
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
auto scalar_shape = ShapeUtil::MakeShape(operand->shape().element_type(), {});
auto result_shape = ShapeUtil::DeleteDimensions(dimensions, operand->shape());
return operand->parent()->AddInstruction(
HloInstruction::CreateReduce(result_shape, operand, init_value,
dimensions, reduce_computation),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
HloInstruction* operand, HloInstruction* init_value, const Window& window,
HloComputation* reduce_computation, const OpMetadata* metadata) {
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferReduceWindowShape(
operand->shape(), init_value->shape(), window,
reduce_computation->ComputeProgramShape()));
return operand->parent()->AddInstruction(
HloInstruction::CreateReduceWindow(inferred_shape, operand, init_value,
window, reduce_computation),
metadata);
}
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
HloInstruction* operand, HloInstruction* init_value, const Window& window,
HloOpcode binary_opcode, const OpMetadata* metadata) {
HloComputation* reduce_computation = MakeBinaryScalarComputation(
binary_opcode, operand->shape().element_type(), operand,
operand->GetModule());
TF_ASSIGN_OR_RETURN(Shape inferred_shape,
ShapeInference::InferReduceWindowShape(
operand->shape(), init_value->shape(), window,
reduce_computation->ComputeProgramShape()));
return operand->parent()->AddInstruction(
HloInstruction::CreateReduceWindow(inferred_shape, operand, init_value,
window, reduce_computation),
metadata);
}
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions, HloOpcode binary_opcode,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
HloComputation* reduce_computation = MakeBinaryScalarComputation(
binary_opcode, operand->shape().element_type(), operand,
operand->GetModule());
return MakeReduceHlo(operand, init_value, dimensions, reduce_computation,
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeReduceHlo(
HloInstruction* operand, HloInstruction* init_value,
HloOpcode binary_opcode, HloModule* module, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
DCHECK_NE(nullptr, module);
std::vector<int64_t> all_dims(operand->shape().rank());
std::iota(all_dims.begin(), all_dims.end(), 0);
HloComputation* reduce_computation = MakeBinaryScalarComputation(
binary_opcode, operand->shape().element_type(), operand, module);
return MakeReduceHlo(operand, init_value, all_dims, reduce_computation,
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeReduceHlo(
absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
CHECK(!operands.empty());
CHECK_EQ(operands.size(), init_values.size());
auto root = reduce_computation->root_instruction();
if (root->shape().IsTuple()) {
CHECK_EQ(root->shape().tuple_shapes_size(), operands.size());
} else {
CHECK_EQ(operands.size(), 1);
}
std::vector<Shape> expected_shapes;
for (auto operand : operands) {
expected_shapes.push_back(ShapeUtil::FilterDimensions(
[&](const int64_t dim) {
return !absl::c_linear_search(dimensions, dim);
},
operand->shape()));
}
auto output_shape = ShapeUtil::MakeMaybeTupleShape(expected_shapes);
return operands[0]->parent()->AddInstruction(
HloInstruction::CreateReduce(output_shape, operands, init_values,
dimensions, reduce_computation),
metadata, frontend_attributes);
}
absl::StatusOr<HloInstruction*> MakeReverseHlo(
HloInstruction* operand, absl::Span<const int64_t> dimensions,
const OpMetadata* metadata) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape reverse_shape, ShapeInference::InferReverseShape(
operand->shape(), dimensions));
return computation->AddInstruction(
HloInstruction::CreateReverse(reverse_shape, operand, dimensions),
metadata);
}
absl::StatusOr<HloInstruction*> MakeSelectHlo(
HloInstruction* pred, HloInstruction* on_true, HloInstruction* on_false,
HloInstruction* derived_from, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
HloComputation* computation = pred->parent();
DCHECK_EQ(computation, on_true->parent());
DCHECK_EQ(computation, on_false->parent());
Shape op_shape = on_true->shape();
if (ShapeUtil::IsScalar(pred->shape())) {
if (!ShapeUtil::IsScalar(op_shape) && !op_shape.IsTuple()) {
pred = computation->AddInstruction(
HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(op_shape, PrimitiveType::PRED), pred,
{}),
metadata, frontend_attributes);
if (derived_from) {
derived_from->SetupDerivedInstruction(pred);
}
}
}
TF_RET_CHECK(!op_shape.IsTuple());
HloOpcode select_op_code = HloOpcode::kSelect;
TF_ASSIGN_OR_RETURN(Shape select_shape,
ShapeInference::InferTernaryOpShape(select_op_code, pred,
on_true, on_false));
HloInstruction* select = computation->AddInstruction(
HloInstruction::CreateTernary(select_shape, select_op_code, pred, on_true,
on_false),
metadata, frontend_attributes);
if (derived_from) {
derived_from->SetupDerivedInstruction(select);
}
return select;
}
HloInstruction* MaybeMakeTuple(absl::Span<HloInstruction* const> operands) {
CHECK(!operands.empty());
if (operands.size() == 1) {
return operands[0];
}
return operands[0]->parent()->AddInstruction(
HloInstruction::CreateTuple(operands));
}
absl::StatusOr<HloInstruction*> MakeSortHlo(
const Shape& sort_shape, absl::Span<HloInstruction* const> operands,
int64_t dimension_to_sort, bool is_stable, HloComputation::Builder* builder,
HloModule* module, const OpMetadata* metadata) {
CHECK(!operands.empty()) << "Sort Hlo requires at least one operand.";
HloComputation* compare_computation;
XlaBuilder b("Sort.Compare");
if (metadata != nullptr) {
b.SetOpMetadata(*metadata);
}
std::vector<PrimitiveType> operand_types(operands.size());
for (int64_t i = 0; i < operands.size(); ++i) {
operand_types[i] = operands[i]->shape().element_type();
}
XlaComputation comparator = CreateScalarLtComputation(operand_types, &b);
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comparator.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comparator.proto(), config));
HloCloneContext context(module);
compare_computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
return builder->AddInstruction(HloInstruction::CreateSort(
sort_shape, dimension_to_sort, operands, compare_computation, is_stable));
}
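// Reshapes `operand` so its first `n` dimensions are folded into a single
// leading dimension whose bound is their product; the result dimension is
// dynamic if any of the collapsed dimensions was dynamic.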
absl::StatusOr<HloInstruction*> CollapseFirstNDims(HloInstruction* operand,
int64_t n) {
CHECK_GT(n, 0);
const Shape& operand_shape = operand->shape();
CHECK_GE(operand_shape.dimensions_size(), n);
int64_t new_shape_leading_bound = 1;
bool new_shape_leading_is_dynamic = false;
for (int64_t i = 0; i < n; i++) {
new_shape_leading_bound *= operand_shape.dimensions(i);
new_shape_leading_is_dynamic |= operand_shape.is_dynamic_dimension(i);
}
std::vector<int64_t> new_shape_dims;
new_shape_dims.reserve(operand_shape.dimensions_size() - n + 1);
new_shape_dims.push_back(new_shape_leading_bound);
std::copy(operand_shape.dimensions().begin() + n,
operand_shape.dimensions().end(),
std::back_inserter(new_shape_dims));
std::vector<bool> new_shape_dynamic_dims;
new_shape_dynamic_dims.reserve(operand_shape.dimensions_size() - n + 1);
new_shape_dynamic_dims.push_back(new_shape_leading_is_dynamic);
std::copy(operand_shape.dynamic_dimensions().begin() + n,
operand_shape.dynamic_dimensions().end(),
std::back_inserter(new_shape_dynamic_dims));
Shape output_shape = ShapeUtil::MakeShape(
operand_shape.element_type(), new_shape_dims, new_shape_dynamic_dims);
return MakeReshapeHlo(output_shape, operand);
}
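// Reshapes `operand` to the same shape with `n` extra leading size-1
// dimensions.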
absl::StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
int64_t n) {
CHECK_GT(n, 0);
std::vector<int64_t> new_shape_dims;
const Shape& operand_shape = operand->shape();
new_shape_dims.reserve(n + operand_shape.dimensions_size());
new_shape_dims.insert(new_shape_dims.begin(), n, 1);
absl::c_copy(operand_shape.dimensions(), std::back_inserter(new_shape_dims));
return MakeReshapeHlo(new_shape_dims, operand);
}
absl::StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
HloInstruction* operand, absl::Span<const int64_t> expanded_dims) {
CHECK_GT(operand->shape().dimensions_size(), 0);
CHECK_EQ(operand->shape().dimensions(0), Product(expanded_dims));
std::vector<int64_t> expanded_shape_dim_bounds;
expanded_shape_dim_bounds.reserve(expanded_dims.size() +
operand->shape().dimensions_size() - 1);
absl::c_copy(expanded_dims, std::back_inserter(expanded_shape_dim_bounds));
std::copy(operand->shape().dimensions().begin() + 1,
operand->shape().dimensions().end(),
std::back_inserter(expanded_shape_dim_bounds));
Shape new_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
expanded_shape_dim_bounds);
return MakeReshapeHlo(new_shape, operand);
}
absl::StatusOr<HloInstruction*> ElideDegenerateDims(
HloInstruction* operand, absl::Span<const int64_t> dims_to_elide) {
return MakeReshapeHlo(ShapeUtil::FilterDimensions(
[&](int64_t dim) {
return !absl::c_linear_search(dims_to_elide, dim);
},
operand->shape()),
operand);
}
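// Reshapes `operand` to insert size-1 dimensions at the given output
// positions; `dims_to_insert` must be sorted and refer to output indices.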
absl::StatusOr<HloInstruction*> InsertDegenerateDims(
HloInstruction* operand, absl::Span<const int64_t> dims_to_insert) {
CHECK(absl::c_is_sorted(dims_to_insert));
const Shape& operand_shape = operand->shape();
int64_t output_shape_rank =
operand_shape.dimensions_size() + dims_to_insert.size();
for (auto dim_to_insert : dims_to_insert) {
CHECK_LT(dim_to_insert, output_shape_rank);
}
std::vector<int64_t> output_shape_dim_bounds;
output_shape_dim_bounds.reserve(output_shape_rank);
int64_t operand_dims_idx = 0;
int64_t dims_to_insert_idx = 0;
for (int64_t i = 0; i < output_shape_rank; ++i) {
if (dims_to_insert_idx < dims_to_insert.size() &&
i == dims_to_insert[dims_to_insert_idx]) {
output_shape_dim_bounds.push_back(1);
++dims_to_insert_idx;
} else {
output_shape_dim_bounds.push_back(
operand_shape.dimensions(operand_dims_idx));
++operand_dims_idx;
}
}
Shape output_shape = ShapeUtil::MakeShape(operand_shape.element_type(),
output_shape_dim_bounds);
return MakeReshapeHlo(output_shape, operand);
}
absl::StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
int64_t zeros_to_prepend,
int64_t zeros_to_append) {
HloComputation* computation = operand->parent();
CHECK_EQ(operand->shape().dimensions_size(), 1);
PaddingConfig padding_config;
PaddingConfig::PaddingConfigDimension padding_config_dim;
padding_config_dim.set_edge_padding_low(zeros_to_prepend);
padding_config_dim.set_edge_padding_high(zeros_to_append);
*padding_config.add_dimensions() = padding_config_dim;
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(operand->shape().element_type())));
return MakePadHlo(operand, zero, padding_config);
}
HloInstruction* BroadcastZeros(HloComputation* computation,
PrimitiveType element_type,
absl::Span<const int64_t> broadcast_dimensions) {
return BroadcastZeros(
computation, ShapeUtil::MakeShape(element_type, broadcast_dimensions));
}
HloInstruction* BroadcastZeros(HloComputation* computation,
const Shape& broadcast_shape) {
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(broadcast_shape.element_type())));
return MakeBroadcastHlo(zero, {}, broadcast_shape);
}
HloInstruction* BroadcastOnes(HloComputation* computation,
PrimitiveType element_type,
absl::Span<const int64_t> broadcast_dimensions) {
HloInstruction* one = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(element_type)));
  return MakeBroadcastHlo(one, /*broadcast_dimensions=*/{},
                          /*result_shape_bounds=*/broadcast_dimensions);
}
absl::StatusOr<HloInstruction*> MakeFusionInstruction(
HloInstruction* fused, HloInstruction::FusionKind kind) {
HloComputation* comp = fused->parent();
HloInstruction* fusion_instruction = comp->AddInstruction(
HloInstruction::CreateFusion(fused->shape(), kind, fused));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(fused, fusion_instruction));
return fusion_instruction;
}
HloInstruction* CreateDummyOp(HloComputation::Builder* b, const Shape& shape) {
if (shape.IsArray()) {
auto zero = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(shape.element_type())));
return b->AddInstruction(HloInstruction::CreateBroadcast(shape, zero, {}));
}
CHECK(shape.IsTuple());
std::vector<HloInstruction*> sub_instructions;
for (const Shape& subshape : shape.tuple_shapes()) {
sub_instructions.push_back(CreateDummyOp(b, subshape));
}
return b->AddInstruction(HloInstruction::CreateTuple(sub_instructions));
}
absl::StatusOr<std::unique_ptr<HloComputation>> CreateComputationWithSignature(
absl::Span<const Shape* const> domain, const Shape& range,
absl::string_view name) {
HloComputation::Builder b{name};
int64_t param_idx = 0;
for (const Shape* param_shape : domain) {
b.AddInstruction(HloInstruction::CreateParameter(
param_idx, *param_shape, StrCat("param.", param_idx)));
param_idx++;
}
CreateDummyOp(&b, range);
return b.Build();
}
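// Emits a single reshape that drops the (degenerate) dimension at
// `index_to_remove` from `hlo`'s shape.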
HloInstruction* CreateDegenerateRemovingReshape(HloInstruction* hlo,
const int64_t index_to_remove) {
Shape input_shape = hlo->shape();
std::vector<int64_t> dims;
dims.reserve(input_shape.rank() - 1);
for (int64_t index = 0; index < input_shape.rank(); index++) {
if (index == index_to_remove) {
continue;
}
int64_t dim_size = input_shape.dimensions(index);
dims.push_back(dim_size);
}
Shape new_shape = ShapeUtil::MakeShape(input_shape.element_type(), dims);
return hlo->AddInstruction(HloInstruction::CreateReshape(new_shape, hlo));
}
HloInstruction* CreateDegenerateAddingReshape(HloInstruction* hlo,
const int index_to_add) {
Shape input_shape = hlo->shape();
std::vector<int64_t> dims;
  dims.reserve(input_shape.rank() + 1);
for (int64_t index = 0; index < input_shape.rank(); index++) {
if (index == index_to_add) {
dims.push_back(1);
}
int64_t dim_size = input_shape.dimensions(index);
dims.push_back(dim_size);
}
if (index_to_add == input_shape.rank()) {
dims.push_back(1);
}
Shape new_shape = ShapeUtil::MakeShape(input_shape.element_type(), dims);
return hlo->AddInstruction(HloInstruction::CreateReshape(new_shape, hlo));
}
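// If `inst` is a reshape that only inserts and/or deletes size-1 dimensions,
// rewrites it as a chain of single-dimension reshapes (deletions first, from
// highest index to lowest, then insertions). Returns nullptr when the
// reshape is not degenerate or already changes just one dimension.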
HloInstruction* ExpandDegenerateReshape(HloInstruction* inst) {
std::optional<ShapeUtil::ShapeEqualityDescriptor> reshape_degenerate =
inst->ReshapeMerelyInsertsOrDeletes1SizedDimensions();
if (reshape_degenerate.has_value()) {
if (reshape_degenerate->deleted_dimensions.empty() &&
reshape_degenerate->inserted_dimensions.size() == 1) {
return nullptr;
}
if (reshape_degenerate->inserted_dimensions.empty() &&
reshape_degenerate->deleted_dimensions.size() == 1) {
return nullptr;
}
absl::c_reverse(reshape_degenerate->deleted_dimensions);
HloInstruction* degenerate_removing_hlo = nullptr;
if (!reshape_degenerate->deleted_dimensions.empty()) {
degenerate_removing_hlo = CreateDegenerateRemovingReshape(
inst->mutable_operand(0), reshape_degenerate->deleted_dimensions[0]);
for (int64_t r = 1; r < reshape_degenerate->deleted_dimensions.size();
r++) {
degenerate_removing_hlo = CreateDegenerateRemovingReshape(
degenerate_removing_hlo, reshape_degenerate->deleted_dimensions[r]);
}
}
HloInstruction* degenerate_adding_hlo = degenerate_removing_hlo != nullptr
? degenerate_removing_hlo
: inst->mutable_operand(0);
if (!reshape_degenerate->inserted_dimensions.empty()) {
for (int64_t a = 0; a < reshape_degenerate->inserted_dimensions.size();
a++) {
degenerate_adding_hlo = CreateDegenerateAddingReshape(
degenerate_adding_hlo, reshape_degenerate->inserted_dimensions[a]);
}
}
return degenerate_adding_hlo;
}
return nullptr;
}
} | #include "xla/service/hlo_creation_utils.h"
#include <memory>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = match;
class HloCreationUtilsTest : public HloTestBase {
protected:
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation, PrimitiveType primitive_type_output) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type_output, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
};
TEST_F(HloCreationUtilsTest, CollapseFirst1Dim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{2}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_1_dims_collapsed,
CollapseFirstNDims(param, 1));
entry_computation->set_root_instruction(first_1_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({3, 4}));
}
TEST_F(HloCreationUtilsTest, CollapseFirst2Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(
S32, {2, 3, 2}, {6, 2}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_2_dims_collapsed,
CollapseFirstNDims(param, 2));
entry_computation->set_root_instruction(first_2_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}},
{{-1, -2}, {-3, -4}, {-5, -6}}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<int32_t>(
{{1, 2}, {3, 4}, {5, 6}, {-1, -2}, {-3, -4}, {-5, -6}}));
}
TEST_F(HloCreationUtilsTest, Prepend1DegenerateDim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_1_degenerate_dim_prepended,
PrependDegenerateDims(param, 1));
entry_computation->set_root_instruction(with_1_degenerate_dim_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9, 10}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR3<int32_t>({{{9, 10}}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDimsToScalar) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{1, 1},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(9)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9}}));
}
TEST_F(HloCreationUtilsTest, ExpandFirstDimInto3Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {6},
{3, 1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_dim_expanded,
ExpandFirstDimIntoNDims(param, {3, 1, 2}));
entry_computation->set_root_instruction(first_dim_expanded);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR3<int32_t>({{{1, 2}}, {{3, 4}}, {{5, 6}}}));
}
TEST_F(HloCreationUtilsTest, PadVectorWithZeros) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{6}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * zero_padded_param,
      PadVectorWithZeros(param, /*zeros_to_prepend=*/3, /*zeros_to_append=*/1));
entry_computation->set_root_instruction(zero_padded_param);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({0, 0, 0, 3, 4, 0}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), S32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
¶m, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), F32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBitcastConvertToHlo_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
¶m, &entry_computation, F32);
auto* input = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})));
HloInstruction* output = MakeBitcastConvertToHlo(input, F32);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeIotaHlo_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation, F32);
HloInstruction* output = MakeIotaHlo(module->entry_computation(),
ShapeUtil::MakeShape(F32, {2, 2}), 0);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
      evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {1.0f, 1.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
¶m, &entry_computation);
auto* input = MakeR0ConstantHlo<float>(module->entry_computation(), 0);
HloInstruction* output = MakeBroadcastHlo(input, {}, {2, 2});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_Shape_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation);
auto* input = MakeR0ConstantHlo<int32_t>(module->entry_computation(), 0);
HloInstruction* output =
MakeBroadcastHlo(input, {}, ShapeUtil::MakeShape(S32, {2, 2}));
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
      evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleCrashesWithEmptyOperands) {
EXPECT_DEATH(MaybeMakeTuple({}), "");
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleForwardsSingleElement) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
¶m, &entry_computation);
HloInstruction* output = MaybeMakeTuple({param});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
EXPECT_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleTuplizesMultipleOperands) {
Shape input_shape0 = ShapeUtil::MakeShape(S32, {2});
Shape input_shape1 = ShapeUtil::MakeShape(F32, {3, 3});
Shape output_shape =
ShapeUtil::MakeTupleShapeWithPtrs({&input_shape1, &input_shape0});
auto module = CreateNewVerifiedModule("test");
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape0, &input_shape1},
output_shape, "entry")
.value());
HloInstruction* output =
MaybeMakeTuple({entry_computation->parameter_instruction(1),
entry_computation->parameter_instruction(0)});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
  Literal input0 = LiteralUtil::CreateR1<int32_t>({2, 4});
Literal input1 =
LiteralUtil::CreateR2<float>({{3, 2, 1}, {4, 5, 6}, {9, 8, 7}});
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {input0.Clone(), input1.Clone()}));
Literal expected_result = LiteralUtil::MakeTuple({&input1, &input0});
EXPECT_EQ(result_literal, expected_result);
}
TEST_F(HloCreationUtilsTest, DynamicUpdateSliceVectorStartIndices) {
auto module = CreateNewVerifiedModule("dus-creation-test");
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
Shape input_shape = ShapeUtil::MakeShape(F64, {2, 3});
Shape update_shape = ShapeUtil::MakeShape(F64, {2, 2});
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape, &update_shape}, input_shape,
"entry")
.value());
auto zero = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto update = LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}});
HloInstruction* dus =
MakeDynamicUpdateSliceHlo(entry_computation->parameter_instruction(0),
entry_computation->parameter_instruction(1),
{zero, one})
.value();
entry_computation->set_root_instruction(dus);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result, evaluator.Evaluate(*module, {&operand_literal, &update}));
auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloCreationUtilsTest, ExpandDegenerateReshape) {
const char* hlo_string = R"(
HloModule module
ENTRY test {
param = f32[12,1,10,32,8] parameter(0)
ROOT reshape = f32[1,12,10,1,32,1,8] reshape(param)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto expanded =
ExpandDegenerateReshape(module->entry_computation()->root_instruction());
EXPECT_THAT(expanded, GmockMatch(m::Reshape(m::Reshape(
m::Reshape(m::Reshape(m::Parameter(0)))))));
}
TEST_F(HloCreationUtilsTest, ReduceWindow) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation* addition = [&] {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module->AddEmbeddedComputation(embedded_builder.Build());
}();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, addition));
module->entry_computation()->set_root_instruction(
reduce_window,
      /*accept_different_shape=*/true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, ReduceWindowBinaryOpcode) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, HloOpcode::kAdd));
  module->entry_computation()->set_root_instruction(
      reduce_window,
      /*accept_different_shape=*/true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, DynamicBroadcastShape) {
HloInstruction* param;
HloComputation* entry_computation;
  auto module = CreateModuleWithProgramShape(F32, {10},
                                             {10}, &param,
                                             &entry_computation);
param->mutable_shape()->set_dynamic_dimension(0, true);
HloInstruction* one_constant = MakeScalarLike(param, 1.0f);
EXPECT_TRUE(one_constant->shape().is_static());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_creation_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_creation_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
25324229-a464-4608-8206-4630461737fa | cpp | tensorflow/tensorflow | convolution_4d_expander | third_party/xla/xla/service/convolution_4d_expander.cc | third_party/xla/xla/service/convolution_4d_expander_test.cc | #include "xla/service/convolution_4d_expander.h"
#include <algorithm>
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
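// Matches only 4D convolutions that have at least one trivial spatial
// dimension: input extent 1 with no low or high padding in the window.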
bool Convolution4DExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kConvolution) {
return false;
}
const ConvolutionDimensionNumbers& dim_nums =
instruction->convolution_dimension_numbers();
if (dim_nums.input_spatial_dimensions().size() != 4) {
return false;
}
Shape input = instruction->operand(0)->shape();
for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
int64_t spatial_dim = dim_nums.input_spatial_dimensions(i);
if (input.dimensions(spatial_dim) == 1 &&
instruction->window().dimensions(i).padding_low() == 0 &&
instruction->window().dimensions(i).padding_high() == 0) {
return true;
}
}
return false;
}
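// Rewrites the convolution at a lower rank: trivial spatial dimensions are
// deleted from the input, kernel, and output shapes via reshapes, the
// dimension numbers are renumbered against the compacted shapes, and the
// result is reshaped back to the original output shape.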
absl::StatusOr<HloInstruction*> Convolution4DExpander::ExpandInstruction(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
ConvolutionDimensionNumbers dim_nums =
instruction->convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dim_nums = dim_nums;
std::vector<int64_t> removed_input_dimensions;
std::vector<int64_t> removed_kernel_dimensions;
std::vector<int64_t> removed_output_dimensions;
new_dim_nums.clear_input_spatial_dimensions();
new_dim_nums.clear_output_spatial_dimensions();
new_dim_nums.clear_kernel_spatial_dimensions();
Window new_window;
HloInstruction* input = instruction->mutable_operand(0);
for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
int64_t input_spatial_dim = dim_nums.input_spatial_dimensions(i);
int64_t output_spatial_dim = dim_nums.output_spatial_dimensions(i);
int64_t kernel_spatial_dim = dim_nums.kernel_spatial_dimensions(i);
if (input->shape().dimensions(input_spatial_dim) == 1 &&
instruction->window().dimensions(i).padding_low() == 0 &&
instruction->window().dimensions(i).padding_high() == 0) {
removed_input_dimensions.push_back(input_spatial_dim);
removed_output_dimensions.push_back(output_spatial_dim);
removed_kernel_dimensions.push_back(kernel_spatial_dim);
} else {
*new_window.add_dimensions() = instruction->window().dimensions(i);
new_dim_nums.add_input_spatial_dimensions(input_spatial_dim);
new_dim_nums.add_output_spatial_dimensions(output_spatial_dim);
new_dim_nums.add_kernel_spatial_dimensions(kernel_spatial_dim);
}
}
std::sort(removed_input_dimensions.begin(), removed_input_dimensions.end(),
std::greater<>());
std::sort(removed_output_dimensions.begin(), removed_output_dimensions.end(),
std::greater<>());
std::sort(removed_kernel_dimensions.begin(), removed_kernel_dimensions.end(),
std::greater<>());
Shape new_input_shape = input->shape();
for (int64_t dim : removed_input_dimensions) {
new_input_shape.DeleteDimension(dim);
}
HloInstruction* kernel = instruction->mutable_operand(1);
Shape new_kernel_shape = kernel->shape();
for (int64_t dim : removed_kernel_dimensions) {
new_kernel_shape.DeleteDimension(dim);
}
Shape new_output_shape = instruction->shape();
for (int64_t dim : removed_output_dimensions) {
new_output_shape.DeleteDimension(dim);
}
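  // Maps a dimension index in the old shape to its index in the new shape:
  // each removed dimension with a smaller index shifts it down by one.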
auto compute_new_dimension =
[](const std::vector<int64_t>& removed_dimensions,
int64_t old_dimension) {
int64_t num_smaller = absl::c_count_if(
removed_dimensions, [old_dimension](int64_t removed_dimension) {
return removed_dimension < old_dimension;
});
return old_dimension - num_smaller;
};
new_dim_nums.set_input_batch_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_batch_dimension()));
new_dim_nums.set_input_feature_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.input_spatial_dimensions().size(); ++i) {
new_dim_nums.set_input_spatial_dimensions(
i, compute_new_dimension(removed_input_dimensions,
new_dim_nums.input_spatial_dimensions(i)));
}
new_dim_nums.set_output_batch_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_batch_dimension()));
new_dim_nums.set_output_feature_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.output_spatial_dimensions().size();
++i) {
new_dim_nums.set_output_spatial_dimensions(
i, compute_new_dimension(removed_output_dimensions,
new_dim_nums.output_spatial_dimensions(i)));
}
new_dim_nums.set_kernel_input_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_input_feature_dimension()));
new_dim_nums.set_kernel_output_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.kernel_spatial_dimensions().size();
++i) {
new_dim_nums.set_kernel_spatial_dimensions(
i, compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_spatial_dimensions(i)));
}
HloInstruction* reshaped_input = computation->AddInstruction(
HloInstruction::CreateReshape(new_input_shape, input));
HloInstruction* reshaped_kernel = computation->AddInstruction(
HloInstruction::CreateReshape(new_kernel_shape, kernel));
instruction->set_convolution_dimension_numbers(new_dim_nums);
instruction->set_window(new_window);
HloInstruction* new_convolution =
computation->AddInstruction(instruction->CloneWithNewOperands(
new_output_shape, {reshaped_input, reshaped_kernel}));
return computation->AddInstruction(
HloInstruction::CreateReshape(instruction->shape(), new_convolution));
}
} | #include "xla/service/convolution_4d_expander.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Convolution4DExpanderTest = HloTestBase;
TEST_F(Convolution4DExpanderTest, ConvertTo2DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 2);
}
TEST_F(Convolution4DExpanderTest, ConvertTo3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,2,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 pad=0_0x0_0x1_0x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 3);
}
TEST_F(Convolution4DExpanderTest, ConvertTo0DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,1,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,1,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,1,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 0);
}
TEST_F(Convolution4DExpanderTest, DontConvert3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,5,20]{4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,15]{4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,5]{4,3,2,1,0} convolution(input, kernel), dim_labels=012bf_i012o->f012b, window={size=1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 3);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
TEST_F(Convolution4DExpanderTest, DontConvertIfNoTrivialDimensionAvailable) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[2,10,2,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,2,2,2,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=2x2x2x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
TEST_F(Convolution4DExpanderTest, DontConvertIfPaddingIsNonzero) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 stride=2x1x2x1 pad=1_0x0_0x0_1x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_4d_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_4d_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d3d62f2-68fa-4364-8207-ec0daf9a2f91 | cpp | tensorflow/tensorflow | add_original_value | third_party/xla/xla/service/add_original_value.cc | third_party/xla/xla/service/add_original_value_test.cc | #include "xla/service/add_original_value.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/shape_util.h"
namespace xla {
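// Attaches an OriginalValue to every instruction in the module. Tuples and
// get-tuple-elements forward the original values of their operands at the
// matching shape index; every other instruction records its own name.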
absl::StatusOr<bool> AddOriginalValue::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto computation : module->computations()) {
for (const auto instruction : computation->instructions()) {
auto original_value =
std::make_shared<OriginalValue>(instruction->shape());
if (instruction->opcode() == HloOpcode::kGetTupleElement) {
const auto* tuple = instruction->operand(0);
original_value->CopySubtreeFrom(*tuple->original_value(),
{instruction->tuple_index()}, {});
} else if (instruction->opcode() == HloOpcode::kTuple) {
for (int64_t operand_number = 0;
operand_number < instruction->operand_count(); ++operand_number) {
original_value->CopySubtreeFrom(
*instruction->operand(operand_number)->original_value(), {},
{operand_number});
}
} else {
for (auto& leaf : original_value->leaves()) {
leaf.second = {std::string(instruction->name()), leaf.first};
}
}
instruction->set_original_value(original_value);
changed = true;
}
}
return changed;
}
} | #include "xla/service/add_original_value.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using AddOriginalValueTest = HloTestBase;
using ::absl::string_view;
TEST_F(AddOriginalValueTest, Basic) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(s32[]{:T(256)})->u32[2]{0:T(256)}}
ENTRY test {
Arg_0.1 = s32[] parameter(0)
constant.2 = s32[] constant(32)
shift-right-logical.3 = s32[] shift-right-logical(Arg_0.1, constant.2)
convert.4 = u32[] convert(shift-right-logical.3)
reshape.5 = u32[1]{0} reshape(convert.4)
convert.6 = u32[] convert(Arg_0.1)
reshape.7 = u32[1]{0} reshape(convert.6)
ROOT concatenate.8 = u32[2]{0} concatenate(reshape.5, reshape.7), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AddOriginalValue pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
}
TEST_F(AddOriginalValueTest, Tuple) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->((f32[], f32[3]{0}), f32[2,3]{1,0})}
ENTRY test (v1: f32[], v2: f32[3], v3: f32[2,3]) -> ((f32[], f32[3]{0}), f32[2,3]{1,0}) {
v1 = f32[] parameter(0)
v2 = f32[3]{0} parameter(1)
v3 = f32[2,3]{1,0} parameter(2)
t1 = (f32[], f32[3]{0}) tuple(f32[] v1, f32[3]{0} v2)
ROOT t2 = ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple((f32[], f32[3]{0}) t1, f32[2,3]{1,0} v3)
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[V1:.*]] = f32[] parameter(0), origin={{[{]}}{"[[V1]]"}
CHECK: %[[V2:.*]] = f32[3]{0} parameter(1), origin={{[{]}}{"[[V2]]"}
CHECK: %[[TUPLE:.*]] = (f32[], f32[3]{0}) tuple(%[[V1]], %[[V2]]), origin={({"[[V1]]"}, {"[[V2]]"})}
CHECK: %[[V3:.*]] = f32[2,3]{1,0} parameter(2), origin={{[{]}}{"[[V3]]"}
CHECK: ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple(%[[TUPLE]], %[[V3]]), origin={(({"v1"}, {"v2"}), {"v3"})}
)");
}
TEST_F(AddOriginalValueTest, GetTupleElement) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY test {
constant = f32[3]{0} constant({1, 2, 3})
constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} constant, s32[2,3]{1,0} constant.1)
ROOT get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) tuple), index=1
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[CONSTANT1:.*]] = f32[3]{0} constant({1, 2, 3}), origin={{[{]}}{"[[CONSTANT1]]"}
CHECK: %[[CONSTANT2:.*]] = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } }), origin={{[{]}}{"[[CONSTANT2]]"}
CHECK: %[[TUPLE:.*]] = (f32[3]{0}, s32[2,3]{1,0}) tuple(%[[CONSTANT1]], %[[CONSTANT2]]), origin={({"[[CONSTANT1]]"}, {"[[CONSTANT2]]"})}
CHECK: s32[2,3]{1,0} get-tuple-element(%[[TUPLE]]), index=1, origin={{[{]}}{"[[CONSTANT2]]"}
)");
}
TEST_F(AddOriginalValueTest, GetTupleElementNonSymbolic) {
constexpr absl::string_view hlo_string = R"(
HloModule test, entry_computation_layout={((f32[], s32[]))->s32[]}
ENTRY test {
p = (f32[], s32[]) parameter(0)
ROOT get-tuple-element = s32[] get-tuple-element(p), index=1
}
)";
RunAndFilecheckHloRewrite(hlo_string, AddOriginalValue(), R"(
CHECK: %[[PARAM:.*]] = (f32[], s32[]) parameter(0), origin={({"p" {0}{{[}]}}, {"p" {1}})}
CHECK: s32[] get-tuple-element(%[[PARAM]]), index=1, origin={{[{]}}{"[[PARAM]]" {1}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/add_original_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/add_original_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d56329eb-0661-4eca-8bb5-3c9e3292bfbb | cpp | tensorflow/tensorflow | dump | third_party/xla/xla/service/dump.cc | third_party/xla/xla/service/dump_test.cc | #include "xla/service/dump.h"
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Transforms/LocationSnapshot.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/tsl/lib/io/zlib_compression_options.h"
#include "xla/tsl/lib/io/zlib_outputbuffer.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
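// Creates `dir` if it does not already exist. A failed create is tolerated
// when the directory exists afterwards, e.g. because another process racing
// on the same dump directory created it first.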
absl::Status CreateDirIfNeeded(const std::string& dir, tsl::Env* env) {
if (!env->IsDirectory(dir).ok()) {
absl::Status status = env->RecursivelyCreateDir(dir);
if (!status.ok()) {
status = env->IsDirectory(dir);
if (!status.ok()) {
LOG(ERROR) << "Could not create directory " << dir;
return status;
}
}
}
return absl::OkStatus();
}
std::string RenderGraph(absl::string_view label, const HloModule& module,
RenderedGraphFormat format,
bool show_fusion_subcomputations) {
HloRenderOptions hlo_render_options;
hlo_render_options.show_fusion_subcomputations = show_fusion_subcomputations;
absl::StatusOr<std::string> rendered_graph =
RenderGraph(*module.entry_computation(), label,
module.config().debug_options(), format, hlo_render_options);
if (rendered_graph.ok()) {
return std::move(rendered_graph).value();
}
return absl::StrFormat("Error rendering graph: %s",
rendered_graph.status().ToString());
}
namespace {
using absl::StrCat;
using absl::StrFormat;
using absl::string_view;
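// Normalizes the raw --xla_dump_* flags into ready-to-use predicates and
// settings: picks a default output format, compiles the module/pass/pipeline
// regexes into matchers, and resolves the special dump_to values ("-" for
// stdout, "sponge"/"test_undeclared_outputs_dir" for the test output dir).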
struct CanonicalDebugOptions {
explicit CanonicalDebugOptions(const DebugOptions& opts)
: dump_to(opts.xla_dump_to()),
dump_as_text(opts.xla_dump_hlo_as_text()),
dump_as_proto(opts.xla_dump_hlo_as_proto()),
dump_as_dot(opts.xla_dump_hlo_as_dot()),
dump_as_html(opts.xla_dump_hlo_as_html()),
dump_as_url(opts.xla_dump_hlo_as_url()),
dump_fusion_visualization(opts.xla_dump_fusion_visualization()),
dump_snapshots(opts.xla_dump_hlo_snapshots()),
dump_include_timestamp(opts.xla_dump_include_timestamp()),
dump_max_hlo_modules(opts.xla_dump_max_hlo_modules()),
dump_module_metadata(opts.xla_dump_module_metadata()),
dump_compress_protos(opts.xla_dump_compress_protos()),
dump_hlo_metadata(!opts.xla_dump_disable_metadata()),
dump_as_long_text(opts.xla_dump_hlo_as_long_text()),
dump_mlir_pretty_form(opts.xla_dump_enable_mlir_pretty_form()),
dump_large_constants(opts.xla_dump_large_constants()),
syntax_sugar_async_ops(opts.xla_syntax_sugar_async_ops()) {
bool output_format_other_than_url_specified =
opts.xla_dump_hlo_as_text() || opts.xla_dump_hlo_as_proto() ||
opts.xla_dump_hlo_as_dot() || opts.xla_dump_hlo_as_html() ||
opts.xla_dump_hlo_snapshots();
bool output_format_specified =
output_format_other_than_url_specified || opts.xla_dump_hlo_as_url();
if (!output_format_specified) {
dump_as_text = true;
}
if (!opts.xla_enable_dumping()) {
dump_to = "";
}
if (opts.xla_dump_to().empty() && output_format_other_than_url_specified) {
dump_to = "-";
}
if (!opts.xla_dump_hlo_module_re().empty()) {
std::string pattern = opts.xla_dump_hlo_module_re();
should_dump_module = [pattern](string_view module_name) {
return RE2::PartialMatch(module_name, pattern);
};
} else if (!opts.xla_dump_hlo_pass_re().empty() ||
!opts.xla_dump_to().empty() || output_format_specified) {
should_dump_module = [](string_view) { return true; };
} else {
should_dump_module = [](string_view) { return false; };
}
if (!opts.xla_dump_hlo_pass_re().empty()) {
std::string pattern = opts.xla_dump_hlo_pass_re();
should_dump_pass = [pattern](string_view pass_name) {
return RE2::PartialMatch(pass_name, pattern);
};
} else {
should_dump_pass = [](string_view) { return false; };
}
if (!opts.xla_dump_hlo_pipeline_re().empty()) {
std::string pattern = opts.xla_dump_hlo_pipeline_re();
should_dump_pipeline = [pattern](string_view pipeline_name) {
return RE2::PartialMatch(pipeline_name, pattern);
};
} else {
should_dump_pipeline = [](string_view) { return true; };
}
std::string dump_to_lower = absl::AsciiStrToLower(dump_to);
if (dump_to_lower == "sponge" ||
dump_to_lower == "test_undeclared_outputs_dir") {
if (!tsl::io::GetTestUndeclaredOutputsDir(&dump_to)) {
LOG(ERROR) << "--xla_dump_to=" << opts.xla_dump_to()
<< ", but environment variable TEST_UNDECLARED_OUTPUTS_DIR "
"is not set, so cannot dump anywhere.";
should_dump_module = [](string_view) { return false; };
should_dump_pass = [](string_view) { return false; };
should_dump_pipeline = [](string_view) { return false; };
}
}
}
bool dumping_to_stdout() const { return dump_to == "-"; }
std::string dump_to;
std::function<bool(string_view module_name)> should_dump_module;
std::function<bool(string_view pass_name)> should_dump_pass;
std::function<bool(string_view pipeline_name)> should_dump_pipeline;
bool dump_as_text;
bool dump_as_proto;
bool dump_as_dot;
bool dump_as_html;
bool dump_as_url;
bool dump_fusion_visualization;
bool dump_snapshots;
bool dump_include_timestamp;
int64_t dump_max_hlo_modules;
bool dump_module_metadata;
bool dump_compress_protos;
bool dump_hlo_metadata;
bool dump_as_long_text;
bool dump_mlir_pretty_form;
bool dump_large_constants;
bool syntax_sugar_async_ops;
};
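// Queue of deferred string producers, so large dump payloads (e.g. a buffer
// assignment plus its live range) are generated and written piece by piece
// instead of being concatenated into one big string up front.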
class DataProducer {
public:
void Append(std::function<std::string()> produce_func) {
produce_funcs_.push(std::move(produce_func));
}
std::function<std::string()> Next() {
if (produce_funcs_.empty()) {
return nullptr;
}
auto next = std::move(produce_funcs_.front());
produce_funcs_.pop();
return next;
}
private:
std::queue<std::function<std::string()>> produce_funcs_;
};
static absl::Status WriteStringToFile(tsl::Env* env, const std::string& fname,
DataProducer& data_producer,
bool compressed) {
std::unique_ptr<tsl::WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file));
if (compressed) {
auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
tsl::io::ZlibOutputBuffer gz_file(file.get(), gz_opts.input_buffer_size,
gz_opts.output_buffer_size, gz_opts);
TF_RETURN_IF_ERROR(gz_file.Init());
while (auto next_producer = data_producer.Next()) {
TF_RETURN_IF_ERROR(gz_file.Append(next_producer()));
}
return gz_file.Close();
} else {
while (auto next_producer = data_producer.Next()) {
TF_RETURN_IF_ERROR(file->Append(next_producer()));
}
return file->Close();
}
}
static absl::Status WriteStringToFile(tsl::Env* env, const std::string& fname,
absl::string_view data, bool compressed) {
if (!compressed) {
return tsl::WriteStringToFile(env, fname, data);
}
std::unique_ptr<tsl::WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file));
auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
tsl::io::ZlibOutputBuffer gz_file(file.get(), gz_opts.input_buffer_size,
gz_opts.output_buffer_size, gz_opts);
TF_RETURN_IF_ERROR(gz_file.Init());
TF_RETURN_IF_ERROR(gz_file.Append(data));
return gz_file.Close();
}
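// Resolves `filename` to a sanitized path inside the dump directory, or
// nullopt when dumping is disabled, directed at stdout, or the
// --xla_dump_max_hlo_modules limit of distinct module ids has been reached.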
static std::optional<std::string> GetDumpFilePath(
string_view filename, const CanonicalDebugOptions& opts) {
if (opts.dumping_to_stdout()) {
LOG(ERROR) << "Refusing to write " << filename
<< " to stdout. Pass --xla_dump_to=<path> to write to a file.";
return std::nullopt;
}
if (opts.dump_to.empty()) {
return std::nullopt;
}
const std::string& dir = opts.dump_to;
VLOG(1) << "Dumping " << filename << " to " << dir;
tsl::Env* env = tsl::Env::Default();
if (!CreateDirIfNeeded(dir, env).ok()) {
return std::nullopt;
}
if (opts.dump_max_hlo_modules > 0) {
std::vector<std::string> matches;
auto pattern = tsl::io::JoinPath(dir, "*module_*.*");
auto status = env->GetMatchingPaths(pattern, &matches);
if (!status.ok()) {
LOG(ERROR) << "Could not get matching paths for pattern " << pattern
<< ": " << status;
}
static const LazyRE2 module_id_regex = {R"(.*module_(\d+)\..*)"};
absl::flat_hash_set<int64_t> dumped_module_ids;
for (const std::string& match : matches) {
int64_t dumped_module_id;
if (RE2::FullMatch(match, *module_id_regex, &dumped_module_id)) {
dumped_module_ids.insert(dumped_module_id);
}
}
if (dumped_module_ids.size() >= opts.dump_max_hlo_modules) {
int64_t module_id;
if (RE2::FullMatch(filename, *module_id_regex, &module_id) &&
!dumped_module_ids.contains(module_id)) {
LOG(ERROR) << "Have already dumped " << dumped_module_ids.size()
<< " modules, more than the limit of "
<< opts.dump_max_hlo_modules;
return std::nullopt;
}
}
}
return tsl::io::JoinPath(dir, SanitizeFileName(std::string(filename)));
}
static std::optional<std::string> DumpToFileInDirImpl(
string_view filename, string_view contents,
const CanonicalDebugOptions& opts, bool compress = false) {
auto file_path = GetDumpFilePath(filename, opts);
if (!file_path) return std::nullopt;
auto status =
WriteStringToFile(tsl::Env::Default(), *file_path, contents, compress);
if (!status.ok()) {
LOG(ERROR) << "Could not write XLA debug data to " << *file_path << ": "
<< status;
return std::nullopt;
}
return file_path;
}
static std::optional<std::string> DumpToFileInDirImpl(
string_view filename, DataProducer& data_producer,
const CanonicalDebugOptions& opts, bool compress = false) {
auto file_path = GetDumpFilePath(filename, opts);
if (!file_path) return std::nullopt;
auto status = WriteStringToFile(tsl::Env::Default(), *file_path,
data_producer, compress);
if (!status.ok()) {
LOG(ERROR) << "Could not write XLA debug data to " << *file_path << ": "
<< status;
return std::nullopt;
}
return file_path;
}
static absl::Mutex stdout_dump_mutex(absl::kConstInit);
static std::optional<std::string> DumpToFileInDirOrStdoutImpl(
string_view filename, string_view contents,
const CanonicalDebugOptions& opts) {
if (opts.dumping_to_stdout()) {
absl::MutexLock lock(&stdout_dump_mutex);
std::cout << "*** Begin " << filename << " ***\n"
<< contents << "\n*** End " << filename << " ***" << std::endl;
return std::nullopt;
}
return DumpToFileInDirImpl(filename, contents, opts);
}
static std::optional<std::string> DumpToFileInDirOrStdoutImpl(
string_view filename, DataProducer& data_producer,
const CanonicalDebugOptions& opts) {
if (opts.dumping_to_stdout()) {
absl::MutexLock lock(&stdout_dump_mutex);
std::cout << "*** Begin " << filename << " ***\n";
while (auto next_producer = data_producer.Next()) {
std::cout << next_producer();
}
std::cout << "\n*** End " << filename << " ***" << std::endl;
return std::nullopt;
}
return DumpToFileInDirImpl(filename, data_producer, opts);
}
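// A computation is trivial when its root is a non-fusion op whose operands
// are all parameters; such computations are skipped when emitting fusion
// visualizations below.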
static bool IsTrivial(const HloComputation& computation) {
const HloInstruction* root = computation.root_instruction();
return absl::c_all_of(root->operands(),
[&](const HloInstruction* op) {
return op->opcode() == HloOpcode::kParameter;
}) &&
root->opcode() != HloOpcode::kFusion;
}
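// Dumps one module in every enabled format (text, proto, dot, html, fusion
// visualization, url) and returns the paths of the files actually written.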
static std::vector<std::string> DumpHloModuleImpl(
const HloModule& module, const BufferAssignment* buffer_assn,
string_view prefix, string_view suffix, const CanonicalDebugOptions& opts) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaDumpHloModule:#module=%s,program_id=%d#",
module.name(), module.unique_id());
});
std::string filename = FilenameFor(module, prefix, suffix);
std::vector<std::optional<std::string>> file_paths;
if (opts.dump_as_text) {
auto print_options = opts.dump_as_long_text
? HloPrintOptions::Default()
: HloPrintOptions::ShortParsable();
print_options.set_print_large_constants(opts.dump_large_constants);
print_options.set_print_control_dependencies(true);
print_options.set_print_operand_index_annotation_interval(5);
print_options.set_print_backend_config(true);
print_options.set_print_metadata(opts.dump_hlo_metadata);
print_options.set_print_name_after_closing_brace(true);
print_options.set_syntax_sugar_async_ops(opts.syntax_sugar_async_ops);
file_paths.push_back(DumpToFileInDirOrStdoutImpl(
StrCat(filename, ".txt"), module.ToString(print_options), opts));
if (buffer_assn) {
DataProducer data_producer;
data_producer.Append([&] { return buffer_assn->ToString(); });
data_producer.Append([&] { return "\n\n"; });
data_producer.Append(
[&] { return buffer_assn->hlo_live_range().ToString(); });
file_paths.push_back(DumpToFileInDirOrStdoutImpl(
StrCat(filename, "-buffer-assignment.txt"), data_producer, opts));
}
}
if (opts.dump_as_proto) {
HloProto module_proto =
buffer_assn ? MakeHloProto(module, *buffer_assn) : MakeHloProto(module);
std::string pb;
if (!tsl::SerializeToStringDeterministic(module_proto, &pb)) {
pb = "Failed to serialize HLO module proto.";
}
file_paths.push_back(DumpToFileInDirImpl(
StrCat(filename, opts.dump_compress_protos ? ".hlo.pb.gz" : ".hlo.pb"),
pb, opts, opts.dump_compress_protos));
}
if (opts.dump_as_dot) {
file_paths.push_back(DumpToFileInDirImpl(
StrFormat("%s.dot", filename),
RenderGraph(filename, module, RenderedGraphFormat::kDot), opts));
}
if (opts.dump_as_html) {
file_paths.push_back(DumpToFileInDirImpl(
StrFormat("%s.html", filename),
RenderGraph(filename, module, RenderedGraphFormat::kHtml), opts));
if (absl::StrContains(filename, kAfterOptimizationsDumpName)) {
file_paths.push_back(DumpToFileInDirImpl(
StrFormat("%s.top_level.html", filename),
          RenderGraph(filename, module, RenderedGraphFormat::kHtml,
                      /*show_fusion_subcomputations=*/false),
opts));
}
}
if (opts.dump_fusion_visualization) {
for (const HloComputation* computation :
module.MakeNonfusionComputations()) {
if (IsTrivial(*computation)) {
VLOG(1) << "Skipping computation " << computation->name()
<< " as trivial";
continue;
}
absl::StatusOr<std::string> rendered_graph =
WrapFusionExplorer(*computation);
if (!rendered_graph.ok()) {
VLOG(1) << "Skipping fusion visualization"
<< " for computation " << computation->name()
<< " due to: " << rendered_graph.status();
continue;
}
file_paths.push_back(DumpToFileInDirImpl(
FilenameFor(module, computation->name(), "_fusion.html"),
*rendered_graph, opts));
}
}
if (opts.dump_as_url) {
std::string url = RenderGraph(filename, module, RenderedGraphFormat::kUrl);
std::cout << filename << " --> " << url << std::endl;
if (!opts.dumping_to_stdout()) {
file_paths.push_back(
DumpToFileInDirImpl(StrFormat("%s.url", filename), url, opts));
}
}
std::vector<std::string> dumped_file_paths;
for (const std::optional<std::string>& path : file_paths) {
if (path.has_value()) {
dumped_file_paths.push_back(*path);
}
}
if (!dumped_file_paths.empty()) {
LOG_FIRST_N(INFO, 1) << "HloModule dump enabled with path prefix: "
<< prefix << ", suffix: " << suffix;
}
return dumped_file_paths;
}
static void DumpHloModuleMetadata(
const HloModuleMetadataProto& metadata, const CanonicalDebugOptions& opts,
absl::flat_hash_set<int64_t>* dumped_module_ids) {
if (!dumped_module_ids->insert(metadata.canonical_module_id()).second) {
return;
}
std::string filename = absl::StrFormat("module_%04d.metadata.textproto",
metadata.canonical_module_id());
std::string content;
if (tsl::protobuf::TextFormat::PrintToString(metadata, &content)) {
DumpToFileInDirImpl(filename, content, opts);
} else {
LOG(ERROR) << "Failed to convert HloModuleMetadataProto to text.";
}
}
static absl::Mutex mu(absl::kConstInit);
static auto& module_id_to_step_number ABSL_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, int64_t>();
static auto& module_id_to_timestamp ABSL_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, uint64_t>();
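// Returns a per-module counter that increments on every call; used to order
// the dumps produced before/after successive passes.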
int64_t StepNumberForModule(const HloModule& module) {
absl::MutexLock lock(&mu);
return module_id_to_step_number[module.unique_id()]++;
}
}
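// Returns the timestamp of the module's first dump (empty when
// --xla_dump_include_timestamp is off); later dumps of the same module reuse
// the cached value so all of its files share one timestamp.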
std::string TimestampFor(const HloModule& module) {
if (!module.config().debug_options().xla_dump_include_timestamp()) {
return "";
}
absl::MutexLock lock(&mu);
auto timestamp_emplace = module_id_to_timestamp.try_emplace(
module.unique_id(), tsl::Env::Default()->NowMicros());
return std::to_string(timestamp_emplace.first->second);
}
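// Builds "<prefix>.module_NNNN.<module_name>.<suffix>"; if the result is
// longer than 255 characters the module name is dropped, presumably to stay
// within common filesystem name limits.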
std::string FilenameFor(int unique_id, string_view module_name,
string_view prefix, string_view suffix) {
std::string filename;
if (!prefix.empty()) {
absl::StrAppend(&filename, prefix, ".");
}
absl::StrAppendFormat(&filename, "module_%04d", unique_id);
if (!module_name.empty()) {
absl::StrAppend(&filename, ".", module_name);
}
absl::StrAppend(&filename, ".", suffix);
if (!module_name.empty() && filename.size() > 255) {
return FilenameFor(unique_id, "", prefix, suffix);
}
return filename;
}
std::string FilenameFor(const HloModule& module, string_view prefix,
string_view suffix) {
return FilenameFor(module.unique_id(), module.name(), prefix, suffix);
}
void DumpToFileInDir(const HloModule& module, string_view file_prefix,
string_view file_suffix, string_view contents) {
DumpToFileInDir(module.config().debug_options(),
FilenameFor(module, file_prefix, file_suffix), contents);
}
void DumpToFileInDir(const DebugOptions& debug_options,
absl::string_view filename, absl::string_view contents) {
DumpToFileInDirImpl(filename, contents, CanonicalDebugOptions(debug_options));
}
void DumpToFileInDirOrStdout(const HloModule& module, string_view file_prefix,
string_view file_suffix, string_view contents) {
DumpToFileInDirOrStdoutImpl(
FilenameFor(module, file_prefix, file_suffix), contents,
CanonicalDebugOptions(module.config().debug_options()));
}
void DumpToFileInDirOrStdout(const DebugOptions& debug_options, int unique_id,
string_view module_name, string_view file_prefix,
string_view file_suffix, string_view contents) {
DumpToFileInDirOrStdoutImpl(
FilenameFor(unique_id, module_name, file_prefix, file_suffix), contents,
CanonicalDebugOptions(debug_options));
}
void DumpToFileInDirOrStdout(const HloModule& module, string_view file_prefix,
mlir::Operation* op) {
CanonicalDebugOptions opts(module.config().debug_options());
if (opts.dumping_to_stdout()) return op->dump();
mlir::OpPrintingFlags print_flags = mlir::OpPrintingFlags();
if (file_prefix == "lmhlo") {
    print_flags.enableDebugInfo(/*enable=*/true,
                                /*prettyForm=*/opts.dump_mlir_pretty_form);
}
std::string content;
llvm::raw_string_ostream string_stream(content);
op->print(string_stream, print_flags);
DumpToFileInDirOrStdoutImpl(FilenameFor(module, file_prefix, "mlir"), content,
opts);
}
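// Writes `proto` under the dump directory as "<filename>.txt" (text format,
// optionally produced by a caller-supplied formatter) or "<filename>.pb"
// (binary), depending on whether text dumping is enabled.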
void DumpProtobufToFile(const tsl::protobuf::Message& proto,
const DebugOptions& debug_options,
absl::string_view filename,
absl::AnyInvocable<absl::StatusOr<std::string>(
tsl::Env*, const tsl::protobuf::Message&)>
text_formatter) {
CanonicalDebugOptions opts(debug_options);
tsl::Env* env = tsl::Env::Default();
const std::string& dir = opts.dump_to;
if (dir.empty()) {
return;
}
if (!CreateDirIfNeeded(dir, env).ok()) {
return;
}
const std::string path = tsl::io::JoinPath(dir, filename);
absl::Status status;
if (opts.dump_as_text) {
if (text_formatter) {
auto written_proto = text_formatter(env, proto);
if (!written_proto.status().ok()) {
LOG(ERROR) << "Failure with custom proto text formatting function. "
<< "Could not write XLA data to " << filename << ": "
<< written_proto.status();
return;
}
status = tsl::WriteStringToFile(env, absl::StrCat(path, ".txt"),
written_proto.value());
} else {
status = tsl::WriteTextProto(env, absl::StrCat(path, ".txt"), proto);
}
} else {
status = tsl::WriteBinaryProto(env, absl::StrCat(path, ".pb"), proto);
}
if (!status.ok()) {
LOG(ERROR) << "Could not write XLA data to " << filename << ": " << status;
}
}
void DumpPerModuleProtobufToFile(const HloModule& module,
const tsl::protobuf::Message& proto,
const DebugOptions& debug_options,
absl::string_view name,
absl::AnyInvocable<absl::StatusOr<std::string>(
tsl::Env*, const tsl::protobuf::Message&)>
text_formatter) {
const std::string filename = FilenameFor(module, TimestampFor(module), name);
DumpProtobufToFile(proto, debug_options, filename, std::move(text_formatter));
}
std::vector<std::string> DumpHloModuleIfEnabled(const HloModule& module,
string_view name) {
CanonicalDebugOptions opts(module.config().debug_options());
if (opts.should_dump_module(module.name())) {
    return DumpHloModuleImpl(module, /*buffer_assn=*/nullptr,
                             TimestampFor(module), name, opts);
}
return {};
}
std::vector<std::string> DumpHloModuleIfEnabled(
const HloModule& module, const BufferAssignment& buffer_assn,
string_view name) {
CanonicalDebugOptions opts(module.config().debug_options());
if (opts.should_dump_module(module.name())) {
DumpHloModuleImpl(module, &buffer_assn, TimestampFor(module), name, opts);
}
return {};
}
bool DumpingEnabledForHloModule(string_view hlo_module_name,
const DebugOptions& opts) {
return CanonicalDebugOptions(opts).should_dump_module(hlo_module_name);
}
bool DumpingEnabledForHloPass(string_view hlo_pass_name,
const DebugOptions& opts) {
return CanonicalDebugOptions(opts).should_dump_pass(hlo_pass_name);
}
bool DumpingToStdout(const DebugOptions& opts) {
return CanonicalDebugOptions(opts).dumping_to_stdout();
}
std::vector<std::string> DumpHloModuleBetweenPassesIfEnabled(
string_view pipeline_name, string_view before_pass_name,
string_view after_pass_name, const HloModule& module) {
CanonicalDebugOptions opts(module.config().debug_options());
if (!opts.should_dump_module(module.name())) {
return {};
}
if (!opts.should_dump_pass(before_pass_name) &&
!opts.should_dump_pass(after_pass_name)) {
return {};
}
if (!opts.should_dump_pipeline(pipeline_name)) {
return {};
}
int64_t step_number = StepNumberForModule(module);
std::string timestamp = TimestampFor(module);
std::string filename_suffix =
StrFormat("%04d.%s.after_%s.before_%s", step_number, pipeline_name,
after_pass_name, before_pass_name);
  return DumpHloModuleImpl(module, /*buffer_assn=*/nullptr, timestamp,
                           filename_suffix, opts);
}
void DumpHloModuleDuringPassIfEnabled(string_view pass_name,
string_view step_name,
const HloModule& module) {
CanonicalDebugOptions opts(module.config().debug_options());
if (!opts.should_dump_module(module.name()) ||
!opts.should_dump_pass(pass_name)) {
return;
}
int64_t step_number = StepNumberForModule(module);
std::string timestamp = TimestampFor(module);
std::string filename_suffix =
StrFormat("%04d.%s.%s", step_number, pass_name, step_name);
  DumpHloModuleImpl(module, /*buffer_assn=*/nullptr, timestamp,
                    filename_suffix, opts);
}
void DumpHloSnapshotIfEnabled(const HloModule& module,
const HloSnapshot& snapshot) {
CanonicalDebugOptions opts(module.config().debug_options());
if (!opts.should_dump_module(module.name()) || !opts.dump_snapshots) {
return;
}
int64_t execution_count;
uint64_t timestamp;
{
static auto& module_id_to_execution_count ABSL_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, int64_t>();
absl::MutexLock lock(&mu);
execution_count = module_id_to_execution_count[module.unique_id()]++;
auto timestamp_emplace = module_id_to_timestamp.try_emplace(
module.unique_id(), tsl::Env::Default()->NowMicros());
timestamp = timestamp_emplace.first->second;
}
std::string filename =
StrCat(FilenameFor(module, std::to_string(timestamp),
StrFormat("execution_%04d", execution_count)),
".hlo_snapshot.pb");
if (opts.dumping_to_stdout()) {
LOG(ERROR) << "Refusing to write HLO snapshot proto for " << filename
<< " to stdout. Pass --xla_dump_to=<path> to write to a file.";
return;
}
std::string pb;
if (!tsl::SerializeToStringDeterministic(snapshot, &pb)) {
LOG(ERROR) << "Failed to serialize HLO snapshot proto " << filename;
}
DumpToFileInDirImpl(filename, pb, opts);
}
void DumpHloSnapshotIfEnabled(const HloSnapshot& snapshot,
const DebugOptions& opts) {
CanonicalDebugOptions canonical_opts(opts);
std::string name = snapshot.hlo().hlo_module().name();
if (!canonical_opts.should_dump_module(name) ||
!canonical_opts.dump_snapshots) {
return;
}
int64_t execution_count;
{
static auto& module_name_to_execution_count ABSL_GUARDED_BY(mu) =
*new absl::flat_hash_map<std::string, int64_t>();
absl::MutexLock lock(&mu);
execution_count = module_name_to_execution_count[name]++;
}
std::string filename = StrFormat("module_%s.execution_%04d.hlo_snapshot.pb",
name, execution_count);
if (canonical_opts.dumping_to_stdout()) {
LOG(ERROR) << "Refusing to write HLO snapshot proto for " << filename
<< " to stdout. Pass --xla_dump_to=<path> to write to a file.";
return;
}
std::string pb;
if (!tsl::SerializeToStringDeterministic(snapshot, &pb)) {
LOG(ERROR) << "Failed to serialize HLO snapshot proto " << filename;
}
DumpToFileInDirImpl(filename, pb, canonical_opts);
}
void DumpHloModuleMetadataIfEnabled(const std::vector<HloModule*>& modules) {
absl::flat_hash_set<int64_t> dumped_module_ids;
for (const HloModule* module : modules) {
CanonicalDebugOptions opts(module->config().debug_options());
if (!opts.dump_module_metadata) {
continue;
}
DumpHloModuleMetadata(module->metadata().proto(), opts, &dumped_module_ids);
const std::optional<HloModuleMetadataProto>& prepartitioning_metadata =
module->metadata().prepartitioning_metadata();
if (prepartitioning_metadata.has_value()) {
DumpHloModuleMetadata(*prepartitioning_metadata, opts,
&dumped_module_ids);
}
}
}
absl::Status DumpProtoToDirectory(const tsl::protobuf::Message& message,
const std::string& directory,
const std::string& file_name,
std::string* full_path) {
tsl::Env* env = tsl::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(directory));
TF_RETURN_IF_ERROR(CreateDirIfNeeded(directory, env));
std::string safe_file_name = SanitizeFileName(file_name) + ".pb";
std::string full_path_impl;
if (!full_path) {
full_path = &full_path_impl;
}
*full_path = tsl::io::JoinPath(directory, safe_file_name);
return tsl::WriteBinaryProto(env, *full_path, message);
}
} | #include "xla/service/dump.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::IsEmpty;
TEST(DumpHloIfEnabled, LargeConstantElided) {
HloModuleConfig config;
DebugOptions options = config.debug_options();
auto env = tsl::Env::Default();
std::string dump_dir;
EXPECT_TRUE(env->LocalTempFilename(&dump_dir));
options.set_xla_dump_to(dump_dir);
options.set_xla_dump_hlo_as_text(true);
options.set_xla_dump_large_constants(false);
config.set_debug_options(options);
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[11] parameter(0)
c = s32[11] constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
ROOT x = s32[11] multiply(p0, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnUnverifiedModule(kModuleStr, config));
std::string dump_name = "dump";
auto paths = DumpHloModuleIfEnabled(*m, dump_name);
EXPECT_EQ(paths.size(), 1);
std::string data;
EXPECT_TRUE(ReadFileToString(env, paths[0], &data).ok());
EXPECT_TRUE(absl::StrContains(data, "{...}"));
}
TEST(DumpHloIfEnabled, LargeConstantPrinted) {
HloModuleConfig config;
DebugOptions options = config.debug_options();
auto env = tsl::Env::Default();
std::string dump_dir;
EXPECT_TRUE(env->LocalTempFilename(&dump_dir));
options.set_xla_dump_to(dump_dir);
options.set_xla_dump_hlo_as_text(true);
options.set_xla_dump_large_constants(true);
config.set_debug_options(options);
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[11] parameter(0)
c = s32[11] constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
ROOT x = s32[11] multiply(p0, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnUnverifiedModule(kModuleStr, config));
std::string dump_name = "dump";
auto paths = DumpHloModuleIfEnabled(*m, dump_name);
EXPECT_EQ(paths.size(), 1);
std::string data;
EXPECT_TRUE(ReadFileToString(env, paths[0], &data).ok());
EXPECT_TRUE(!absl::StrContains(data, "{...}"));
}
TEST(DumpTest, NoDumpingToFileWhenNotEnabled) {
std::string filename =
tsl::io::JoinPath(tsl::testing::TmpDir(), "disable_override");
std::string contents = "hello";
DebugOptions options;
options.set_xla_enable_dumping(false);
options.set_xla_dump_to(filename);
DumpToFileInDir(options, "disable_override", contents);
std::vector<std::string> matches;
TF_ASSERT_OK(tsl::Env::Default()->GetMatchingPaths(filename, &matches));
EXPECT_THAT(matches, IsEmpty());
}
TEST(DumpTest, DumpingToFileWorksWhenEnabled) {
std::string filename =
tsl::io::JoinPath(tsl::testing::TmpDir(), "enable_dumping");
std::string contents = "hello";
DebugOptions options;
options.set_xla_dump_to(tsl::testing::TmpDir());
options.set_xla_enable_dumping(true);
DumpToFileInDir(options, "enable_dumping", contents);
std::string real_contents;
TF_ASSERT_OK(
tsl::ReadFileToString(tsl::Env::Default(), filename, &real_contents));
EXPECT_EQ(contents, real_contents);
}
TEST(DumpTest, DumpProtobufToFileWhenEnabled) {
HloModuleProto module;
module.set_name("hello");
std::string filename =
tsl::io::JoinPath(tsl::testing::TmpDir(), "enable_proto_dumping.txt");
DebugOptions options;
options.set_xla_dump_to(tsl::testing::TmpDir());
options.set_xla_enable_dumping(true);
DumpProtobufToFile(module, options, "enable_proto_dumping");
HloModuleProto mod;
TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), filename, &mod));
EXPECT_EQ(mod.name(), module.name());
}
TEST(DumpTest, DumpProtobufToFileWhenDisabled) {
HloModuleProto module;
module.set_name("hello");
std::string filename =
tsl::io::JoinPath(tsl::testing::TmpDir(), "disable_proto_dumping.txt");
DebugOptions options;
options.set_xla_dump_to(tsl::testing::TmpDir());
options.set_xla_enable_dumping(false);
DumpProtobufToFile(module, options, "disable_proto_dumping");
std::vector<std::string> matches;
TF_ASSERT_OK(tsl::Env::Default()->GetMatchingPaths(filename, &matches));
EXPECT_THAT(matches, IsEmpty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dump.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dump_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bbfffe14-4907-451f-87ca-3aac6ed2c3c7 | cpp | tensorflow/tensorflow | result_caster | third_party/xla/xla/service/result_caster.cc | third_party/xla/xla/service/result_caster_test.cc | #include "xla/service/result_caster.h"
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
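// Re-runs shape inference for dot and convolution to recover the natural
// (accumulation) result type; returns nullopt for every other opcode.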
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kDot:
return ShapeInference::InferDotOpShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->dot_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt,
Cast<HloDotInstruction>(instruction)->sparsity());
case HloOpcode::kConvolution:
return ShapeInference::InferConvolveShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->feature_group_count(), instruction->batch_group_count(),
instruction->window(), instruction->convolution_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt);
default:
return std::optional<Shape>(std::nullopt);
}
}
}
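// Fires when the inferred result element type differs from the declared one
// and is the higher-precision of the two, i.e. the op narrows its result.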
bool ResultCaster::InstructionMatchesPattern(HloInstruction* instruction) {
auto status_or_inferred_shape = MaybeInferShape(instruction);
if (!status_or_inferred_shape.ok() ||
!status_or_inferred_shape->has_value()) {
return false;
}
const Shape& inferred_shape = status_or_inferred_shape.value().value();
return inferred_shape.element_type() != instruction->shape().element_type() &&
ShapeUtil::HigherPrecisionElementType(inferred_shape,
instruction->shape()) ==
inferred_shape.element_type();
}
absl::StatusOr<HloInstruction*> ResultCaster::ExpandInstruction(
HloInstruction* instruction) {
auto* computation = instruction->parent();
Shape inferred_shape = MaybeInferShape(instruction).value().value();
*inferred_shape.mutable_layout() = instruction->shape().layout();
auto clone = computation->AddInstruction(
instruction->CloneWithNewShape(inferred_shape));
return computation->AddInstruction(
HloInstruction::CreateConvert(instruction->shape(), clone));
}
} | #include "xla/service/result_caster.h"
#include <memory>
#include <tuple>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class ResultCasterTest
: public HloTestBase,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};
TEST_P(ResultCasterTest, CastResultWhenNeeded) {
PrimitiveType lhs_type, rhs_type, result_type;
std::tie(lhs_type, rhs_type, result_type) = GetParam();
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool casted, ResultCaster().Run(module.get()));
const PrimitiveType accumulation_type =
primitive_util::HigherPrecisionType(lhs_type, rhs_type);
const bool should_cast =
result_type != accumulation_type &&
primitive_util::HigherPrecisionType(accumulation_type, result_type) ==
accumulation_type;
EXPECT_EQ(casted, should_cast);
auto lhs = op::Parameter(0);
auto rhs = op::Parameter(1);
auto original_shape_str = absl::Substitute(
"$0[2,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type));
auto accumulation_shape_str = absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(accumulation_type));
if (should_cast) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Convert(AllOf(op::Dot(lhs, rhs),
op::Shape(accumulation_shape_str))),
op::Shape(original_shape_str)));
} else {
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(lhs, rhs), op::Shape(original_shape_str)));
}
}
INSTANTIATE_TEST_SUITE_P(All, ResultCasterTest,
::testing::Values(std::make_tuple(BF16, BF16, S32),
std::make_tuple(F32, F32, S32),
std::make_tuple(F32, BF16, F32),
std::make_tuple(BF16, F32, F64)));
TEST_F(ResultCasterTest, SparseDot) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
p0 = bf16[2,16]{1,0} parameter(0)
p1 = f32[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool casted, ResultCaster().Run(module.get()));
EXPECT_TRUE(casted);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Convert(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kDot,
{op::Parameter(0), op::Parameter(1), op::Parameter(2)}))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/result_caster.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/result_caster_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5feb927-0ca3-4b2d-b028-8915edaf8170 | cpp | tensorflow/tensorflow | all_gather_combiner | third_party/xla/xla/service/all_gather_combiner.cc | third_party/xla/xla/service/all_gather_combiner_test.cc | #include "xla/service/all_gather_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
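// Returns the all-gather dimension used most often across `to_combine`,
// falling back to 0 when that dimension would be out of range for the
// lowest-rank shape in the set.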
int64_t FindMostFrequentGatherDim(
absl::Span<HloInstruction* const> to_combine) {
assert(!to_combine.empty());
int64_t min_rank = std::numeric_limits<int64_t>::max();
std::vector<int64_t> frequency;
for (const HloInstruction* it : to_combine) {
int64_t dim = Cast<HloAllGatherInstruction>(it)->all_gather_dimension();
frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())),
0);
frequency[dim]++;
min_rank = std::min(min_rank, it->shape().rank());
}
int64_t most_frequent_dim = std::distance(
frequency.begin(), std::max_element(frequency.begin(), frequency.end()));
return most_frequent_dim < min_rank ? most_frequent_dim : 0;
}
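// Combines the given all-gather ops into one variadic all-gather along the
// most frequent dimension, bitcasting operands that gather along a
// different dimension.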
absl::Status CombineAllGathers(absl::Span<HloInstruction* const> to_combine,
bool combine_by_dim) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " AllGather ops";
HloComputation& computation = *to_combine.back()->parent();
std::vector<HloInstruction*> operands;
std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
std::vector<Shape> output_shapes;
int64_t most_frequent_dim = FindMostFrequentGatherDim(to_combine);
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGather);
const auto* ag = Cast<HloAllGatherInstruction>(hlo);
TF_RET_CHECK(hlo->operand_count() == 1);
TF_RET_CHECK(hlo->shape().IsArray());
TF_RET_CHECK(!combine_by_dim ||
ag->all_gather_dimension() == most_frequent_dim);
HloInstruction* operand = hlo->operands().front();
operands.push_back(operand);
operand_permutations.emplace_back();
output_shapes.push_back(hlo->shape());
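// Bitcast the operand so its gather dimension lines up with the most
// frequent dimension, recording the permutation so the result can be
// bitcast back afterwards.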
if (ag->all_gather_dimension() != most_frequent_dim) {
const Shape& operand_shape = operand->shape();
auto& perm = operand_permutations.back();
perm = std::vector<int64_t>(operand_shape.rank());
std::iota(perm->begin(), perm->end(), 0);
std::swap((*perm)[most_frequent_dim],
(*perm)[ag->all_gather_dimension()]);
operands.back() =
computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
}
}
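// Create a single variadic all-gather whose tuple elements are the
// individual results.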
HloInstruction* combined =
    computation.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeTupleShape(output_shapes), operands, most_frequent_dim,
to_combine.front()->device_list(),
/*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloAllGatherInstruction>(to_combine.front())
->use_global_device_ids()));
combined->set_sharding(
hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
HloInstruction* replacement = computation.AddInstruction(
HloInstruction::CreateGetTupleElement(combined, i));
if (operand_permutations[i]) {
replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::PermuteDimensions(*operand_permutations[i],
replacement->shape()),
replacement));
}
TF_RETURN_IF_ERROR(
computation.ReplaceInstruction(to_combine[i], replacement));
}
return absl::OkStatus();
}
}
std::string& AllGatherCombiner::GetGroupKeyExtraArgs(
AllGatherCombiner::GroupKey& key) {
return std::get<6>(key);
}
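// Builds the key under which all-gathers may be combined: same domain,
// channel presence, use_global_device_ids and replica groups, plus the
// gather dimension and element type when those are required to match.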
std::optional<AllGatherCombiner::GroupKey>
AllGatherCombiner::CombineKey(const HloInstruction* instruction,
const HloDomainMap& domain_map,
bool combine_by_dim,
bool combine_different_dtypes) {
if (instruction->opcode() != HloOpcode::kAllGather) {
return std::nullopt;
}
std::vector<std::vector<int64_t>> replica_groups;
const auto* ag = Cast<HloAllGatherInstruction>(instruction);
replica_groups.reserve(ag->replica_groups().size());
for (const ReplicaGroup& replica_group : ag->replica_groups()) {
replica_groups.push_back(
std::vector<int64_t>(replica_group.replica_ids().begin(),
replica_group.replica_ids().end()));
}
int64_t ag_dim_key = combine_by_dim ? ag->all_gather_dimension() : -1;
PrimitiveType data_type = combine_different_dtypes
? PRIMITIVE_TYPE_INVALID
: ag->shape().element_type();
return GroupKey{ag_dim_key,
domain_map.GetDomainMetadataId(ag),
ag->channel_id().has_value(),
ag->use_global_device_ids(),
data_type,
replica_groups,
""};
}
AllGatherCombiner::AllGatherCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count,
bool combine_by_dim,
bool combine_different_dtypes)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count),
combine_by_dim_(combine_by_dim),
combine_different_dtypes_(combine_different_dtypes) {}
absl::StatusOr<bool> AllGatherCombiner::RunWithKeyCombiner(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::FunctionRef<std::optional<AllGatherCombiner::GroupKey>(
const HloInstruction*, const HloDomainMap&, bool, bool)>
combine_key) {
VLOG(1) << "Running AllGatherCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip AllGatherCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedCollective(*module,
HloOpcode::kAllGather)) {
VLOG(1) << "Skip AllGatherCombiner because the module contains "
"all-gather with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn = [&](const HloInstruction* instruction) {
return combine_key(instruction, *domain_map, combine_by_dim_,
combine_different_dtypes_);
};
auto combine_fn =
[&](absl::Span<HloInstruction* const> to_combine) -> absl::Status {
return CombineAllGathers(to_combine, combine_by_dim_);
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<GroupKey>(computation, key_fn, combine_fn,
combine_threshold_in_bytes_,
combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
absl::StatusOr<bool> AllGatherCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
bool changed, RunWithKeyCombiner(module, execution_threads, CombineKey));
return changed;
}
} | #include "xla/service/all_gather_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::Matcher;
namespace op = xla::testing::opcode_matchers;
int64_t kMaxCombineCount = 256;
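// Collects every all-gather instruction from the module's non-fusion
// computations.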
std::vector<HloAllGatherInstruction*> FindAllGathers(const HloModule& module) {
std::vector<HloAllGatherInstruction*> results;
for (HloComputation* computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (HloInstruction* hlo : computation->instructions()) {
if (auto it = DynCast<HloAllGatherInstruction>(hlo)) {
results.push_back(it);
}
}
}
return results;
}
int64_t AllGatherCount(const HloModule& module) {
return FindAllGathers(module).size();
}
using AllGatherCombinerTest = HloTestBase;
TEST_F(AllGatherCombinerTest, CombineAllGathers) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0)
param1 = f32[32] parameter(1)
allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[128] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[128], f32[128]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather =
op::AllGather(op::Parameter(0), op::Parameter(1));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1)));
}
TEST_F(AllGatherCombinerTest, CombineDifferentDtypes) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0)
param1 = s32[32] parameter(1)
allgather0 = f32[128] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = s32[128] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[128], s32[128]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true,
/*combine_different_dtypes=*/false);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllGather(op::Parameter(0)),
op::AllGather(op::Parameter(1))));
}
TEST_F(AllGatherCombinerTest, CombineAllGathersByAllGatherDimension) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,2] parameter(0)
param1 = f32[2,2] parameter(1)
param2 = f32[2,2] parameter(2)
param3 = f32[2,2] parameter(3)
param4 = f32[2,2] parameter(4)
allgather0 = f32[8,2] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[8,2] all-gather(param1), replica_groups={}, dimensions={0}
allgather2 = f32[2,8] all-gather(param2), replica_groups={}, dimensions={1}
allgather3 = f32[2,8] all-gather(param3), replica_groups={}, dimensions={1}
allgather4 = f32[8,2] all-gather(param4), replica_groups={}, dimensions={0}
ROOT tuple = (f32[8,2], f32[8,2], f32[2,8], f32[2,8], f32[8,2])
tuple(allgather0, allgather1, allgather2, allgather3, allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather0 =
op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
Matcher<const HloInstruction*> combined_all_gather1 =
op::AllGather(op::Parameter(2), op::Parameter(3));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather0, 0),
op::GetTupleElement(combined_all_gather0, 1),
op::GetTupleElement(combined_all_gather1, 0),
op::GetTupleElement(combined_all_gather1, 1),
op::GetTupleElement(combined_all_gather0, 2)));
}
TEST_F(AllGatherCombinerTest, DoNotCombineOverThreshold) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[8] parameter(0)
param1 = f32[8] parameter(1)
allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(255, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, CombineUpToThreshold) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[8] parameter(0)
param1 = f32[8] parameter(1)
allgather0 = f32[32] all-gather(param0), replica_groups={}, dimensions={0}
allgather1 = f32[32] all-gather(param1), replica_groups={}, dimensions={0}
ROOT tuple = (f32[32], f32[32]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(256, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 1);
EXPECT_TRUE(changed);
}
TEST_F(AllGatherCombinerTest, NoDependentCombination) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param = f32[1] parameter(0)
allgather0 = f32[2] all-gather(param), replica_groups={}, dimensions={0}
ROOT allgather1 = f32[4] all-gather(allgather0), replica_groups={},
dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, NoDifferentReplicaGroupsCombination) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0)
param1 = f32[32] parameter(1)
allgather0 = f32[64] all-gather(param0), replica_groups={{0, 1}, {2, 3}},
dimensions={0}
allgather1 = f32[64] all-gather(param1), replica_groups={{0, 2}, {1, 3}},
dimensions={0}
ROOT tuple = (f32[64], f32[64]) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, DomainPreventsCombining) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0), sharding={maximal device=0}
param1 = f32[32] parameter(1), sharding={maximal device=1}
allgather0 = f32[128] all-gather(param0),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
allgather1 = f32[128] all-gather(param1),
replica_groups={}, dimensions={0}, sharding={maximal device=1}
domain0 = f32[128] domain(allgather0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
exit={maximal device=0}}
domain1 = f32[128] domain(allgather1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}},
exit={maximal device=1}}
ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllGatherCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[32] parameter(0), sharding={maximal device=0}
param1 = f32[32] parameter(1), sharding={maximal device=1}
param2 = f32[32] parameter(2), sharding={maximal device=1}
allgather0 = f32[128] all-gather(param0),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
allgather1 = f32[128] all-gather(param1),
replica_groups={}, dimensions={0}, sharding={maximal device=1}
allgather2 = f32[128] all-gather(param2),
replica_groups={}, dimensions={0}, sharding={maximal device=0}
domain0 = f32[128] domain(allgather0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
domain1 = f32[128] domain(allgather1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=1}}
domain2 = f32[128] domain(allgather2),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1,
domain2),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 3);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllGatherCount(*module), 2);
EXPECT_TRUE(changed);
const HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
ASSERT_EQ(param0->user_count(), 1);
const HloInstruction* combined_ag = param0->users().front();
ASSERT_EQ(combined_ag->opcode(), HloOpcode::kAllGather);
EXPECT_THAT(combined_ag,
op::Sharding("{{maximal device=0}, {maximal device=0}}"));
}
TEST_F(AllGatherCombinerTest, CombineAllGathersDifferentDims) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,3]{1,0} parameter(0)
param1 = f32[2,3]{0,1} parameter(1)
allgather0 = f32[8,3]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[2,12]{0,1} all-gather(param1), replica_groups={},
dimensions={1}
ROOT tuple = (f32[8,3]{1,0}, f32[2,12]{0,1}) tuple(allgather0, allgather1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather =
op::AllGather(op::Parameter(0), op::Bitcast(op::Parameter(1)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::Bitcast(op::GetTupleElement(combined_all_gather, 1))));
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDims) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6,11]{1,0} parameter(4)
allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
op::Bitcast(op::Parameter(3)), op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsRank4) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7,2,7]{3,2,1,0} parameter(0)
param1 = f32[3,8,3,8]{3,2,1,0} parameter(1)
param2 = f32[4,9,4,9]{3,0,1,2} parameter(2)
param3 = f32[5,10,5,10]{3,0,1,2} parameter(3)
param4 = f32[6,11,6,11]{3,2,1,0} parameter(4)
allgather0 = f32[8,7,2,7]{3,2,1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8,3,8]{3,2,1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,9,16,9]{3,0,1,2} all-gather(param2), replica_groups={},
dimensions={2}
allgather3 = f32[5,10,20,10]{3,0,1,2} all-gather(param3), replica_groups={},
dimensions={2}
allgather4 = f32[24,11,6,11]{3,2,1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7,2,7]{3,2,1,0}, f32[12,8,3,8]{3,2,1,0},
f32[4,9,16,9]{3,0,1,2}, f32[5,10,20,10]{3,0,1,2},
f32[24,11,6,11]{3,2,1,0}) tuple(allgather0, allgather1, allgather2,
allgather3, allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Parameter(0), op::Parameter(1), op::Bitcast(op::Parameter(2)),
op::Bitcast(op::Parameter(3)), op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather, 0),
op::GetTupleElement(combined_all_gather, 1),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
TEST_F(AllGatherCombinerTest, CombineManyAllGathersDifferentDimsMixedRanks) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6]{0} parameter(4)
allgather0 = f32[2,28]{1,0} all-gather(param0), replica_groups={},
dimensions={1}
allgather1 = f32[3,32]{1,0} all-gather(param1), replica_groups={},
dimensions={1}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24]{0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[2,28]{1,0}, f32[3,32]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24]{0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/false);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather = op::AllGather(
op::Bitcast(op::Parameter(0)), op::Bitcast(op::Parameter(1)),
op::Bitcast(op::Parameter(2)), op::Bitcast(op::Parameter(3)),
op::Parameter(4));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::Bitcast(op::GetTupleElement(combined_all_gather, 0)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 1)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 2)),
op::Bitcast(op::GetTupleElement(combined_all_gather, 3)),
op::GetTupleElement(combined_all_gather, 4)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(1, all_gathers.size());
ASSERT_EQ(0, all_gathers.front()->all_gather_dimension());
}
TEST_F(AllGatherCombinerTest, CombineAllGathersByDim) {
const char* const hlo_string = R"(
HloModule Module
ENTRY entry {
param0 = f32[2,7]{1,0} parameter(0)
param1 = f32[3,8]{1,0} parameter(1)
param2 = f32[4,9]{0,1} parameter(2)
param3 = f32[5,10]{0,1} parameter(3)
param4 = f32[6,11]{1,0} parameter(4)
allgather0 = f32[8,7]{1,0} all-gather(param0), replica_groups={},
dimensions={0}
allgather1 = f32[12,8]{1,0} all-gather(param1), replica_groups={},
dimensions={0}
allgather2 = f32[4,36]{0,1} all-gather(param2), replica_groups={},
dimensions={1}
allgather3 = f32[5,40]{0,1} all-gather(param3), replica_groups={},
dimensions={1}
allgather4 = f32[24,11]{1,0} all-gather(param4), replica_groups={},
dimensions={0}
ROOT tuple = (f32[8,7]{1,0}, f32[12,8]{1,0}, f32[4,36]{0,1}, f32[5,40]{0,1},
f32[24,11]{1,0}) tuple(allgather0, allgather1, allgather2, allgather3,
allgather4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllGatherCombiner combine(1024 * 1024, kMaxCombineCount,
/*combine_by_dim=*/true);
ASSERT_EQ(AllGatherCount(*module), 5);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_TRUE(changed);
Matcher<const HloInstruction*> combined_all_gather_0 =
op::AllGather(op::Parameter(0), op::Parameter(1), op::Parameter(4));
Matcher<const HloInstruction*> combined_all_gather_1 =
op::AllGather(op::Parameter(2), op::Parameter(3));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(combined_all_gather_0, 0),
op::GetTupleElement(combined_all_gather_0, 1),
op::GetTupleElement(combined_all_gather_1, 0),
op::GetTupleElement(combined_all_gather_1, 1),
op::GetTupleElement(combined_all_gather_0, 2)));
std::vector<HloAllGatherInstruction*> all_gathers = FindAllGathers(*module);
ASSERT_EQ(2, all_gathers.size());
ASSERT_EQ(0, all_gathers[0]->all_gather_dimension());
ASSERT_EQ(1, all_gathers[1]->all_gather_dimension());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
42871c5d-e2a7-4409-b141-155d8cd6cb8c | cpp | tensorflow/tensorflow | batched_gather_scatter_normalizer | third_party/xla/xla/service/batched_gather_scatter_normalizer.cc | third_party/xla/xla/service/batched_gather_scatter_normalizer_test.cc | #include "xla/service/batched_gather_scatter_normalizer.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
bool IsBatchGather(const HloGatherInstruction* gather) {
const auto& dims = gather->gather_dimension_numbers();
return !dims.operand_batching_dims().empty();
}
bool IsBatchScatter(const HloScatterInstruction* scatter) {
const auto& dims = scatter->scatter_dimension_numbers();
return !dims.input_batching_dims().empty();
}
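// Promotes an integral index type so that `size` fits in it; non-integral
// and already-wide-enough types are returned unchanged.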
PrimitiveType PromoteTypeForSize(PrimitiveType type, int64_t size) {
if (!primitive_util::IsIntegralType(type) ||
primitive_util::FitsInIntegralType(size, type)) {
return type;
}
if (primitive_util::FitsInIntegralType(size, PrimitiveType::S32)) {
return PrimitiveType::S32;
}
return PrimitiveType::S64;
}
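// indices_are_sorted can only be preserved if prepending the batching
// dimensions keeps both dimension lists sorted.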
bool GetUpdatedIndicesAreSorted(bool indices_are_sorted,
absl::Span<const int64_t> indices_batching_dims,
absl::Span<const int64_t> updated_index_map) {
return indices_are_sorted && absl::c_is_sorted(indices_batching_dims) &&
absl::c_is_sorted(updated_index_map);
}
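// Builds the normalized indices: one iota per batching dimension is
// prepended along the index vector dimension, widening the index type
// first if any batching dimension's size would overflow it.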
HloInstruction* CreateConcatIndices(
HloInstruction* inst, HloInstruction* indices, int64_t index_vector_dim,
absl::Span<const int64_t> indices_batching_dims,
BatchedGatherScatterNormalizer* normalizer) {
PrimitiveType element_type = indices->shape().element_type();
for (int64_t indices_batching_dim : indices_batching_dims) {
element_type = PromoteTypeForSize(
element_type, indices->shape().dimensions(indices_batching_dim));
}
if (element_type != indices->shape().element_type()) {
Shape indices_shape = indices->shape();
indices_shape.set_element_type(element_type);
indices = inst->parent()->AddInstruction(
HloInstruction::CreateConvert(indices_shape, indices));
}
Shape iota_shape = indices->shape();
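// When index_vector_dim equals the indices rank, the index vector
// dimension is implicit; materialize it as a trailing size-1 dimension so
// the iotas can be concatenated along it.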
const bool index_vector_dim_on_last_dim =
index_vector_dim == iota_shape.rank();
if (index_vector_dim_on_last_dim) {
std::vector<int64_t> dimensions(iota_shape.dimensions().begin(),
iota_shape.dimensions().end());
dimensions.push_back(1);
iota_shape = ShapeUtil::MakeShape(element_type, dimensions);
indices = inst->AddInstruction(
HloInstruction::CreateReshape(iota_shape, indices));
}
iota_shape.set_dimensions(index_vector_dim, 1);
normalizer->UpdateLayout(&iota_shape);
std::vector<HloInstruction*> indices_to_concat;
indices_to_concat.reserve(indices_batching_dims.size() + 1);
for (int64_t indices_batching_dim : indices_batching_dims) {
indices_to_concat.push_back(inst->parent()->AddInstruction(
HloInstruction::CreateIota(iota_shape, indices_batching_dim)));
}
indices_to_concat.push_back(indices);
Shape concat_shape = iota_shape;
concat_shape.set_dimensions(
index_vector_dim,
indices_batching_dims.size() +
(index_vector_dim_on_last_dim
? 1
: indices->shape().dimensions(index_vector_dim)));
normalizer->UpdateLayout(&concat_shape);
return inst->AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, indices_to_concat, index_vector_dim));
}
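// Rewrites a batched gather as an equivalent non-batched gather: batching
// dimensions become leading entries of start_index_map and additional
// collapsed slice dimensions.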
absl::StatusOr<HloInstruction*> NormalizeBatchGather(
HloGatherInstruction* gather, BatchedGatherScatterNormalizer* normalizer) {
HloInstruction* gather_operand = gather->mutable_operand(0);
HloInstruction* gather_indices = gather->mutable_operand(1);
const auto& dims = gather->gather_dimension_numbers();
CHECK_EQ(dims.operand_batching_dims_size(),
dims.start_indices_batching_dims_size());
std::vector<int64_t> start_index_map(dims.operand_batching_dims().begin(),
dims.operand_batching_dims().end());
absl::c_copy(dims.start_index_map(), std::back_inserter(start_index_map));
gather_indices =
CreateConcatIndices(gather, gather_indices, dims.index_vector_dim(),
dims.start_indices_batching_dims(), normalizer);
std::vector<int64_t> collapsed_slice_dims(dims.collapsed_slice_dims().begin(),
dims.collapsed_slice_dims().end());
absl::c_copy(dims.operand_batching_dims(),
std::back_inserter(collapsed_slice_dims));
absl::c_sort(collapsed_slice_dims);
GatherDimensionNumbers updated_dims =
HloGatherInstruction::MakeGatherDimNumbers(
dims.offset_dims(), collapsed_slice_dims, start_index_map,
dims.index_vector_dim());
return gather->AddInstruction(HloInstruction::CreateGather(
gather->shape(), gather_operand, gather_indices, updated_dims,
gather->gather_slice_sizes(),
GetUpdatedIndicesAreSorted(gather->indices_are_sorted(),
dims.start_indices_batching_dims(),
start_index_map)));
}
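// Rewrites a batched scatter analogously: batching dimensions become
// leading entries of scatter_dims_to_operand_dims and additional inserted
// window dimensions.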
absl::StatusOr<HloInstruction*> NormalizeBatchScatter(
HloScatterInstruction* scatter,
BatchedGatherScatterNormalizer* normalizer) {
auto scatter_operands = scatter->scatter_operands();
HloInstruction* scatter_indices = scatter->scatter_indices();
auto scatter_updates = scatter->scatter_updates();
const auto& dims = scatter->scatter_dimension_numbers();
CHECK_EQ(dims.input_batching_dims_size(),
dims.scatter_indices_batching_dims_size());
std::vector<int64_t> scatter_dims_to_operand_dims(
dims.input_batching_dims().begin(), dims.input_batching_dims().end());
absl::c_copy(dims.scatter_dims_to_operand_dims(),
std::back_inserter(scatter_dims_to_operand_dims));
scatter_indices =
CreateConcatIndices(scatter, scatter_indices, dims.index_vector_dim(),
dims.scatter_indices_batching_dims(), normalizer);
std::vector<int64_t> inserted_window_dims(dims.inserted_window_dims().begin(),
dims.inserted_window_dims().end());
absl::c_copy(dims.input_batching_dims(),
std::back_inserter(inserted_window_dims));
absl::c_sort(inserted_window_dims);
ScatterDimensionNumbers updated_dims =
HloScatterInstruction::MakeScatterDimNumbers(
dims.update_window_dims(), inserted_window_dims,
scatter_dims_to_operand_dims, dims.index_vector_dim());
return scatter->AddInstruction(HloInstruction::CreateScatter(
scatter->shape(), scatter_operands, scatter_indices, scatter_updates,
scatter->to_apply(), updated_dims,
GetUpdatedIndicesAreSorted(scatter->indices_are_sorted(),
dims.scatter_indices_batching_dims(),
scatter_dims_to_operand_dims),
scatter->unique_indices()));
}
}
absl::StatusOr<HloInstruction*>
BatchedGatherScatterNormalizer::ExpandInstruction(HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kGather) {
auto* gather = DynCast<HloGatherInstruction>(inst);
return NormalizeBatchGather(gather, this);
}
if (inst->opcode() == HloOpcode::kScatter) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return NormalizeBatchScatter(scatter, this);
}
return absl::InvalidArgumentError(absl::StrFormat(
"Instruction: %s is not a batch gather or scatter.", inst->ToString()));
}
bool BatchedGatherScatterNormalizer::InstructionMatchesPattern(
HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kGather) {
auto* gather = DynCast<HloGatherInstruction>(inst);
return IsBatchGather(gather);
}
if (inst->opcode() == HloOpcode::kScatter) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return IsBatchScatter(scatter);
}
return false;
}
} | #include "xla/service/batched_gather_scatter_normalizer.h"
#include <optional>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class BatchedGatherScatterNormalizerTest : public HloTestBase {};
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGather) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512], start_indices: s64[10,9,8,7,5,512]) -> f32[10,9,8,7,30,29,28,27,26,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5},
start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,512]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512]{{.*}} concatenate(%[[IOTA]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,512]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5},
CHECK-SAME: start_index_map={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGather2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512,1024,100], start_indices: s64[10,9,8,7,6,512,1024]) -> f32[10,9,8,7,30,29,28,27,26,512,1024] {
%input_tensor = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={7}, start_index_map={0,1,2,3,4,7}, operand_batching_dims={5,6},
start_indices_batching_dims={5,6}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1,1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=6
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,8,512,1024]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,512,1024]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5,6,7},
CHECK-SAME: start_index_map={5,6,0,1,2,3,4,7},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,1,1,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesBecomeUnsorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[3,4,1]{2,1,0})->f32[3,4,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[3,4,1]) -> f32[3,4,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[3,4,1]{2,1,0} parameter(1)
ROOT %gather = f32[3,4,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[3,4,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={1}, start_index_map={1}, operand_batching_dims={0,2},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[3,4,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[3,4,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[3,4,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[3,4,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,2,1},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesBecomeUnsorted2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[3,2,1]{2,1,0})->f32[3,2,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[3,2,1]) -> f32[3,2,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[3,2,1]{2,1,0} parameter(1)
ROOT %gather = f32[3,2,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[3,2,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={1,0}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[3,2,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[IOTA2:.*]] = s64[3,2,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[INDICES_CONCAT:.*]] = s64[3,2,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[3,2,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesRemainSorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0})->f32[2,3,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[2,3,1]) -> f32[2,3,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[2,3,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,3,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=true
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,3,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-SAME: indices_are_sorted=true
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchGatherIndicesRemainUnsorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0})->f32[2,3,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,3,4,512], start_indices: s64[2,3,1]) -> f32[2,3,5] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%start_indices = s64[2,3,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,3,5]{2,1,0}
gather(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={2}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,1,5},
indices_are_sorted=false
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,3,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,1,5}
CHECK-NOT: indices_are_sorted
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchGatherDimSizeZero) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,0]{5,4,3,2,1,0}, s64[10,9,8,7,5,0]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,0], start_indices: s64[10,9,8,7,5,0]) -> f32[10,9,8,7,30,29,28,27,26,0] {
%input_tensor = f32[50,49,48,47,46,0]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,0]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0}
gather(f32[50,49,48,47,46,0]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,0]{5,4,3,2,1,0} %start_indices),
offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5},
start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,0}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,0]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,0]{{.*}} concatenate(%[[IOTA]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,30,29,28,27,26,0]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={4,5,6,7,8},
CHECK-SAME: collapsed_slice_dims={5},
CHECK-SAME: start_index_map={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: slice_sizes={30,29,28,27,26,0}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatter) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512], scatter_indices: s64[10,9,8,7,5,512], updates: f32[10,9,8,7,30,29,28,27,26,512]) -> f32[50,49,48,47,46,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512]{5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,5,512]{5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5},
scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,512]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512]{{.*}} concatenate(%[[IOTA]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,512]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5},
CHECK-SAME: scatter_dims_to_operand_dims={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatter2) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512,1024,100], scatter_indices: s64[10,9,8,7,6,512,1024], updates: f32[10,9,8,7,30,29,28,27,26,512,1024]) -> f32[50,49,48,47,46,512,1024,100] {
%input_tensor = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,512,1024,100]{7,6,5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,512,1024]{10,9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={7},
scatter_dims_to_operand_dims={0,1,2,3,4,7}, input_batching_dims={5,6},
scatter_indices_batching_dims={5,6}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,1,512,1024]{{.*}} iota(), iota_dimension=6
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,8,512,1024]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,512,1024,100]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5,6,7},
CHECK-SAME: scatter_dims_to_operand_dims={5,6,0,1,2,3,4,7},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
NormalizeBatchScatterIndicesRemainSorted) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[2,3,4,512]{3,2,1,0}, s64[2,3,1]{2,1,0}, f32[2,3,5]{2,1,0})->f32[2,3,4,512]{3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[2,3,4,512], scatter_indices: s64[2,3,1], updates: f32[2,3,5]) -> f32[2,3,4,512] {
%input_tensor = f32[2,3,4,512]{3,2,1,0} parameter(0)
%scatter_indices = s64[2,3,1]{2,1,0} parameter(1)
%updates = f32[2,3,5]{2,1,0} parameter(2)
ROOT %scatter = f32[2,3,4,512]{3,2,1,0}
scatter(f32[2,3,4,512]{3,2,1,0} %input_tensor, s64[2,3,1]{2,1,0} %scatter_indices, f32[2,3,5]{2,1,0} %updates),
update_window_dims={2}, inserted_window_dims={2}, scatter_dims_to_operand_dims={2}, input_batching_dims={0,1},
scatter_indices_batching_dims={0,1}, index_vector_dim=2, indices_are_sorted=true, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2,3,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s64[2,3,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[2,3,4,512]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={2},
CHECK-SAME: inserted_window_dims={0,1,2},
CHECK-SAME: scatter_dims_to_operand_dims={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: indices_are_sorted=true
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, NormalizeBatchScatterDimSizeZero) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,0]{5,4,3,2,1,0}, s64[10,9,8,7,5,0]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,0]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,0], scatter_indices: s64[10,9,8,7,5,0], updates: f32[10,9,8,7,30,29,28,27,26,0]) -> f32[50,49,48,47,46,0] {
%input_tensor = f32[50,49,48,47,46,0]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,0]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,0]{5,4,3,2,1,0} scatter(
f32[50,49,48,47,46,0]{5,4,3,2,1,0} %input_tensor,
s64[10,9,8,7,5,0]{5,4,3,2,1,0} %scatter_indices,
f32[10,9,8,7,30,29,28,27,26,0]{9,8,7,6,5,4,3,2,1,0} %updates),
update_window_dims={4,5,6,7,8}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5},
scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA:.*]] = s64[10,9,8,7,1,0]{{.*}} iota(), iota_dimension=5
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,0]{{.*}} concatenate(%[[IOTA]], %scatter_indices)
CHECK: ROOT %[[SCATTER:.*]] = f32[50,49,48,47,46,0]{{.*}} scatter(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]], %updates),
CHECK-SAME: update_window_dims={4,5,6,7,8},
CHECK-SAME: inserted_window_dims={5},
CHECK-SAME: scatter_dims_to_operand_dims={5,0,1,2,3,4},
CHECK-SAME: index_vector_dim=4,
CHECK-SAME: to_apply=%add_F32.v3
)");
}
TEST_F(BatchedGatherScatterNormalizerTest, IndexVectorDimOnLastDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[50,512,1024]{2,1,0}, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0})->f32[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,512,1024], start_indices: s64[10,9,8,7,6,512,1024]) -> f32[10,9,8,7,6,512,1024] {
%input_tensor = f32[50,512,1024]{2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0}
gather(f32[50,512,1024]{2,1,0} %input_tensor, s64[10,9,8,7,6,512,1024]{6,5,4,3,2,1,0} %start_indices),
offset_dims={}, collapsed_slice_dims={0}, start_index_map={0}, operand_batching_dims={1,2},
start_indices_batching_dims={5,6}, index_vector_dim=7, slice_sizes={1,1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} iota(), iota_dimension=5
CHECK: %[[IOTA2:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} iota(), iota_dimension=6
CHECK: %[[RESHAPE:.*]] = s64[10,9,8,7,6,512,1024,1]{{.*}} reshape(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s64[10,9,8,7,6,512,1024,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[RESHAPE]])
CHECK: ROOT %[[GATHER:.*]] = f32[10,9,8,7,6,512,1024]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={},
CHECK-SAME: collapsed_slice_dims={0,1,2},
CHECK-SAME: start_index_map={1,2,0},
CHECK-SAME: index_vector_dim=7,
CHECK-SAME: slice_sizes={1,1,1}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeDoesNotOverflowIndicesType) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,127,512]{2,1,0}, s8[2,127,1]{2,1,0})->f32[2,127,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,127,512], start_indices: s8[2,127,1]) -> f32[2,127,5] {
%input_tensor = f32[2,127,512]{2,1,0} parameter(0)
%start_indices = s8[2,127,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,127,5]{2,1,0}
gather(f32[2,127,512]{2,1,0} %input_tensor, s8[2,127,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s8[2,127,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s8[2,127,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[INDICES_CONCAT:.*]] = s8[2,127,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %start_indices)
CHECK: ROOT %[[GATHER:.*]] = f32[2,127,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsIndicesType) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,128,512]{2,1,0}, s8[2,128,1]{2,1,0})->f32[2,128,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,128,512], start_indices: s8[2,128,1]) -> f32[2,128,5] {
%input_tensor = f32[2,128,512]{2,1,0} parameter(0)
%start_indices = s8[2,128,1]{2,1,0} parameter(1)
ROOT %gather = f32[2,128,5]{2,1,0}
gather(f32[2,128,512]{2,1,0} %input_tensor, s8[2,128,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s32[2,128,1]{{.*}} convert(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s32[2,128,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[CONVERT]])
CHECK: ROOT %[[GATHER:.*]] = f32[2,128,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsIndicesTypeAndS32) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2147483648,2,512]{2,1,0}, s8[2147483648,2,1]{2,1,0})->f32[2147483648,2,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2147483648,2,512], start_indices: s8[2147483648,2,1]) -> f32[2147483648,2,5] {
%input_tensor = f32[2147483648,2,512]{2,1,0} parameter(0)
%start_indices = s8[2147483648,2,1]{2,1,0} parameter(1)
ROOT %gather = f32[2147483648,2,5]{2,1,0}
gather(f32[2147483648,2,512]{2,1,0} %input_tensor, s8[2147483648,2,1]{2,1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s64[2147483648,2,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s64[2147483648,2,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s64[2147483648,2,1]{{.*}} convert(%start_indices)
CHECK: %[[INDICES_CONCAT:.*]] = s64[2147483648,2,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[CONVERT]])
CHECK: ROOT %[[GATHER:.*]] = f32[2147483648,2,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
TEST_F(BatchedGatherScatterNormalizerTest,
BatchingDimSizeOverflowsAndIndexVectorDimOnLastDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule StringifyGather, entry_computation_layout={(f32[2,128,512]{2,1,0}, s8[2,128]{1,0})->f32[2,128,5]{2,1,0}}
ENTRY %Gather (input_tensor: f32[2,128,512], start_indices: s8[2,128]) -> f32[2,128,5] {
%input_tensor = f32[2,128,512]{2,1,0} parameter(0)
%start_indices = s8[2,128]{1,0} parameter(1)
ROOT %gather = f32[2,128,5]{2,1,0}
gather(f32[2,128,512]{2,1,0} %input_tensor, s8[2,128]{1,0} %start_indices),
offset_dims={2}, collapsed_slice_dims={}, start_index_map={2}, operand_batching_dims={0,1},
start_indices_batching_dims={0,1}, index_vector_dim=2, slice_sizes={1,1,5}
})";
RunAndFilecheckHloRewrite(kModuleStr, BatchedGatherScatterNormalizer(), R"(
CHECK: %[[IOTA1:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=0
CHECK: %[[IOTA2:.*]] = s32[2,128,1]{{.*}} iota(), iota_dimension=1
CHECK: %[[CONVERT:.*]] = s32[2,128]{{.*}} convert(%start_indices)
CHECK: %[[RESHAPE:.*]] = s32[2,128,1]{{.*}} reshape(%[[CONVERT]])
CHECK: %[[INDICES_CONCAT:.*]] = s32[2,128,3]{{.*}} concatenate(%[[IOTA1]], %[[IOTA2]], %[[RESHAPE]])
CHECK: ROOT %[[GATHER:.*]] = f32[2,128,5]{{.*}} gather(
CHECK-SAME: %input_tensor, %[[INDICES_CONCAT]]),
CHECK-SAME: offset_dims={2},
CHECK-SAME: collapsed_slice_dims={0,1},
CHECK-SAME: start_index_map={0,1,2},
CHECK-SAME: index_vector_dim=2,
CHECK-SAME: slice_sizes={1,1,5}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batched_gather_scatter_normalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batched_gather_scatter_normalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67ce4b54-0758-4b23-907b-85eb35a6a2ae | cpp | tensorflow/tensorflow | convolution_group_converter | third_party/xla/xla/service/convolution_group_converter.cc | third_party/xla/xla/service/convolution_group_converter_test.cc | #include "xla/service/convolution_group_converter.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
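// Visitor that rewrites grouped convolutions into ungrouped ones.
// HandleConvolution expands convolutions with feature_group_count > 1 and
// HandleBatchGroupCount expands those with batch_group_count > 1, either by
// masking an expanded filter or by folding the group dimension into a new
// spatial dimension.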
class ConvolutionVisitor : public DfsHloVisitorWithDefault {
public:
  absl::Status DefaultAction(HloInstruction* /*hlo_instruction*/) override {
return absl::OkStatus();
}
absl::Status HandleConvolution(HloInstruction* convolution) override;
absl::Status HandleBatchGroupCount(HloInstruction* convolution);
static bool Run(HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion);
  bool changed() const { return changed_; }
~ConvolutionVisitor() override = default;
private:
explicit ConvolutionVisitor(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion)
: computation_(computation),
filter_expansion_(filter_expansion),
convert_batch_groups_only_(convert_batch_groups_only),
should_expand_(should_expand),
is_cost_viable_(is_cost_viable) {}
HloComputation* computation_;
bool changed_ = false;
bool filter_expansion_;
bool convert_batch_groups_only_;
std::function<bool(HloInstruction*)> should_expand_;
std::function<bool(HloInstruction*)> is_cost_viable_;
};
bool ConvolutionVisitor::Run(
HloComputation* computation,
std::function<bool(HloInstruction*)> should_expand,
std::function<bool(HloInstruction*)> is_cost_viable,
bool convert_batch_groups_only, bool filter_expansion) {
ConvolutionVisitor visitor(computation, should_expand, is_cost_viable,
convert_batch_groups_only, filter_expansion);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
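// Returns `shape` with the dimension at `input_feature_dim` scaled up by
// `group_count`, i.e. the shape of the filter after group expansion.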
Shape ExpandedFilterShape(const Shape& shape, int64_t group_count,
int64_t input_feature_dim) {
int64_t num_dims = shape.dimensions_size();
CHECK_GE(num_dims, 2);
Shape expanded_shape = shape;
expanded_shape.set_dimensions(
input_feature_dim, shape.dimensions(input_feature_dim) * group_count);
return expanded_shape;
}
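// Returns a vector of group ids, one per element of the masked dimension,
// e.g. GetMaskIds(/*group_size=*/2, /*group_count=*/3) == {0, 0, 1, 1, 2, 2}.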
std::vector<int32_t> GetMaskIds(int64_t group_size, int64_t group_count) {
std::vector<int32_t> values;
values.reserve(group_count * group_size);
for (int i = 0; i < group_count; ++i) {
for (int j = 0; j < group_size; ++j) {
values.push_back(i);
}
}
return values;
}
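// Builds a predicate over the expanded filter shape that is true exactly
// where the group id along the kernel input feature dimension matches the
// group id along the kernel output feature dimension, by comparing two
// broadcast group-id masks for equality.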
HloInstruction* GetExpandedFilterMask(
const Shape& filter_shape, int64_t kernel_input_feature_dim,
int64_t kernel_output_feature_dim, int64_t group_count,
const std::function<HloInstruction*(std::unique_ptr<HloInstruction>)>&
add_instruction) {
Shape expanded_filter_shape =
ExpandedFilterShape(filter_shape, group_count, kernel_input_feature_dim);
Shape mask_shape =
ShapeUtil::MakeShape(S32, expanded_filter_shape.dimensions());
int64_t output_feature = filter_shape.dimensions(kernel_output_feature_dim);
int64_t group_size = filter_shape.dimensions(kernel_input_feature_dim);
const std::vector<int32_t> input_feature_filter_mask =
GetMaskIds(group_size, group_count);
const std::vector<int32_t> output_feature_filter_mask =
GetMaskIds(output_feature / group_count, group_count);
auto mask1 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(input_feature_filter_mask)));
auto broadcasted_mask1 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask1, {kernel_input_feature_dim}));
auto mask2 = add_instruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int32_t>(output_feature_filter_mask)));
auto broadcasted_mask2 = add_instruction(HloInstruction::CreateBroadcast(
mask_shape, mask2, {kernel_output_feature_dim}));
Shape predicate_shape =
ShapeUtil::MakeShape(PRED, expanded_filter_shape.dimensions());
return add_instruction(HloInstruction::CreateCompare(
predicate_shape, broadcasted_mask1, broadcasted_mask2,
ComparisonDirection::kEq));
}
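// Rewrites a convolution with batch_group_count > 1. If the batch group
// count does not equal both the input batch size and the output feature
// size, the groups are split into a new spatial dimension that the window
// below walks diagonally. Otherwise, when the cost model rejects the grouped
// form (or filter expansion is forced), the output is expanded, masked with
// a select, and summed back per group via a reduce-window.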
absl::Status ConvolutionVisitor::HandleBatchGroupCount(
HloInstruction* convolution) {
auto dim_numbers = convolution->convolution_dimension_numbers();
auto activation = convolution->mutable_operand(0);
auto filter = convolution->mutable_operand(1);
int64_t batch_group_count = convolution->batch_group_count();
if (batch_group_count == 1 ||
(should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
VLOG(2) << "Dealing with batch_group_count " << batch_group_count
<< " for convolution " << convolution->ToString() << "\n";
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t input_batch_dimension = dim_numbers.input_batch_dimension();
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
int64_t output_batch_dimension = dim_numbers.output_batch_dimension();
int64_t output_feature_dimension = dim_numbers.output_feature_dimension();
const int64_t kernel_input_feature_dimension =
dim_numbers.kernel_input_feature_dimension();
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
const int64_t input_batch =
activation->shape().dimensions(input_batch_dimension);
const int64_t output_feature =
filter->shape().dimensions(kernel_output_feature_dimension);
if (output_feature != batch_group_count || input_batch != batch_group_count) {
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
input_sizes[input_batch_dimension] /= batch_group_count;
input_sizes.insert(input_sizes.begin() + input_batch_dimension,
batch_group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_batch_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_batch_dimension);
dim_numbers.set_input_batch_dimension(input_batch_dimension + 1);
if (input_feature_dimension > input_batch_dimension) {
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
kernel_sizes[kernel_output_feature_dimension] /= batch_group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
batch_group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (kernel_input_feature_dimension > kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
kernel_input_feature_dimension + 1);
}
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (output_batch_dimension > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(output_batch_dimension + 1);
}
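    // Walk the new size-G dimension diagonally: with window size G, base
    // dilation G, and stride G - 1, output element i of the new dimension
    // only combines input group i with kernel group i.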
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(batch_group_count);
window_dim->set_size(batch_group_count);
window_dim->set_stride(batch_group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
activation, filter, convolution->feature_group_count(),
            /*batch_group_count=*/1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
TF_CHECK_OK(computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value()));
changed_ = true;
return absl::OkStatus();
}
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
const bool cost_too_high = !is_cost_viable_(convolution);
if (cost_too_high || filter_expansion_) {
HloInstruction* filter_mask =
GetExpandedFilterMask(convolution->shape(), output_batch_dimension,
output_feature_dimension, batch_group_count, add);
auto expanded_filter_shape = ExpandedFilterShape(
convolution->shape(), batch_group_count, output_batch_dimension);
VLOG(2) << "output_batch_dimension " << output_batch_dimension;
VLOG(2) << "New output shape of convolution "
<< expanded_filter_shape.ToString();
auto new_convolution = add(HloInstruction::CreateConvolve(
expanded_filter_shape, activation, filter,
        /*feature_group_count=*/1, /*batch_group_count=*/1,
convolution->window(), dim_numbers, convolution->precision_config()));
VLOG(2) << "Expanded convolution " << new_convolution->ToString();
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask, new_convolution,
zero_filter));
PrimitiveType reduce_type = new_filter->shape().element_type();
auto reduce_window_shape = new_convolution->shape();
reduce_window_shape.set_dimensions(output_batch_dimension, 1);
if (primitive_util::BitWidth(reduce_type) < primitive_util::BitWidth(F32)) {
reduce_type = F32;
reduce_window_shape.set_element_type(F32);
Shape convert_shape = new_filter->shape();
convert_shape.set_element_type(F32);
new_filter =
add(HloInstruction::CreateConvert(convert_shape, new_filter));
}
auto zero_literal = LiteralUtil::Zero(reduce_type);
auto zero_scalar =
add(HloInstruction::CreateConstant(std::move(zero_literal)));
auto reduce_function = [&]() -> HloComputation* {
HloComputation::Builder b("add_computation");
Shape shape = ShapeUtil::MakeShape(reduce_type, {});
auto lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto scalar_op = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, lhs, rhs));
return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
};
Window window;
for (int64_t i = 0; i < new_convolution->shape().dimensions_size(); ++i) {
auto* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
if (i == output_batch_dimension) {
dim->set_stride(batch_group_count);
dim->set_size(batch_group_count);
} else {
dim->set_stride(1);
dim->set_size(1);
}
}
auto reduce_window = add(HloInstruction::CreateReduceWindow(
reduce_window_shape, new_filter, zero_scalar, window,
reduce_function()));
Shape convert_back_shape = reduce_window->shape();
convert_back_shape.set_element_type(activation->shape().element_type());
auto reduce_window_converted =
HloInstruction::CreateConvert(convert_back_shape, reduce_window);
TF_CHECK_OK(computation_->ReplaceWithNewInstruction(
convolution, std::move(reduce_window_converted)));
changed_ = true;
}
return absl::OkStatus();
}
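// Rewrites a convolution with feature_group_count > 1 into an equivalent
// form without feature groups. Depthwise convolutions (group_size == 1) are
// either left alone, expanded via a masked filter, or rewritten with a
// reversed-window trick; general grouped convolutions fold the group into a
// new spatial dimension, as in HandleBatchGroupCount.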
absl::Status ConvolutionVisitor::HandleConvolution(
HloInstruction* convolution) {
if (convert_batch_groups_only_) {
return HandleBatchGroupCount(convolution);
}
auto add = [&](std::unique_ptr<HloInstruction> inst) {
return computation_->AddInstruction(std::move(inst));
};
int64_t group_count = convolution->feature_group_count();
if (group_count == 1 || (should_expand_ && !should_expand_(convolution))) {
return absl::OkStatus();
}
changed_ = true;
ConvolutionDimensionNumbers dim_numbers =
convolution->convolution_dimension_numbers();
auto filter = convolution->mutable_operand(1);
int64_t kernel_input_feature_dim =
dim_numbers.kernel_input_feature_dimension();
int64_t group_size = filter->shape().dimensions(kernel_input_feature_dim);
int64_t kernel_output_feature_dim =
dim_numbers.kernel_output_feature_dimension();
auto expanded_filter_shape = ExpandedFilterShape(filter->shape(), group_count,
kernel_input_feature_dim);
HloInstruction* filter_mask =
GetExpandedFilterMask(filter->shape(), kernel_input_feature_dim,
kernel_output_feature_dim, group_count, add);
HloInstruction* expanded_filter;
if (group_size == 1) {
bool depthwise_separable =
(group_count == filter->shape().dimensions(kernel_output_feature_dim));
if (!filter_expansion_ && depthwise_separable) {
changed_ = false;
return absl::OkStatus();
}
VLOG(2) << "is_cost_viable_ " << is_cost_viable_(convolution);
if (!is_cost_viable_(convolution) || filter_expansion_) {
Shape reshaped_filter_shape =
ShapeUtil::DeleteDimension(kernel_input_feature_dim, filter->shape());
auto reshaped_filter =
add(HloInstruction::CreateReshape(reshaped_filter_shape, filter));
std::vector<int64_t> broadcast_dims;
for (int64_t i = 0; i < filter->shape().dimensions_size(); ++i) {
if (i == kernel_input_feature_dim) {
continue;
}
broadcast_dims.push_back(i);
}
expanded_filter = add(HloInstruction::CreateBroadcast(
expanded_filter_shape, reshaped_filter, broadcast_dims));
auto zero = add(HloInstruction::CreateConstant(
LiteralUtil::Zero(expanded_filter_shape.element_type())));
auto zero_filter =
add(HloInstruction::CreateBroadcast(expanded_filter_shape, zero, {}));
auto new_filter = add(HloInstruction::CreateTernary(
expanded_filter_shape, HloOpcode::kSelect, filter_mask,
expanded_filter, zero_filter));
auto new_convolution = HloInstruction::CreateConvolve(
convolution->shape(), convolution->mutable_operand(0), new_filter,
          /*feature_group_count=*/1, /*batch_group_count=*/1,
convolution->window(), dim_numbers, convolution->precision_config());
return computation_->ReplaceWithNewInstruction(
convolution, std::move(new_convolution));
}
std::vector<int64_t> new_filter_dimension;
new_filter_dimension.reserve(filter->shape().rank() + 1);
const int64_t depthwise_multiplier =
filter->shape().dimensions(kernel_output_feature_dim) / group_count;
for (int64_t i = 0; i < filter->shape().rank(); ++i) {
if (i == kernel_output_feature_dim) {
new_filter_dimension.push_back(group_count);
new_filter_dimension.push_back(depthwise_multiplier);
} else {
new_filter_dimension.push_back(filter->shape().dimensions(i));
}
}
if (kernel_input_feature_dim > kernel_output_feature_dim) {
dim_numbers.set_kernel_input_feature_dimension(kernel_input_feature_dim +
1);
}
for (auto& dim : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (dim > kernel_output_feature_dim) {
++dim;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dim + 1);
HloInstruction* new_filter =
computation_->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(filter->shape().element_type(),
new_filter_dimension),
filter));
auto new_activation_shape = convolution->operand(0)->shape();
dim_numbers.add_input_spatial_dimensions(new_activation_shape.rank());
ShapeUtil::AppendMajorDimension(1, &new_activation_shape);
HloInstruction* new_activation =
computation_->AddInstruction(HloInstruction::CreateReshape(
new_activation_shape, convolution->mutable_operand(0)));
auto new_window = convolution->window();
auto new_dim = new_window.add_dimensions();
new_dim->set_size(depthwise_multiplier);
new_dim->set_window_reversal(true);
new_dim->set_padding_low(depthwise_multiplier - 1);
new_dim->set_padding_high(depthwise_multiplier - 1);
new_dim->set_stride(1);
new_dim->set_window_dilation(1);
new_dim->set_base_dilation(1);
std::vector<int64_t> new_output_dimension;
new_output_dimension.reserve(convolution->shape().rank() + 1);
for (int64_t i = 0; i < convolution->shape().rank(); ++i) {
if (i == dim_numbers.output_feature_dimension()) {
new_output_dimension.push_back(group_count);
new_output_dimension.push_back(depthwise_multiplier);
} else {
new_output_dimension.push_back(convolution->shape().dimensions(i));
}
}
if (dim_numbers.output_batch_dimension() >
dim_numbers.output_feature_dimension()) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
for (auto& dim : *dim_numbers.mutable_output_spatial_dimensions()) {
if (dim > dim_numbers.output_feature_dimension()) {
++dim;
}
}
dim_numbers.add_output_spatial_dimensions(
dim_numbers.output_feature_dimension() + 1);
auto new_convolution_output_shape = ShapeUtil::MakeShape(
convolution->shape().element_type(), new_output_dimension);
HloInstruction* new_convolution =
computation_->AddInstruction(HloInstruction::CreateConvolve(
new_convolution_output_shape, new_activation, new_filter,
            /*feature_group_count=*/group_count, /*batch_group_count=*/1,
new_window, dim_numbers, convolution->precision_config()));
return computation_->ReplaceWithNewInstruction(
convolution,
HloInstruction::CreateReshape(convolution->shape(), new_convolution));
}
HloInstruction* activation = convolution->mutable_operand(0);
std::vector<int64_t> input_sizes(activation->shape().dimensions().begin(),
activation->shape().dimensions().end());
const int64_t input_feature_dimension = dim_numbers.input_feature_dimension();
input_sizes[input_feature_dimension] /= group_count;
input_sizes.insert(input_sizes.begin() + input_feature_dimension,
group_count);
activation = MakeReshapeHlo(input_sizes, activation).value();
for (auto& d : *dim_numbers.mutable_input_spatial_dimensions()) {
if (d > input_feature_dimension) {
++d;
}
}
dim_numbers.add_input_spatial_dimensions(input_feature_dimension);
dim_numbers.set_input_feature_dimension(input_feature_dimension + 1);
if (dim_numbers.input_batch_dimension() > input_feature_dimension) {
dim_numbers.set_input_batch_dimension(dim_numbers.input_batch_dimension() +
1);
}
std::vector<int64_t> kernel_sizes(filter->shape().dimensions().begin(),
filter->shape().dimensions().end());
const int64_t kernel_output_feature_dimension =
dim_numbers.kernel_output_feature_dimension();
kernel_sizes[kernel_output_feature_dimension] /= group_count;
kernel_sizes.insert(kernel_sizes.begin() + kernel_output_feature_dimension,
group_count);
filter = MakeReshapeHlo(kernel_sizes, filter).value();
for (auto& d : *dim_numbers.mutable_kernel_spatial_dimensions()) {
if (d > kernel_output_feature_dimension) {
++d;
}
}
dim_numbers.add_kernel_spatial_dimensions(kernel_output_feature_dimension);
dim_numbers.set_kernel_output_feature_dimension(
kernel_output_feature_dimension + 1);
if (dim_numbers.kernel_input_feature_dimension() >
kernel_output_feature_dimension) {
dim_numbers.set_kernel_input_feature_dimension(
dim_numbers.kernel_input_feature_dimension() + 1);
}
const int64_t output_feature_dimension =
dim_numbers.output_feature_dimension();
for (auto& d : *dim_numbers.mutable_output_spatial_dimensions()) {
if (d > output_feature_dimension) {
++d;
}
}
dim_numbers.add_output_spatial_dimensions(output_feature_dimension);
dim_numbers.set_output_feature_dimension(output_feature_dimension + 1);
if (dim_numbers.output_batch_dimension() > output_feature_dimension) {
dim_numbers.set_output_batch_dimension(
dim_numbers.output_batch_dimension() + 1);
}
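  // As in HandleBatchGroupCount, walk the new size-G dimension diagonally so
  // that each output group only sees its matching input and kernel groups.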
Window window = convolution->window();
auto window_dim = window.add_dimensions();
window_dim->set_base_dilation(group_count);
window_dim->set_size(group_count);
window_dim->set_stride(group_count - 1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_reversal(false);
window_dim->set_window_dilation(1);
HloInstruction* new_convolution =
MakeConvolveHlo(
          activation, filter, /*feature_group_count=*/1,
          /*batch_group_count=*/1, window, dim_numbers,
convolution->precision_config(),
convolution->shape().element_type())
.value();
convolution->SetupDerivedInstruction(new_convolution);
changed_ = true;
return computation_->ReplaceInstruction(
convolution,
MakeReshapeHlo(convolution->shape(), new_convolution).value());
}
}
absl::StatusOr<bool> ConvolutionGroupConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (ConvolutionVisitor::Run(comp, should_expand_, is_cost_viable_,
convert_batch_groups_only_,
filter_expansion_)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "ConvolutionGroupConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/convolution_group_converter.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using ConvolutionGroupConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountEqualToInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,2], filter: f32[1,1,2]) -> f32[1,2,2] {
%input = f32[1,2,2]{2,1,0} parameter(0)
%copy = f32[1,2,2]{2,0,1} copy(f32[1,2,2]{2,1,0} %input)
%filter = f32[1,1,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,2]{2,0,1} %copy, f32[1,1,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand, cost_model,
                                      /*convert_batch_groups_only=*/false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->feature_group_count(), 1);
EXPECT_THAT(root->operand(1),
op::Select(op::Eq(op::Broadcast(op::Constant()),
op::Broadcast(op::Constant())),
op::Broadcast(op::Reshape(op::Parameter())),
op::Broadcast(op::Constant())));
}
TEST_F(ConvolutionGroupConverterTest,
ConvertFeatureGroupCountDivisorOfInputFeatureDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,4], filter: f32[1,2,2]) -> f32[1,2,2] {
%input = f32[1,2,4]{2,1,0} parameter(0)
%copy = f32[1,2,4]{2,0,1} copy(f32[1,2,4]{2,1,0} %input)
%filter = f32[1,2,2]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,2]{2,0,1} convolution(f32[1,2,4]{2,0,1} %copy, f32[1,2,2]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, feature_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return true; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
                                      /*convert_batch_groups_only=*/false);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->operand(0)->feature_group_count(), 1);
EXPECT_EQ(root->operand(0)->shape().rank(), 4);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule Convolve1D1Window_0_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[16,19,19,512]{3,2,1,0}, filter: f32[16,19,19,512]{3,2,1,0}) -> f32[3,3,512,1]{3,2,1,0} {
%input = f32[16,19,19,512]{3,2,1,0} parameter(0)
%filter = f32[16,19,19,512]{3,2,1,0} parameter(1)
ROOT %convolution = f32[3,3,512,1]{3,2,1,0} convolution(f32[16,19,19,512]{3,2,1,0} %input, f32[16,19,19,512]{3,2,1,0} %filter), window={size=19x19 pad=1_1x1_1}, dim_labels=f01b_i01o->01fb, batch_group_count=512
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
                                      /*convert_batch_groups_only=*/true);
ASSERT_TRUE(converter.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvert);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(ConvolutionGroupConverterTest,
ConvertBatchGroupCountNotEqualToInputBatchDim) {
std::string hlo_string = R"(HloModule m
ENTRY main {
%input = f32[1,1,1,4] parameter(0)
%filter = f32[1,1,1,2] parameter(1)
ROOT %convolution = f32[1,1,2,2] convolution(%input,%filter),
window={size=1x1}, dim_labels=f01b_i01o->01fb, batch_group_count=2
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
auto should_expand = [](HloInstruction* conv) { return true; };
auto cost_model = [](HloInstruction* conv) { return false; };
ConvolutionGroupConverter converter(should_expand,
cost_model,
                                      /*convert_batch_groups_only=*/true);
ASSERT_TRUE(converter.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_group_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_group_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22f6ce03-d609-4d36-bd27-5d70724af7e0 | cpp | tensorflow/tensorflow | cpu_gpu_shape_verifier | third_party/xla/xla/service/cpu_gpu_shape_verifier.cc | third_party/xla/xla/service/cpu_gpu_shape_verifier_test.cc | #include "xla/service/cpu_gpu_shape_verifier.h"
#include <array>
#include <string_view>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool IsAllowedS4U4CustomCall(const HloInstruction* instruction) {
static constexpr std::array<std::string_view, 1> kMetadataCustomCalls = {
"Sharding",
};
return absl::c_any_of(kMetadataCustomCalls, [&](std::string_view target) {
return target == instruction->custom_call_target();
});
}
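// Verifies that sub-byte non-pred element types (s4/u4) only appear in the
// allow-listed data-movement instructions handled in the switch below; any
// other instruction producing such a type is rejected.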
absl::Status VerifyS4U4Usage(HloInstruction* instruction) {
auto verify_subshape = [](const HloInstruction* instruction) {
return ShapeUtil::ForEachSubshapeWithStatus(
instruction->shape(), [&](const Shape& shape, const ShapeIndex&) {
if (primitive_util::IsSubByteNonPredType(shape.element_type())) {
return absl::InvalidArgumentError(absl::StrFormat(
"%s is currently only supported in allow-listed instructions, "
"but got instruction: %s",
primitive_util::LowercasePrimitiveTypeName(
shape.element_type()),
instruction->ToString()));
}
return absl::OkStatus();
});
};
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kConstant:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kFusion:
case HloOpcode::kGetTupleElement:
case HloOpcode::kParameter:
case HloOpcode::kSlice:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
break;
case HloOpcode::kCustomCall:
if (IsAllowedS4U4CustomCall(instruction)) {
break;
}
ABSL_FALLTHROUGH_INTENDED;
default:
return verify_subshape(instruction);
}
return absl::OkStatus();
}
}
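// Rejects shapes the CPU/GPU backends cannot handle: sparse array layouts,
// custom element sizes on types that are not sub-byte, and s4/u4 values
// outside the allow-list checked by VerifyS4U4Usage.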
absl::Status CpuGpuShapeVerifier::Preprocess(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
hlo->shape(), [&](const Shape& shape, const ShapeIndex&) {
if (shape.has_layout()) {
if (LayoutUtil::IsSparseArray(shape)) {
return absl::InvalidArgumentError(absl::StrFormat(
"The XLA CPU/GPU backend does not support sparse shapes: %s",
hlo->ToString()));
}
if (!primitive_util::IsSubByteNonPredType(shape.element_type()) &&
shape.layout().element_size_in_bits() != 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"The XLA CPU/GPU backend does not support custom element sizes "
"on non-sub-byte-bit types: %s",
hlo->ToString()));
}
}
return absl::OkStatus();
}));
TF_RETURN_IF_ERROR(VerifyS4U4Usage(hlo));
return ShapeVerifier::Preprocess(hlo);
}
} | #include "xla/service/cpu_gpu_shape_verifier.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
class CpuGpuShapeVerifierTest : public HloTestBase {
public:
CpuGpuShapeVerifierTest() {
HloVerifierOpts opts;
std::unique_ptr<TargetVerifierMetadata> metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
hlo_verifier_ = std::make_unique<HloVerifier>(std::move(metadata));
}
};
TEST_F(CpuGpuShapeVerifierTest, Int4UnsupportedInstruction) {
const char* const hlo_string = R"(
HloModule Module
ENTRY main {
p0 = u4[2,5] parameter(0)
ROOT out = u4[2,5] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("u4 is currently only supported in allow-listed instructions"));
}
TEST_F(CpuGpuShapeVerifierTest, Int4SupportedInstruction) {
const char* const hlo_string = R"(
HloModule Module
bcast {
p0 = u4[] parameter(0)
ROOT out = u4[3, 3] broadcast(p0), dimensions={}
}
ENTRY main {
p0 = u4[] parameter(0)
ROOT out = u4[3, 3] call(p0), to_apply=bcast
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
TF_EXPECT_OK(status);
}
TEST_F(CpuGpuShapeVerifierTest, Int4ShardingCustomCall) {
const char* const hlo_string = R"(
HloModule Module
ENTRY main {
p0 = u4[] parameter(0)
ROOT sharded = u4[] custom-call(p0), custom_call_target="Sharding"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
TF_EXPECT_OK(status);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu_gpu_shape_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu_gpu_shape_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03566aeb-b4e9-429e-b346-edc1a5a6c3d5 | cpp | tensorflow/tensorflow | hlo_ordering | third_party/xla/xla/service/hlo_ordering.cc | third_party/xla/xla/service/hlo_ordering_test.cc | #include "xla/service/hlo_ordering.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
bool HloOrdering::ExecutesBefore(const HloInstruction* a,
const HloInstruction* b) const {
switch (GetExecutionConstraint(a, b)) {
case ExecutionConstraint::kIsSame:
return false;
case ExecutionConstraint::kRunBeforeStart:
case ExecutionConstraint::kRunBeforeEnd:
case ExecutionConstraint::kRunExclusiveBefore:
return true;
case ExecutionConstraint::kRunExclusiveAfter:
case ExecutionConstraint::kRunAfter:
case ExecutionConstraint::kUnordered:
return false;
}
}
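// Computes the ordering constraint between `a` and `b` by lifting both to
// their nearest ancestor instructions in a common computation, then
// special-casing while and conditional constructs before falling back to the
// subclass-provided intra-computation order.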
HloOrdering::ExecutionConstraint HloOrdering::GetExecutionConstraint(
const HloInstruction* a, const HloInstruction* b) const {
auto is_async_wrapped = [](const HloInstruction* a, const HloInstruction* b) {
return a->IsAsynchronous() && a->async_wrapped_instruction() == b;
};
if (a == b || is_async_wrapped(a, b) || is_async_wrapped(b, a)) {
return ExecutionConstraint::kIsSame;
}
const HloInstruction* a_ancestor;
const HloInstruction* b_ancestor;
std::tie(a_ancestor, b_ancestor) =
call_graph_->NearestAncestorsInSameComputation(
const_cast<HloInstruction*>(a), const_cast<HloInstruction*>(b));
if (a_ancestor == nullptr) {
VLOG(4) << "Ancestors in a common computation could not be found between"
<< a->ToString() << "\n and \n"
<< b->ToString() << "\n so consider them to be unordered.\n";
return ExecutionConstraint::kUnordered;
}
CHECK_NE(b_ancestor, nullptr);
CHECK_EQ(a_ancestor->parent(), b_ancestor->parent());
if (a_ancestor == b_ancestor && a_ancestor->opcode() == HloOpcode::kWhile) {
const HloComputation* body = a_ancestor->while_body();
const HloComputation* condition = a_ancestor->while_condition();
if (call_graph_->InstructionIsNestedIn(a, condition) &&
call_graph_->InstructionIsNestedIn(b, body)) {
return ExecutionConstraint::kRunBeforeEnd;
}
}
if (a_ancestor == b_ancestor &&
(a_ancestor->opcode() == HloOpcode::kConditional)) {
int a_branch = -1;
int b_branch = -1;
for (int j = 0; j < a_ancestor->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
a, a_ancestor->branch_computation(j))) {
a_branch = j;
}
if (call_graph_->InstructionIsNestedIn(
b, a_ancestor->branch_computation(j))) {
b_branch = j;
}
}
if (a_branch == -1 && b_branch == -1) {
CHECK_EQ(a, a_ancestor);
CHECK_EQ(b, b_ancestor);
CHECK_EQ(a, b);
return ExecutionConstraint::kIsSame;
}
if (b_branch == -1) {
CHECK_EQ(b, a_ancestor);
return ExecutionConstraint::kRunBeforeEnd;
}
if (a_branch == -1) {
CHECK_EQ(a, a_ancestor);
return ExecutionConstraint::kRunAfter;
}
if (a_branch < b_branch) {
return ExecutionConstraint::kRunExclusiveBefore;
}
if (b_branch < a_branch) {
return ExecutionConstraint::kRunExclusiveAfter;
}
}
if (ExecutesBeforeInSameComputation(a_ancestor, b_ancestor)) {
return ExecutionConstraint::kRunBeforeStart;
}
if (ExecutesBeforeInSameComputation(b_ancestor, a_ancestor)) {
return ExecutionConstraint::kRunAfter;
}
VLOG(1) << "Cannot determine order between:" << a->ToString() << "\n"
<< "and " << b->ToString() << " which are in the same computation\n";
return ExecutionConstraint::kUnordered;
}
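// Returns true if the value `a` is defined before `b`. Entry computation
// parameters are defined before everything else, and phi values of while and
// conditional instructions are ordered relative to the values produced
// inside their called computations.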
bool HloOrdering::IsDefinedBefore(const HloValue& a, const HloValue& b) const {
const HloModule* module = b.defining_instruction()->GetModule();
if (b.defining_instruction()->parent() == module->entry_computation() &&
b.defining_instruction()->opcode() == HloOpcode::kParameter) {
return false;
}
if (a.defining_instruction()->parent() == module->entry_computation() &&
a.defining_instruction()->opcode() == HloOpcode::kParameter) {
return true;
}
auto is_body_or_condition_phi = [](const HloValue& v) {
return v.is_phi() &&
v.defining_instruction()->opcode() == HloOpcode::kParameter;
};
if (is_body_or_condition_phi(a) && !is_body_or_condition_phi(b) &&
call_graph_->InstructionIsNestedIn(b.defining_instruction(),
a.defining_instruction()->parent())) {
return true;
}
if (is_body_or_condition_phi(b) &&
call_graph_->InstructionIsNestedIn(a.defining_instruction(),
b.defining_instruction()->parent())) {
return false;
}
if (b.is_phi() && b.defining_instruction()->opcode() == HloOpcode::kWhile &&
(call_graph_->InstructionIsNestedIn(
a.defining_instruction(), b.defining_instruction()->while_body()) ||
call_graph_->InstructionIsNestedIn(
a.defining_instruction(),
b.defining_instruction()->while_condition()))) {
return true;
}
if (b.is_phi() &&
b.defining_instruction()->opcode() == HloOpcode::kConditional) {
for (int j = 0; j < b.defining_instruction()->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
a.defining_instruction(),
b.defining_instruction()->branch_computation(j))) {
return true;
}
}
}
return ExecutesBefore(a.defining_instruction(), b.defining_instruction());
}
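// Returns true if every use in `uses` executes before `value` is defined;
// this is the condition under which the used buffer may be reused for
// `value` without interference.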
bool HloOrdering::UsesBeforeValueDefinition(
absl::Span<const HloUse* const> uses, const HloValue& value,
const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr) const {
bool has_use_in_exclusive_branches = false;
bool has_escaped_use_in_conditional = false;
auto UseIsBeforeValueDefinition = [&](const HloUse& use) {
VLOG(4) << "UseIsBeforeValueDefinition(use=" << use
<< ", value=" << value.ToShortString() << ")";
switch (
GetExecutionConstraint(use.instruction, value.defining_instruction())) {
case HloOrdering::ExecutionConstraint::kIsSame: {
if (use_is_always_before_def_in_same_instr) {
return true;
}
HloInstruction* operand =
use.instruction->mutable_operand(use.operand_number);
HloInstruction* user = value.defining_instruction();
auto operand_index_ptr =
std::make_unique<ShapeIndex>(use.operand_index);
if (use.instruction->IsAsynchronous()) {
if (value.defining_instruction()->parent() ==
use.instruction->async_wrapped_computation()) {
if (use.instruction->opcode() == HloOpcode::kAsyncStart) {
operand = use.instruction->async_wrapped_computation()
->parameter_instruction(use.operand_number);
} else {
CHECK_GT(use.operand_index.size(), 1);
operand = use.instruction->async_wrapped_computation()
->parameter_instruction(use.operand_index.at(1));
operand_index_ptr = std::make_unique<ShapeIndex>(
absl::MakeSpan(use.operand_index)
.subspan(2, use.operand_index.size() - 2));
}
}
}
if (dataflow.CanShareOperandBufferWithUser(
operand,
*operand_index_ptr,
user,
value.defining_index())) {
VLOG(4)
<< " use is value def, and instruction can share use buffer.";
return true;
}
break;
}
case HloOrdering::ExecutionConstraint::kRunExclusiveAfter:
VLOG(4) << " use and value def are in exclusive branches.";
if (!has_escaped_use_in_conditional) {
has_use_in_exclusive_branches = true;
VLOG(4) << "Allowing them to share buffer.\n";
return true;
}
VLOG(4) << "value def has escaped use in conditional. \n";
break;
case HloOrdering::ExecutionConstraint::kRunExclusiveBefore:
case HloOrdering::ExecutionConstraint::kRunBeforeStart:
case HloOrdering::ExecutionConstraint::kRunBeforeEnd:
VLOG(4)
<< " use instruction executes before value-defining instruction";
return true;
case HloOrdering::ExecutionConstraint::kRunAfter:
if (use_is_always_before_def_in_same_instr &&
use.instruction->opcode() == HloOpcode::kCollectivePermuteDone &&
use.instruction->operand(0) == value.instruction()) {
return true;
}
break;
case HloOrdering::ExecutionConstraint::kUnordered:
break;
}
if (use.instruction->opcode() == HloOpcode::kWhile) {
const HloInstruction* xla_while = use.instruction;
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
xla_while->while_body())) {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in body";
return true;
}
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
xla_while->while_condition())) {
if (value.defining_instruction() !=
xla_while->while_condition()->parameter_instruction(0)) {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in condition and is not the parameter";
return false;
} else {
VLOG(4) << " use is while " << use.instruction->name()
<< " and def is in condition and is the parameter";
return true;
}
}
}
if (value.defining_instruction()->opcode() == HloOpcode::kWhile) {
CHECK(value.is_phi());
const HloInstruction* xla_while = value.defining_instruction();
if (call_graph_->InstructionIsNestedIn(use.instruction,
xla_while->while_body()) ||
call_graph_->InstructionIsNestedIn(use.instruction,
xla_while->while_condition())) {
VLOG(4) << " value is while " << value.defining_instruction()->name()
<< " and use is in condition or body";
return true;
}
}
if (use.instruction->opcode() == HloOpcode::kCall) {
const HloInstruction* call = use.instruction;
if (call_graph_->InstructionIsNestedIn(value.defining_instruction(),
call->to_apply())) {
VLOG(4) << " use is call " << use.instruction->name()
<< " and def is in called computation";
return true;
}
}
if (use.instruction->IsAsynchronous()) {
const HloInstruction* async = use.instruction;
if (call_graph_->InstructionIsNestedIn(
value.defining_instruction(),
async->async_wrapped_computation())) {
VLOG(4) << " use is async " << use.instruction->name()
<< " and def is in called computation";
return true;
}
}
if (use.instruction->opcode() == HloOpcode::kConditional) {
const HloInstruction* conditional = use.instruction;
for (int j = 0; j < conditional->branch_count(); ++j) {
if (call_graph_->InstructionIsNestedIn(
value.defining_instruction(),
conditional->branch_computation(j))) {
if (!dataflow.ValueIsDefinedAt(
use.instruction->operand(use.operand_number), {})) {
for (auto value_use : value.GetUses()) {
VLOG(4) << "def have use:" << value_use << "\n";
if (value_use.instruction ==
value_use.instruction->parent()->root_instruction()) {
VLOG(4) << "def use is conditional root \n";
has_escaped_use_in_conditional = true;
break;
}
}
}
if (!has_use_in_exclusive_branches) {
VLOG(4) << " use is conditional " << use.instruction->name()
<< " and def is in " << j << "th branch computation";
return true;
}
}
}
if (value.defining_instruction() == use.instruction) {
VLOG(4) << " use is conditional " << use << " and def is "
<< value.ToShortString();
return true;
}
}
VLOG(4) << " use is not before value definition";
return false;
};
for (auto* use : uses) {
if (!UseIsBeforeValueDefinition(*use)) {
return false;
}
}
return true;
}
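// Returns true if `a`'s live range (its definition and all of its uses) ends
// strictly before `b` is defined, so the two values are never live at the
// same time.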
bool HloOrdering::LiveRangeStrictlyBefore(
const HloValue& a, const HloValue& b, const HloDataflowAnalysis& dataflow,
bool use_is_always_before_def_in_same_instr) const {
VLOG(4) << "LiveRangeStrictlyBefore(a = " << a.ToShortString()
<< ", b = " << b.ToShortString() << ")";
VLOG(4) << "Parent:" << a.instruction()->parent()->ToString() << "\n";
if (!IsDefinedBefore(a, b)) {
VLOG(4) << a << " not defined before " << b;
return false;
}
if (a.live_out_of_module()) {
VLOG(4) << a << " is live out of module and not defined before " << b;
return false;
}
for (const HloPosition& pos : a.positions()) {
if (pos.instruction->parent()->root_instruction() == pos.instruction &&
call_graph().InstructionIsNestedIn(b.instruction(),
pos.instruction->parent())) {
return false;
}
}
std::vector<const HloUse*> uses;
for (const HloUse& use : a.GetUses()) {
if (dataflow.DoesNotUseOperandBuffer(a.instruction(), a.index(),
use.instruction)) {
continue;
}
uses.push_back(&use);
}
if (!UsesBeforeValueDefinition(uses, b, dataflow,
use_is_always_before_def_in_same_instr)) {
VLOG(4) << "uses of " << a << "not before " << b << " is defined";
return false;
}
if (a.IsRootOf(b.instruction()->parent())) {
VLOG(4) << a << " is live out of computation and defined before " << b
<< " which is in same computation";
return false;
}
return true;
}
bool HloOrdering::MayInterfere(const HloValue& a, const HloValue& b,
const HloDataflowAnalysis& dataflow) const {
return !LiveRangeStrictlyBefore(a, b, dataflow) &&
!LiveRangeStrictlyBefore(b, a, dataflow);
}
PredecessorHloOrdering::PredecessorHloOrdering(const HloModule* module)
: HloOrdering(module) {}
bool PredecessorHloOrdering::ExecutesBeforeInSameComputation(
const HloInstruction* a, const HloInstruction* b) const {
CHECK_EQ(a->parent(), b->parent());
return a != b && predecessors_.at(a->parent())->IsReachable(a, b);
}
std::string PredecessorHloOrdering::ToStringHelper(
const std::string& name) const {
std::vector<std::string> pieces;
pieces.push_back(name);
for (auto* computation : module_->MakeNonfusionComputations()) {
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
const auto all = computation->MakeInstructionPostOrder();
for (auto instruction : all) {
pieces.push_back(
absl::StrFormat(" %s predecessors:", instruction->name()));
for (auto predecessor : all) {
if (predecessors_.at(computation)
->IsReachable(predecessor, instruction)) {
pieces.push_back(absl::StrFormat(" %s", predecessor->name()));
}
}
}
}
return absl::StrJoin(pieces, "\n");
}
DependencyHloOrdering::DependencyHloOrdering(const HloModule* module)
: PredecessorHloOrdering(module) {
for (auto* computation : module->MakeNonfusionComputations()) {
predecessors_.emplace(computation, HloReachabilityMap::Build(computation));
}
}
std::string DependencyHloOrdering::ToString() const {
return ToStringHelper("DependencyHloOrdering");
}
SequentialHloOrdering::SequentialHloOrdering(const HloSchedule& schedule)
: HloOrdering(schedule.module()), schedule_(schedule) {
Initialize();
}
SequentialHloOrdering::SequentialHloOrdering(HloSchedule&& schedule)
: HloOrdering(schedule.module()), schedule_(std::move(schedule)) {
Initialize();
}
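// Builds a map from each instruction to its position in the schedule so that
// ExecutesBeforeInSameComputation reduces to a pair of O(1) lookups.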
void SequentialHloOrdering::Initialize() {
TF_DCHECK_OK(schedule_.Verify());
for (const auto& computation_sequence : schedule_.sequences()) {
const auto& order = computation_sequence.second.instructions();
for (int i = 0; i < order.size(); ++i) {
InsertOrDie(&order_position_, order[i], i);
}
}
}
bool SequentialHloOrdering::ExecutesBeforeInSameComputation(
const HloInstruction* a, const HloInstruction* b) const {
CHECK_EQ(a->parent(), b->parent());
if (!order_position_.contains(a) || !order_position_.contains(b)) {
return false;
}
if (a->parent()->root_instruction() == a) {
return false;
}
return order_position_.at(a) < order_position_.at(b);
}
const HloInstructionSequence* SequentialHloOrdering::SequentialOrder(
const HloComputation& computation) const {
return schedule_.is_computation_scheduled(&computation)
? &schedule_.sequence(&computation)
: nullptr;
}
std::string SequentialHloOrdering::ToString() const {
return absl::StrCat("SequentialHloOrdering\n", schedule_.ToString());
}
} | #include "xla/service/hlo_ordering.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloOrderingTest : public HloTestBase {};
TEST_F(HloOrderingTest, InstructionsInDifferentComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder_c = HloComputation::Builder("C");
HloInstruction* c = builder_c.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
HloComputation* computation_c =
module->AddEmbeddedComputation(builder_c.Build());
auto builder_b = HloComputation::Builder("B");
builder_b.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* b = builder_b.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_c));
HloComputation* computation_b =
module->AddEmbeddedComputation(builder_b.Build());
auto builder_a = HloComputation::Builder("A");
HloInstruction* a = builder_a.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_c));
HloComputation* computation_a =
module->AddEmbeddedComputation(builder_a.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* x = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {}, computation_a));
HloInstruction* y = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {x}, computation_b));
module->AddEntryComputation(builder.Build());
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.ExecutesBefore(x, y));
EXPECT_FALSE(ordering.ExecutesBefore(y, x));
EXPECT_TRUE(ordering.ExecutesBefore(a, b));
EXPECT_FALSE(ordering.ExecutesBefore(b, a));
EXPECT_FALSE(ordering.ExecutesBefore(a, x));
EXPECT_TRUE(ordering.ExecutesBefore(a, y));
EXPECT_FALSE(ordering.ExecutesBefore(x, a));
EXPECT_FALSE(ordering.ExecutesBefore(y, a));
EXPECT_FALSE(ordering.ExecutesBefore(b, x));
EXPECT_FALSE(ordering.ExecutesBefore(b, y));
EXPECT_TRUE(ordering.ExecutesBefore(x, b));
EXPECT_FALSE(ordering.ExecutesBefore(y, b));
EXPECT_FALSE(ordering.ExecutesBefore(c, a));
EXPECT_FALSE(ordering.ExecutesBefore(c, b));
EXPECT_FALSE(ordering.ExecutesBefore(c, x));
EXPECT_FALSE(ordering.ExecutesBefore(c, y));
EXPECT_FALSE(ordering.ExecutesBefore(a, c));
EXPECT_FALSE(ordering.ExecutesBefore(b, c));
EXPECT_FALSE(ordering.ExecutesBefore(x, c));
EXPECT_FALSE(ordering.ExecutesBefore(y, c));
}
TEST_F(HloOrderingTest, InstructionsInWhileComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "body_param"));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kNegate, body_param));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "cond_param"));
auto convert = cond_builder.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(xla::PRED, {}), cond_param));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
module->AddEntryComputation(builder.Build());
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.ExecutesBefore(constant, xla_while));
EXPECT_TRUE(ordering.ExecutesBefore(constant, cond_param));
EXPECT_TRUE(ordering.ExecutesBefore(constant, convert));
EXPECT_TRUE(ordering.ExecutesBefore(constant, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(constant, negate));
EXPECT_FALSE(ordering.ExecutesBefore(xla_while, body_param));
EXPECT_FALSE(ordering.ExecutesBefore(xla_while, cond_param));
EXPECT_FALSE(ordering.ExecutesBefore(body_param, xla_while));
EXPECT_FALSE(ordering.ExecutesBefore(cond_param, xla_while));
EXPECT_TRUE(ordering.ExecutesBefore(cond_param, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(convert, body_param));
EXPECT_TRUE(ordering.ExecutesBefore(cond_param, negate));
EXPECT_TRUE(ordering.ExecutesBefore(convert, negate));
EXPECT_FALSE(ordering.ExecutesBefore(body_param, cond_param));
}
TEST_F(HloOrderingTest, ParametersDefinedBeforeOthers) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(param),
dataflow->GetValueDefinedAt(constant)));
  EXPECT_FALSE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
                                        dataflow->GetValueDefinedAt(param)));
}
TEST_F(HloOrderingTest, ValuesInWhileComputations) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto body_builder = HloComputation::Builder("body");
auto body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "body_param"));
auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
scalar_shape, HloOpcode::kNegate, body_param));
HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
auto cond_builder = HloComputation::Builder("condition");
auto cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "cond_param"));
auto convert = cond_builder.AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(xla::PRED, {}), cond_param));
HloComputation* condition =
module->AddEmbeddedComputation(cond_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto xla_while = builder.AddInstruction(
HloInstruction::CreateWhile(scalar_shape, condition, body, constant));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, constant, xla_while));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(convert), *dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.MayInterfere(dataflow->GetValueDefinedAt(negate),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(constant),
dataflow->GetValueDefinedAt(xla_while)));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(convert),
dataflow->GetValueDefinedAt(xla_while), *dataflow));
EXPECT_FALSE(ordering.MayInterfere(dataflow->GetValueDefinedAt(convert),
dataflow->GetValueDefinedAt(xla_while),
*dataflow));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(xla_while),
dataflow->GetValueDefinedAt(add)));
ASSERT_EQ(dataflow->GetValueDefinedAt(xla_while).GetUses().size(), 1);
const HloUse* while_use =
dataflow->GetValueDefinedAt(xla_while).GetUses().data();
EXPECT_EQ(while_use->instruction, add);
EXPECT_TRUE(ordering.UsesBeforeValueDefinition(
{&while_use, 1}, dataflow->GetValueDefinedAt(add), *dataflow));
EXPECT_TRUE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(xla_while), dataflow->GetValueDefinedAt(add),
*dataflow));
}
TEST_F(HloOrderingTest, ToStringDoesNotCrash) {
const char* module_str = R"(
HloModule test_module
body.v8 {
prev.1 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(prev.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.4, constant.1)
get-tuple-element.5 = f32[3]{0} get-tuple-element(prev.1), index=3
get-tuple-element.6 = f32[3]{0} get-tuple-element(prev.1), index=1
get-tuple-element.7 = f32[3]{0} get-tuple-element(prev.1), index=2
ROOT tuple = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) tuple(add, get-tuple-element.5, get-tuple-element.6, get-tuple-element.7)
}
condition.v4 {
constant.2 = s32[] constant(2)
prev.2 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) parameter(0)
get-tuple-element.8 = s32[] get-tuple-element(prev.2), index=0
ROOT greater-than = pred[] compare(constant.2, get-tuple-element.8), direction=GT
}
fused_computation {
get-tuple-element.5.param_1 = f32[3]{0} parameter(1)
get-tuple-element.6.param_2 = f32[3]{0} parameter(2)
add.4 = f32[3]{0} add(get-tuple-element.5.param_1, get-tuple-element.6.param_2)
get-tuple-element.7.param_1.1 = f32[3]{0} parameter(0)
ROOT add.5 = f32[3]{0} add(add.4, get-tuple-element.7.param_1.1)
}
ENTRY while.v11 {
constant.5 = s32[] constant(0)
constant.6 = f32[3]{0} constant({1, 1, 1})
constant.7 = f32[3]{0} constant({2, 2, 2})
constant.8 = f32[3]{0} constant({3, 3, 3})
tuple.1 = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) tuple(constant.5, constant.6, constant.7, constant.8)
while = (s32[], f32[3]{0}, f32[3]{0}, f32[3]{0}) while(tuple.1), condition=condition.v4, body=body.v8
get-tuple-element.9 = f32[3]{0} get-tuple-element(while), index=3
get-tuple-element.10 = f32[3]{0} get-tuple-element(while), index=1
get-tuple-element.11 = f32[3]{0} get-tuple-element(while), index=2
ROOT fusion = f32[3]{0} fusion(get-tuple-element.9, get-tuple-element.10, get-tuple-element.11), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DependencyHloOrdering ordering(module.get());
ordering.ToString();
}
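// Values defined inside the branch computations of a conditional are ordered
// before the conditional itself (and before its downstream users) under
// DependencyHloOrdering.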
TEST_F(HloOrderingTest, ConditionalInstructionOrdering) {
const char* module_str = R"(
HloModule test_conditional_module
true_branch {
param.1 = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(param.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(param.1), index=1
add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple.1 = (s32[], s32[]) tuple(add.1, get-tuple-element.1)
}
false_branch {
param.2 = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(param.2), index=0
get-tuple-element.4 = s32[] get-tuple-element(param.2), index=1
add.2 = s32[] add(get-tuple-element.3, get-tuple-element.4)
ROOT tuple.2 = (s32[], s32[]) tuple(add.2, get-tuple-element.4)
}
ENTRY root {
param.3 = (pred[], (s32[], s32[])) parameter(0)
pred.1 = pred[] get-tuple-element(param.3), index=0
cond_arg.1 = (s32[], s32[]) get-tuple-element(param.3), index=1
conditional = (s32[], s32[]) conditional(pred.1, cond_arg.1, cond_arg.1), true_computation=true_branch, false_computation=false_branch
cond_res.1 = s32[] get-tuple-element(conditional), index=0
cond_res.2 = s32[] get-tuple-element(conditional), index=1
add.3 = s32[] add(cond_res.1, cond_res.2)
ROOT result = (s32[], s32[], s32[]) tuple(add.3, cond_res.1, cond_res.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
HloDataflowAnalysis::Run(*module, true));
DependencyHloOrdering ordering(module.get());
HloInstruction* add_1 = FindInstruction(module.get(), "add.1");
HloInstruction* add_2 = FindInstruction(module.get(), "add.2");
HloInstruction* add_3 = FindInstruction(module.get(), "add.3");
HloInstruction* conditional = FindInstruction(module.get(), "conditional");
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(add_2)));
EXPECT_TRUE(
ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_2),
dataflow->GetValueDefinedAt(conditional)));
EXPECT_TRUE(
ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(conditional)));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_1),
dataflow->GetValueDefinedAt(add_3)));
EXPECT_TRUE(ordering.IsDefinedBefore(dataflow->GetValueDefinedAt(add_2),
dataflow->GetValueDefinedAt(add_3)));
}
TEST_F(HloOrderingTest,
ValuesLiveOutOfModuleInterfereWithInstructionsAfterRoot) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto builder = HloComputation::Builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
HloInstruction* dead = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
HloComputation* entry =
      module->AddEntryComputation(builder.Build(/*root_instruction=*/root));
HloSchedule schedule(module.get());
schedule.set_sequence(entry, {param, root, dead});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
EXPECT_FALSE(ordering.ExecutesBefore(root, dead));
EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
dataflow->GetValueDefinedAt(dead),
*dataflow));
}
TEST_F(HloOrderingTest,
ValuesLiveOutOfComputationInterfereWithInstructionsAfterRoot) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
auto subbuilder = HloComputation::Builder(TestName() + ".sub");
HloInstruction* param = subbuilder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* root = subbuilder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
HloInstruction* dead = subbuilder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
HloComputation* subcomputation = module->AddEmbeddedComputation(
      subbuilder.Build(/*root_instruction=*/root));
auto builder = HloComputation::Builder(TestName());
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(scalar_shape, {c}, subcomputation));
HloComputation* entry = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(subcomputation, {param, root, dead});
schedule.set_sequence(entry, {c, call});
TF_ASSERT_OK(schedule.Verify());
SequentialHloOrdering ordering(schedule);
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
EXPECT_FALSE(ordering.ExecutesBefore(root, dead));
EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
*dataflow));
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
dataflow->GetValueDefinedAt(dead),
*dataflow));
}
TEST_F(HloOrderingTest, InterferenceWithOuterRoot) {
absl::string_view hlo_string = R"(
HloModule InterferenceWithOuterRoot, is_scheduled=true
Embedded (embedded_param: f32[4096,4096]) -> f32[4096,4096] {
embedded_param = f32[4096,4096]{1,0} parameter(0)
multiply = f32[4096,4096]{1,0} multiply(embedded_param, embedded_param)
ROOT log = f32[4096,4096]{1,0} log(multiply)
}
ENTRY InterferenceWithOuterRoot {
param = f32[4096,4096]{1,0} parameter(0)
ROOT add = f32[4096,4096]{1,0} add(param, param)
call = f32[4096,4096]{1,0} call(param), to_apply=Embedded
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
DependencyHloOrdering ordering(module.get());
auto multiply = FindInstruction(module.get(), "multiply");
auto add = FindInstruction(module.get(), "add");
EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(multiply),
dataflow->GetValueDefinedAt(add),
*dataflow));
}
TEST_F(HloOrderingTest, RootNotLastInstruction) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
body2 {
p_body2 = (f32[2]{0}) parameter(0)
p_body2.1 = f32[2]{0} get-tuple-element(p_body2), index=0
add.3 = f32[2]{0} add(p_body2.1, p_body2.1)
ROOT root2 = (f32[2]{0}) tuple(add.3)
}
condition2 {
p_cond2 = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
ROOT root = (f32[2]{0}) tuple(p_body.1)
copy = f32[2]{0} copy(p_body.1)
tuple = (f32[2]{0}) tuple(copy)
while.1 = (f32[2]{0}) while(tuple), condition=condition2, body=body2
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const0)
ROOT while.0 = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
SequentialHloOrdering ordering(module->schedule());
auto root = FindInstruction(module.get(), "root");
auto p_body_2 = FindInstruction(module.get(), "p_body2");
auto tuple_use = HloUse{root, 0};
const HloValue& value = dataflow->GetUniqueValueAt(p_body_2, {0});
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&tuple_use}, value, *dataflow));
}
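// The uses on an async-start / wrapped-call / async-done chain should all be
// ordered before the value defined at the root of the wrapped computation.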
TEST_F(HloOrderingTest, AsyncCallUses) {
absl::string_view hlo_string = R"(
HloModule single_sc_async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%buf = s32[1024]{0} custom-call(), custom_call_target="AllocateBuffer"
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %buf), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
HloModuleConfig hlo_config;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, hlo_config));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
DependencyHloOrdering ordering(module.get());
auto async_start = FindInstruction(module.get(), "async-start");
auto async_done = FindInstruction(module.get(), "async-done");
auto call = FindInstruction(module.get(), "call");
auto output2 = FindInstruction(module.get(), "output2");
auto async_start_use = HloUse{async_start, 1};
auto async_done_use = HloUse{async_done, 0, {0, 1}};
auto call_use = HloUse{call, 1};
const HloValue& value = dataflow->GetUniqueValueAt(output2, {});
EXPECT_TRUE(ordering.UsesBeforeValueDefinition(
{&async_start_use, &call_use, &async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest,
UsesBeforeValueDefinitionValueIsAsyncWrappedCallInstruction) {
constexpr absl::string_view hlo_string = R"(
HloModule UsesBeforeValueDefinitionValueIsAsyncWrappedCallInstruction, input_output_alias={ {}: (0, {}, must-alias) }, entry_computation_layout={(f32[2,2])->f32[2,2]}
%host_computation {
%arg_0.2 = f32[2,2] parameter(0)
%constant.1 = f32[] constant(2)
%broadcast.1 = f32[2,2] broadcast(f32[] %constant.1), dimensions={}
ROOT %multiply.1 = f32[2,2] multiply(f32[2,2] %arg_0.2, f32[2,2] %broadcast.1)
}, execution_thread="host"
%async_wrapped_comp {
%param_0 = f32[2,2] parameter(0)
ROOT %async_wrapped_call = f32[2,2] custom-call(f32[2,2] %param_0), custom_call_target="HostExecute", called_computations={%host_computation}
}, execution_thread="host"
ENTRY %main {
%p0 = f32[2,2] parameter(0)
%host-async-start = ((f32[2,2]), f32[2,2], u32[]) async-start(f32[2,2] %p0), async_execution_thread="host", calls=%async_wrapped_comp
%host-async-done = f32[2,2] async-done(((f32[2,2]), f32[2,2], u32[]) %host-async-start)
ROOT %copy.1 = f32[2,2] copy(f32[2,2] %host-async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
HloInstruction* async_wrapped_call =
FindInstruction(module.get(), "async_wrapped_call");
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* broadcast1 = FindInstruction(module.get(), "broadcast.1");
ASSERT_NE(async_start, nullptr);
ASSERT_NE(async_done, nullptr);
ASSERT_NE(async_wrapped_call, nullptr);
ASSERT_NE(p0, nullptr);
HloUse async_start_use = HloUse{async_start, 0};
HloUse async_done_use = HloUse{async_done, 0, {0, 0}};
HloUse call_use = HloUse{async_wrapped_call, 0};
const HloValue& value = dataflow->GetUniqueValueAt(async_wrapped_call, {});
const HloValue& broadcast_value = dataflow->GetUniqueValueAt(broadcast1, {});
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_start_use}, value, *dataflow));
EXPECT_TRUE(ordering.UsesBeforeValueDefinition({&async_start_use},
broadcast_value, *dataflow));
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&call_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest,
UsesBeforeValueDefinitionValueIsAnAliasedAsyncWrappedCallInstruction) {
constexpr absl::string_view hlo_string = R"(
HloModule UsesBeforeValueDefinitionValueIsAnAliasedAsyncWrappedCallInstruction, input_output_alias={ {}: (0, {}, must-alias) }, entry_computation_layout={(f32[2,2])->f32[2,2]}
%host_computation {
%arg_0.2 = f32[2,2] parameter(0)
%constant.1 = f32[] constant(2)
%broadcast.1 = f32[2,2] broadcast(f32[] %constant.1), dimensions={}
ROOT %multiply.1 = f32[2,2] multiply(f32[2,2] %arg_0.2, f32[2,2] %broadcast.1)
}, execution_thread="host"
%async_wrapped_comp {
%param_0 = f32[2,2] parameter(0)
ROOT %async_wrapped_call = f32[2,2] custom-call(f32[2,2] %param_0), custom_call_target="HostExecute", called_computations={%host_computation}, output_to_operand_aliasing={{}: (0, {})}
}, execution_thread="host"
ENTRY %main {
%p0 = f32[2,2] parameter(0)
%host-async-start = ((f32[2,2]), f32[2,2], u32[]) async-start(f32[2,2] %p0), async_execution_thread="host", calls=%async_wrapped_comp
ROOT %host-async-done = f32[2,2] async-done(((f32[2,2]), f32[2,2], u32[]) %host-async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
HloInstruction* async_wrapped_call =
FindInstruction(module.get(), "async_wrapped_call");
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(async_start, nullptr);
ASSERT_NE(async_done, nullptr);
ASSERT_NE(async_wrapped_call, nullptr);
ASSERT_NE(p0, nullptr);
HloUse async_start_use = HloUse{async_start, 0};
HloUse async_done_use = HloUse{async_done, 0, {0, 0}};
HloUse call_use = HloUse{async_wrapped_call, 0};
const HloValue& value = dataflow->GetUniqueValueAt(async_wrapped_call, {});
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_start_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&call_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest, AsyncOpUsesBeforeValueDefinitionUseIsTuple) {
constexpr absl::string_view hlo_string = R"(
HloModule AsyncOpUsesBeforeValueDefinitionUseIsTuple, entry_computation_layout={((f32[2,2]))->f32[2,2]}
%host_computation {
p0 = (f32[2,2]) parameter(0)
p0_gte = f32[2,2] get-tuple-element(p0), index=0
%constant.1 = f32[] constant(2)
%broadcast.1 = f32[2,2] broadcast(f32[] %constant.1), dimensions={}
ROOT %multiply.1 = f32[2,2] multiply(f32[2,2] p0_gte, f32[2,2] %broadcast.1)
}, execution_thread="host"
%async_wrapped_comp {
%param_0 = (f32[2,2]) parameter(0)
ROOT %async_wrapped_call = f32[2,2] custom-call(%param_0), custom_call_target="HostExecute", called_computations={%host_computation}
}, execution_thread="host"
ENTRY %main {
%p0 = (f32[2,2]) parameter(0)
%host-async-start = (((f32[2,2])), f32[2,2], u32[]) async-start(%p0), async_execution_thread="host", calls=%async_wrapped_comp
ROOT %host-async-done = f32[2,2] async-done(%host-async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloDataflowAnalysis> dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
HloInstruction* async_start =
FindInstruction(module.get(), "host-async-start");
HloInstruction* async_done = FindInstruction(module.get(), "host-async-done");
HloInstruction* async_wrapped_call =
FindInstruction(module.get(), "async_wrapped_call");
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(async_start, nullptr);
ASSERT_NE(async_done, nullptr);
ASSERT_NE(async_wrapped_call, nullptr);
ASSERT_NE(p0, nullptr);
HloUse async_start_use = HloUse{async_start, 0};
HloUse async_done_use = HloUse{async_done, 0, {0, 0, 0}};
HloUse call_use = HloUse{async_wrapped_call, 0};
const HloValue& value = dataflow->GetUniqueValueAt(async_wrapped_call, {});
DependencyHloOrdering ordering(module.get());
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_start_use}, value, *dataflow));
EXPECT_FALSE(
ordering.UsesBeforeValueDefinition({&call_use}, value, *dataflow));
EXPECT_TRUE(
ordering.UsesBeforeValueDefinition({&async_done_use}, value, *dataflow));
}
TEST_F(HloOrderingTest, OrderingBetweenAsyncOpAndItsWrapped) {
constexpr absl::string_view hlo = R"(
HloModule test
%async_computation {
%param_0 = f32[10,32,512]{2,1,0:T(8,128)S(5)} parameter(0)
%param_1 = f32[1,32,512]{2,1,0:T(8,128)} parameter(1)
%param_2 = s32[]{:T(128)} parameter(2)
%param_3 = s32[]{:T(128)} parameter(3)
%param_4 = s32[]{:T(128)} parameter(4)
ROOT %dynamic-update-slice.1 = f32[10,32,512]{2,1,0:T(8,128)S(5)}
dynamic-update-slice(%param_0, %param_1, %param_2, %param_3, %param_4)
}
ENTRY %main {
%param.1 = (s32[]{:T(128)}, f32[32,512]{1,0:T(8,128)},
f32[10,32,512]{2,1,0:T(8,128)S(5)}) parameter(0)
%get-tuple-element.132 = f32[10,32,512]{2,1,0:T(8,128)S(5)} get-tuple-element(
%param.1), index=2
%get-tuple-element.131 = f32[32,512]{1,0:T(8,128)} get-tuple-element(
%param.1), index=1
%cosine.0 = f32[32,512]{1,0:T(8,128)} cosine(%get-tuple-element.131)
%reshape.6 = f32[1,32,512]{2,1,0:T(8,128)} reshape(%cosine.0)
%get-tuple-element.130 = s32[]{:T(128)} get-tuple-element(%param.1), index=0
%constant.49 = s32[]{:T(128)} constant(0)
%compare.13 = pred[]{:T(512)} compare(
%get-tuple-element.130, %constant.49), direction=LT
%constant.50 = s32[]{:T(128)} constant(10)
%add.22 = s32[]{:T(128)} add(%get-tuple-element.130, %constant.50)
%select.6 = s32[]{:T(128)} select(
%compare.13, %add.22, %get-tuple-element.130)
%dynamic-update-slice-start = (
(f32[10,32,512]{2,1,0:T(8,128)S(5)}, f32[1,32,512]{2,1,0:T(8,128)},
s32[]{:T(128)}, s32[]{:T(128)}, s32[]{:T(128)}),
f32[10,32,512]{2,1,0:T(8,128)S(5)}, u32[]) async-start(
%get-tuple-element.132, %reshape.6, %select.6,
%constant.49, %constant.49), calls=%async_computation
ROOT %dynamic-update-slice-done = f32[10,32,512]{2,1,0:T(8,128)S(5)}
async-done(%dynamic-update-slice-start), calls=%async_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(hlo));
TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
                          HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
DependencyHloOrdering ordering(module.get());
auto* async_start =
FindInstruction(module.get(), "dynamic-update-slice-start");
auto* async_done = FindInstruction(module.get(), "dynamic-update-slice-done");
auto* dus = FindInstruction(module.get(), "dynamic-update-slice.1");
EXPECT_EQ(ordering.GetExecutionConstraint(async_start, dus),
HloOrdering::ExecutionConstraint::kIsSame);
EXPECT_EQ(ordering.GetExecutionConstraint(async_done, dus),
HloOrdering::ExecutionConstraint::kIsSame);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_ordering.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_ordering_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b02af755-636c-49b9-876f-0fd35babbbb7 | cpp | tensorflow/tensorflow | tuple_simplifier | third_party/xla/xla/service/tuple_simplifier.cc | third_party/xla/xla/service/tuple_simplifier_test.cc | #include "xla/service/tuple_simplifier.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
TupleSimplifier::TupleSimplifier(bool exclude_entry_computation)
: exclude_entry_computation_(exclude_entry_computation) {}
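// Collapses a kTuple instruction that merely reassembles all elements of a
// single tuple back into that tuple. Illustrative HLO sketch (names are
// hypothetical):
//
//   t = (f32[], f32[]) tuple(a, b)
//   x = f32[] get-tuple-element(t), index=0
//   y = f32[] get-tuple-element(t), index=1
//   r = (f32[], f32[]) tuple(x, y)   // r is replaced by t
//
// The replacement only happens when every operand is a get-tuple-element of
// the same tuple at the matching index and the shapes are compatible.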
absl::StatusOr<bool> TupleSimplifier::RemoveWholeTuple(HloInstruction* tuple) {
HloInstruction* top_tuple = nullptr;
for (int64_t operand_number = 0; operand_number < tuple->operand_count();
++operand_number) {
HloInstruction* operand = tuple->mutable_operand(operand_number);
if (operand->opcode() != HloOpcode::kGetTupleElement ||
operand->tuple_index() != operand_number) {
return false;
}
if (top_tuple == nullptr) {
top_tuple = operand->mutable_operand(0);
if (!ShapeUtil::Compatible(top_tuple->shape(), tuple->shape())) {
return false;
}
} else if (top_tuple != operand->operand(0)) {
return false;
}
}
if (top_tuple == nullptr) {
return false;
}
TF_ASSIGN_OR_RETURN(bool changed,
tuple->parent()->ReplaceInstruction(
                          tuple, top_tuple, /*preserve_sharding=*/true));
return changed;
}
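// Runs the simplifier over every computation (optionally skipping the entry
// computation): whole-tuple rebuilds are collapsed via RemoveWholeTuple, and
// chains of get-tuple-element instructions that reach through tuples are
// replaced with the element they ultimately select. The schedule, if any, is
// updated afterwards so it stays valid.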
absl::StatusOr<bool> TupleSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
if (exclude_entry_computation_ &&
computation == module->entry_computation()) {
continue;
}
for (auto* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kTuple) {
TF_ASSIGN_OR_RETURN(bool c, RemoveWholeTuple(instruction));
changed |= c;
} else {
auto [ancestor, index] = instruction->LatestNonGteAncestorAndIndex();
if (ancestor == instruction) {
continue;
}
HloInstruction* replacement = ancestor;
for (int i = 0; i < index.size(); ++i) {
if (replacement->opcode() != HloOpcode::kTuple) {
replacement = nullptr;
break;
}
replacement = replacement->mutable_operand(index[i]);
}
if (replacement) {
TF_ASSIGN_OR_RETURN(bool replaced,
computation->ReplaceInstruction(
instruction, replacement,
                                /*preserve_sharding=*/true,
                                /*relay_control_dependency=*/true));
changed |= replaced;
}
}
}
}
if (module->has_schedule()) {
TF_RETURN_IF_ERROR(module->schedule().Update());
}
return changed;
}
} | #include "xla/service/tuple_simplifier.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class TupleSimplifierTest : public HloTestBase {
protected:
void Run(HloModule* module, bool change_expected) {
auto changed_status = RunHloPass(TupleSimplifier(), module);
TF_ASSERT_OK(changed_status.status());
EXPECT_EQ(change_expected, changed_status.value());
}
void Run(HloModule* module, bool change_expected, bool exclude_entry) {
auto changed_status = RunHloPass(TupleSimplifier(exclude_entry), module);
TF_ASSERT_OK(changed_status.status());
EXPECT_EQ(change_expected, changed_status.value());
}
const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
const Shape tuple_shape_ = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeShape(F32, {})});
};
TEST_F(TupleSimplifierTest, TupleOfParameters) {
constexpr absl::string_view kModuleStr = R"(
HloModule TupleOfParameters, entry_computation_layout={(f32[], f32[], f32[])->(f32[], f32[], f32[])}
ENTRY %TupleOfParameters (param0: f32[], param1: f32[], param2: f32[]) -> (f32[], f32[], f32[]) {
%param0 = f32[] parameter(0)
%param1 = f32[] parameter(1)
%param2 = f32[] parameter(2)
ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %param0, f32[] %param1, f32[] %param2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, GteOfTupleOfParameter) {
constexpr absl::string_view kModuleStr = R"(
HloModule GteOfTupleOfParameter, entry_computation_layout={((f32[], f32[], f32[]))->f32[]}
ENTRY %GteOfTupleOfParameter (param: (f32[], f32[], f32[])) -> f32[] {
%param = (f32[], f32[], f32[]) parameter(0)
ROOT %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, GteOfTuple) {
constexpr absl::string_view kModuleStr = R"(
HloModule GteOfTuple, entry_computation_layout={(f32[], f32[], f32[])->f32[]}
ENTRY %GteOfTuple (param0: f32[], param1: f32[], param2: f32[]) -> f32[] {
%param0 = f32[] parameter(0)
%param1 = f32[] parameter(1)
%param2 = f32[] parameter(2)
%tuple = (f32[], f32[], f32[]) tuple(f32[] %param0, f32[] %param1, f32[] %param2)
ROOT %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Tuple()));
Run(module.get(), true);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Parameter(1));
}
TEST_F(TupleSimplifierTest, GteOfTupleChain) {
constexpr absl::string_view kModuleStr = R"(
HloModule GteOfTupleChain, entry_computation_layout={(f32[])->f32[]}
ENTRY %GteOfTupleChain (param: f32[]) -> f32[] {
%param = f32[] parameter(0)
%tuple = (f32[], f32[], f32[]) tuple(f32[] %param, f32[] %param, f32[] %param)
%get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple), index=1
%tuple.1 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element, f32[] %get-tuple-element)
%get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.1), index=1
%tuple.2 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.1, f32[] %get-tuple-element.1, f32[] %get-tuple-element.1)
%get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.2), index=1
%tuple.3 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.2, f32[] %get-tuple-element.2, f32[] %get-tuple-element.2)
%get-tuple-element.3 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.3), index=1
%tuple.4 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.3, f32[] %get-tuple-element.3, f32[] %get-tuple-element.3)
%get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.4), index=1
%tuple.5 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.4, f32[] %get-tuple-element.4, f32[] %get-tuple-element.4)
%get-tuple-element.5 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.5), index=1
%tuple.6 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.5, f32[] %get-tuple-element.5, f32[] %get-tuple-element.5)
%get-tuple-element.6 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.6), index=1
%tuple.7 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.6, f32[] %get-tuple-element.6, f32[] %get-tuple-element.6)
%get-tuple-element.7 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.7), index=1
%tuple.8 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.7, f32[] %get-tuple-element.7, f32[] %get-tuple-element.7)
%get-tuple-element.8 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.8), index=1
%tuple.9 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.8, f32[] %get-tuple-element.8, f32[] %get-tuple-element.8)
%get-tuple-element.9 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.9), index=1
ROOT %negate = f32[] negate(f32[] %get-tuple-element.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Negate(op::GetTupleElement(op::Tuple())));
Run(module.get(), true);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Negate(op::Parameter()));
}
TEST_F(TupleSimplifierTest, NestedGteOfTuples) {
constexpr absl::string_view kModuleStr = R"(
HloModule NestedGteOfTuples, entry_computation_layout={(f32[])->f32[]}
ENTRY %NestedGteOfTuples (param: f32[]) -> f32[] {
%param = f32[] parameter(0)
%tuple = (f32[], f32[]) tuple(f32[] %param, f32[] %param)
%tuple.1 = ((f32[], f32[]), (f32[], f32[])) tuple((f32[], f32[]) %tuple, (f32[], f32[]) %tuple)
%tuple.2 = (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))
tuple(
((f32[], f32[]), (f32[], f32[])) %tuple.1,
((f32[], f32[]), (f32[], f32[])) %tuple.1
)
%tuple.3 = ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))))
tuple(
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %tuple.2,
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %tuple.2
)
%tuple.4 = (((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))),
((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))))
tuple(
((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) %tuple.3,
((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) %tuple.3
)
%get-tuple-element = ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))))
get-tuple-element(
(((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))),
((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))))) %tuple.4
), index=0
%get-tuple-element.1 = (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))
get-tuple-element(
((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))),
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) %get-tuple-element
), index=0
%get-tuple-element.2 = ((f32[], f32[]), (f32[], f32[]))
get-tuple-element(
(((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %get-tuple-element.1
), index=0
%get-tuple-element.3 = (f32[], f32[])
get-tuple-element(
((f32[], f32[]), (f32[], f32[])) %get-tuple-element.2
), index=0
ROOT %get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[]) %get-tuple-element.3), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement());
Run(module.get(), true);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Parameter(0));
}
TEST_F(TupleSimplifierTest, TupleOfGteInstructions) {
constexpr absl::string_view kModuleStr = R"(
HloModule TupleOfGteInstructions, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[], f32[])}
ENTRY %TupleOfGteInstructions (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) {
%param = (f32[], f32[], f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0
%get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1
%get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2
ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1, f32[] %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::GetTupleElement()));
Run(module.get(), true);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Parameter(0));
}
TEST_F(TupleSimplifierTest, TupleOfGteNotRemovedIfOrderIsNotPreserved) {
constexpr absl::string_view kModuleStr = R"(
HloModule TupleOfGteInstructions, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[], f32[])}
ENTRY %TupleOfGteInstructions (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) {
%param = (f32[], f32[], f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0
%get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1
%get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2
ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.2, f32[] %get-tuple-element.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, IncompatibleTuples) {
constexpr absl::string_view kModuleStr = R"(
HloModule IncompatibleTuples, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[])}
ENTRY %IncompatibleTuples (param: (f32[], f32[], f32[])) -> (f32[], f32[]) {
%param = (f32[], f32[], f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0
%get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1
ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, CanExcludeEntryComputation) {
constexpr absl::string_view kModuleStr = R"(
HloModule CanExcludeEntryComputation, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[])}
%c1 (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) {
%param = (f32[], f32[], f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0
%get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1
%get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2
ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1, f32[] %get-tuple-element.2)
}
%c2 (param.1: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) {
%param.1 = (f32[], f32[], f32[]) parameter(0)
%get-tuple-element.3 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=0
%get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=1
%get-tuple-element.5 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=2
ROOT %tuple.1 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.3, f32[] %get-tuple-element.4, f32[] %get-tuple-element.5)
}
ENTRY %e (param.2: (f32[], f32[], f32[])) -> (f32[], f32[]) {
%param.2 = (f32[], f32[], f32[]) parameter(0)
%call = (f32[], f32[], f32[]) call((f32[], f32[], f32[]) %param.2), to_apply=%c1
%get-tuple-element.6 = f32[] get-tuple-element((f32[], f32[], f32[]) %call), index=0
%call.1 = (f32[], f32[], f32[]) call((f32[], f32[], f32[]) %param.2), to_apply=%c2
%get-tuple-element.7 = f32[] get-tuple-element((f32[], f32[], f32[]) %call.1), index=1
%tuple.2 = (f32[], f32[]) tuple(f32[] %get-tuple-element.6, f32[] %get-tuple-element.7)
%get-tuple-element.8 = f32[] get-tuple-element((f32[], f32[]) %tuple.2), index=0
%get-tuple-element.9 = f32[] get-tuple-element((f32[], f32[]) %tuple.2), index=1
ROOT %tuple.3 = (f32[], f32[]) tuple(f32[] %get-tuple-element.8, f32[] %get-tuple-element.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
  Run(module.get(), /*change_expected=*/true, /*exclude_entry=*/true);
EXPECT_THAT(FindComputation(module.get(), "c1")->root_instruction(),
op::Parameter(0));
EXPECT_THAT(FindComputation(module.get(), "c2")->root_instruction(),
op::Parameter(0));
EXPECT_EQ(module->entry_computation()->instruction_count(), 9);
}
TEST_F(TupleSimplifierTest, ShardingInfoIsNotBeLost) {
constexpr absl::string_view kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0), sharding={devices=[2]0,1}
t = (s32[10]) tuple(p0)
ROOT %gte = s32[10] get-tuple-element(t), index=0, sharding={replicated}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), false);
}
TEST_F(TupleSimplifierTest, NestedTuple) {
constexpr absl::string_view kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0), sharding={devices=[2]0,1}
p1 = s32[10] parameter(1), sharding={devices=[2]0,1}
p2 = s32[10] parameter(2), sharding={devices=[2]0,1}
p3 = s32[10] parameter(3), sharding={devices=[2]0,1}
t = (s32[10], s32[10]) tuple(p0, p1), sharding={{devices=[2]0,1}, {devices=[2]0,1}}
t2 = ((s32[10], s32[10]), s32[10]) tuple(t, p2), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}}
t3 = (((s32[10], s32[10]), s32[10]), s32[10]) tuple(t2, p3), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}}
gte0 = ((s32[10], s32[10]), s32[10]) get-tuple-element(t3), index=0, sharding={{replicated}, {replicated}, {replicated}}
gte1 = (s32[10], s32[10]) get-tuple-element(gte0), index=0, sharding={{replicated}, {replicated}}
gte2 = s32[10] get-tuple-element(gte1), index=1, sharding={devices=[2]0,1}
gte3 = s32[10] get-tuple-element(gte1), index=0, sharding={replicated}
ROOT to = (s32[10], s32[10]) tuple(gte2, gte3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
Run(module.get(), true);
auto* p1 = FindInstruction(module.get(), "p1");
auto* gte3 = FindInstruction(module.get(), "gte3");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0), p1);
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(1), gte3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
692904ef-2ba2-4fdc-9174-f6b95754dfd7 | cpp | tensorflow/tensorflow | p2p_schedule_preparation | third_party/xla/xla/service/p2p_schedule_preparation.cc | third_party/xla/xla/service/p2p_schedule_preparation_test.cc | #include "xla/service/p2p_schedule_preparation.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
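// Throughout this pass, "P2P" refers to non-host-transfer Send/Recv pairs
// identified by their channel id. The pass groups such ops into chains and
// adds control dependencies so that each chain runs as an uninterrupted unit
// relative to other P2P chains and collective ops.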
bool IsP2POp(const HloInstruction* op) {
auto p2p = DynCast<HloSendRecvInstruction>(op);
return p2p != nullptr && !p2p->is_host_transfer();
}
bool IsCollectiveOp(const HloInstruction* op) {
HloOpcode opcode = op->opcode();
if (opcode == HloOpcode::kCustomCall) {
return true;
}
  return hlo_query::IsAsyncCollectiveDoneOp(op, /*include_send_recv=*/true) ||
         (hlo_query::IsCollectiveCommunicationOp(opcode) &&
          !hlo_query::IsAsyncCollectiveStartOp(op, /*include_send_recv=*/true));
}
HloInstruction* GetStartOpForDoneOp(HloInstruction* op) {
switch (op->opcode()) {
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGatherDone:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
return op->mutable_operand(0);
default:
return op;
}
}
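// A P2P group is either entirely inside one computation (kUnpipelined), split
// across a while-body and the computation containing the while op
// (kPipelined), or malformed (kUnrecognized). The runtime stream is decoded
// from the kSendRecvPipelineAttr frontend attribute ("0" or "1").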
enum P2PGroupKind { kUnpipelined = 0, kPipelined = 1, kUnrecognized = 2 };
enum P2PRuntimeStream { kUnknown = 0, kStream0 = 1, kStream1 = 2 };
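// Records the four ops of one Send/Recv chain (recv, send, recv-done,
// send-done), the computation that contains them, and, for the parent node of
// a pipelined group, the while op that carries the chain across iterations.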
struct P2PGroupNode {
bool RecordParentComputation(HloComputation* parent) {
if (computation == nullptr) {
computation = parent;
return true;
}
return computation == parent;
}
bool RecordP2POp(HloSendRecvInstruction* p2p) {
if (!RecordParentComputation(p2p->parent())) {
return false;
}
switch (p2p->opcode()) {
case HloOpcode::kRecvDone:
if (recv_done == nullptr) {
recv_done = Cast<HloRecvDoneInstruction>(p2p);
return true;
}
break;
case HloOpcode::kSendDone:
if (send_done == nullptr) {
send_done = Cast<HloSendDoneInstruction>(p2p);
return true;
}
break;
case HloOpcode::kRecv:
if (recv == nullptr) {
recv = Cast<HloRecvInstruction>(p2p);
return true;
}
break;
case HloOpcode::kSend:
if (send == nullptr) {
send = Cast<HloSendInstruction>(p2p);
return true;
}
break;
default:
break;
}
return false;
}
bool RecordWhileOp(HloInstruction* while_op) {
if (while_loop != nullptr) {
return false;
}
if (!RecordParentComputation(while_op->parent())) {
return false;
}
while_loop = while_op;
return true;
}
bool Incomplete() const {
return recv_done == nullptr || send_done == nullptr || recv == nullptr ||
send == nullptr;
}
bool IncompletePipelinedParent() const {
return Incomplete() || while_loop == nullptr;
}
P2PRuntimeStream GetRuntimeStream(const HloInstruction* start) const {
auto it = start->frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it != start->frontend_attributes().map().end()) {
if (it->second == "0") {
return kStream0;
}
if (it->second == "1") {
return kStream1;
}
}
return kUnknown;
}
P2PRuntimeStream GetRuntimeStream() const {
P2PRuntimeStream send_stream = GetRuntimeStream(send);
P2PRuntimeStream recv_stream = GetRuntimeStream(recv);
if (send_stream != recv_stream) {
return kUnknown;
}
return send_stream;
}
int64_t GetChannel() const { return recv->channel_id().value(); }
HloRecvDoneInstruction* recv_done = nullptr;
HloSendDoneInstruction* send_done = nullptr;
HloRecvInstruction* recv = nullptr;
HloSendInstruction* send = nullptr;
HloComputation* computation = nullptr;
HloInstruction* while_loop = nullptr;
};
struct P2PGroup;
using P2PGroupMap = absl::flat_hash_map<int64_t, P2PGroup>;
using P2PInComputation =
absl::flat_hash_map<const HloComputation*, std::set<int64_t>>;
using CollectiveInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
using ChainStartEnd =
std::pair<HloSendRecvInstruction*, HloSendRecvInstruction*>;
static constexpr int kUnpipelinedNodeIdx = 0;
static constexpr int kPipelinedChildNodeIdx = 0;
static constexpr int kPipelinedParentNodeIdx = 1;
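// A P2P group collects the nodes for one channel. For a pipelined group, the
// child node lives in the while-body and the parent node in the computation
// containing the while op. Two groups on complementary runtime streams
// (kStream0/kStream1) within the same computation may form a cycle; the
// stream-1 group records its stream-0 partner in complement_group_channel.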
struct P2PGroup {
absl::Status RecordP2POpForUnpipelinedGroup(HloSendRecvInstruction* p2p) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind != kUnpipelined) {
return Internal("Expected unpipelined group");
}
P2PGroupNode& node = nodes[kUnpipelinedNodeIdx];
if (!node.RecordP2POp(p2p)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
absl::Status RecordP2POpForPipelinedGroup(HloSendRecvInstruction* p2p) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind == kUnpipelined) {
if (nodes[kPipelinedParentNodeIdx].computation != nullptr) {
return Internal("Expected unpipelined group");
}
kind = kPipelined;
}
P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
if (!node.RecordP2POp(p2p)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
absl::Status RecordWhileOpToPipelinedGroup(HloInstruction* while_op) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind == kUnpipelined) {
return Internal("Expected pipelined group");
}
P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
if (!node.RecordWhileOp(while_op)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
bool RecordRuntimeStream() {
P2PRuntimeStream child_stream =
nodes[kPipelinedChildNodeIdx].GetRuntimeStream();
if (kind == kPipelined) {
P2PRuntimeStream parent_stream =
nodes[kPipelinedParentNodeIdx].GetRuntimeStream();
if (child_stream != parent_stream || child_stream == kUnknown) {
return false;
}
}
runtime_stream = child_stream;
return true;
}
absl::Status RecordComplementGroup(P2PGroupMap& p2p_group_map) {
CHECK(!complement_group_channel.has_value() && runtime_stream == kStream1);
for (auto& [channel, p2p_group] : p2p_group_map) {
if (&p2p_group == this ||
p2p_group.ChildComputation() != ChildComputation()) {
continue;
}
if (p2p_group.kind == kPipelined &&
p2p_group.ParentComputation() == ParentComputation()) {
if (p2p_group.runtime_stream != kStream0) {
return Internal(
"Expected different pipeline stream for complement group");
}
complement_group_channel = channel;
p2p_group.complement_group_channel = GetChannel();
} else if (p2p_group.kind == kUnpipelined &&
p2p_group.runtime_stream == kStream0) {
complement_group_channel = channel;
p2p_group.complement_group_channel = GetChannel();
}
}
return absl::OkStatus();
}
HloComputation* ParentComputation() const { return GetParent().computation; }
HloComputation* ChildComputation() const { return GetChild().computation; }
int64_t GetChannel() const { return nodes[kUnpipelinedNodeIdx].GetChannel(); }
P2PGroupNode& GetChild() { return nodes[kPipelinedChildNodeIdx]; }
P2PGroupNode& GetParent() { return nodes[kPipelinedParentNodeIdx]; }
const P2PGroupNode& GetChild() const { return nodes[kPipelinedChildNodeIdx]; }
const P2PGroupNode& GetParent() const {
return nodes[kPipelinedParentNodeIdx];
}
ChainStartEnd GetChainStartEnd(const HloComputation* computation,
const P2PGroupMap& p2p_group_map) const {
if (computation == ChildComputation()) {
if (!InCycle()) {
return std::make_pair(GetChild().recv, GetChild().send_done);
}
if (runtime_stream == kStream1) {
return std::make_pair(
GetComplementGroup(p2p_group_map)->GetChild().recv,
GetChild().send_done);
}
return std::make_pair(
GetChild().recv,
GetComplementGroup(p2p_group_map)->GetChild().send_done);
}
CHECK(kind == kPipelined && computation == ParentComputation());
if (!InCycle()) {
return std::make_pair(GetParent().recv, GetParent().send_done);
}
if (runtime_stream == kStream1) {
return std::make_pair(GetComplementGroup(p2p_group_map)->GetParent().recv,
GetParent().send_done);
}
return std::make_pair(
GetParent().recv,
GetComplementGroup(p2p_group_map)->GetParent().send_done);
}
HloInstruction* GetWhileOp() const {
return nodes[kPipelinedParentNodeIdx].while_loop;
}
bool InCycle() const { return complement_group_channel.has_value(); }
P2PGroup* GetComplementGroup(P2PGroupMap& p2p_group_map) const {
CHECK(InCycle());
return &p2p_group_map.at(*complement_group_channel);
}
const P2PGroup* GetComplementGroup(const P2PGroupMap& p2p_group_map) const {
CHECK(InCycle());
return &p2p_group_map.at(*complement_group_channel);
}
P2PGroupKind kind = kUnpipelined;
P2PGroupNode nodes[2];
P2PRuntimeStream runtime_stream = kUnknown;
std::optional<int64_t> complement_group_channel = std::nullopt;
};
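// Returns true if `hlo` is itself a collective op or calls a computation
// already known (via `collective_in_computation`) to invoke one.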
bool MayInvokeCollectiveOp(
const HloInstruction* hlo,
const CollectiveInComputation& collective_in_computation) {
if (IsCollectiveOp(hlo)) {
return true;
}
for (auto callee : hlo->called_computations()) {
auto collective_in_comp = collective_in_computation.find(callee);
if (collective_in_comp != collective_in_computation.end() &&
collective_in_comp->second) {
return true;
}
}
return false;
}
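// If an operand of the while-init tuple is a send-done whose channel belongs
// to a pipelined P2P group found in the while-body, records the while op on
// that group's parent node. At most two such groups (a cycle) are allowed per
// while loop.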
absl::Status MayAddWhileOpToPipelinedGroup(HloInstruction* while_op,
P2PInComputation& p2p_in_computation,
P2PGroupMap& p2p_group_map) {
if (while_op->while_init()->opcode() != HloOpcode::kTuple) {
return absl::OkStatus();
}
HloComputation* body = while_op->called_computations()[0];
auto p2p_in_while = p2p_in_computation.find(body);
if (p2p_in_while == p2p_in_computation.end()) {
return absl::OkStatus();
}
int pipelined_group = 0;
for (auto hlo : while_op->while_init()->operands()) {
if (hlo->opcode() != HloOpcode::kSendDone) {
continue;
}
int64_t channel_id = hlo->channel_id().value();
if (p2p_in_while->second.find(channel_id) == p2p_in_while->second.end()) {
continue;
}
auto group = p2p_group_map.find(channel_id);
if (group == p2p_group_map.end() || group->second.kind != kPipelined) {
continue;
}
pipelined_group++;
if (pipelined_group > 2) {
return Internal(
"Expecting up to two pipelined P2P groups for each while-loop");
}
TF_RETURN_IF_ERROR(group->second.RecordWhileOpToPipelinedGroup(while_op));
}
return absl::OkStatus();
}
absl::Status OrderBefore(HloInstruction* i1, HloInstruction* i2) {
TF_RETURN_IF_ERROR(i1->AddControlDependencyTo(i2));
VLOG(10) << "Add control predecessor " << i2->ToString();
return absl::OkStatus();
}
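// Orders the four ops of a single chain:
//   recv => send => recv-done => send-done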
absl::Status ConnectP2P1NodeChain(const P2PGroupNode& node) {
HloRecvDoneInstruction* recv_done = node.recv_done;
HloRecvInstruction* recv = node.recv;
HloSendDoneInstruction* send_done = node.send_done;
HloSendInstruction* send = node.send;
TF_RETURN_IF_ERROR(OrderBefore(recv, send));
TF_RETURN_IF_ERROR(OrderBefore(send, recv_done));
TF_RETURN_IF_ERROR(OrderBefore(recv_done, send_done));
return absl::OkStatus();
}
absl::Status ConnectUnpipelinedP2P(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetChild());
}
absl::Status ConnectPipelined1P2PChild(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetChild());
}
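// Interleaves two chains that form a cycle, yielding the total order
//   recv.0 => send.0 => recv.1 => send.1 =>
//   recv-done.0 => recv-done.1 => send-done.0 => send-done.1
// so both transfers are in flight before either done op fires.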
absl::Status ConnectP2P2NodeChain(const P2PGroupNode& node0,
const P2PGroupNode& node1) {
HloSendRecvInstruction* recv_done0 = node0.recv_done;
HloRecvInstruction* recv0 = node0.recv;
HloSendRecvInstruction* send_done0 = node0.send_done;
HloSendInstruction* send0 = node0.send;
HloSendRecvInstruction* recv_done1 = node1.recv_done;
HloRecvInstruction* recv1 = node1.recv;
HloSendRecvInstruction* send_done1 = node1.send_done;
HloSendInstruction* send1 = node1.send;
TF_RETURN_IF_ERROR(OrderBefore(recv_done0, recv_done1));
TF_RETURN_IF_ERROR(OrderBefore(recv_done1, send_done0));
TF_RETURN_IF_ERROR(OrderBefore(send_done0, send_done1));
TF_RETURN_IF_ERROR(OrderBefore(recv0, send0));
TF_RETURN_IF_ERROR(OrderBefore(send0, recv1));
TF_RETURN_IF_ERROR(OrderBefore(recv1, send1));
TF_RETURN_IF_ERROR(OrderBefore(send1, recv_done0));
return absl::OkStatus();
}
absl::Status ConnectPipelined2P2PChild(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
p2p_group.GetChild());
}
absl::Status ConnectPipelined1P2PParent(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetParent());
}
absl::Status ConnectPipelined2P2PParent(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetParent(),
p2p_group.GetParent());
}
absl::Status ConnectUnpipelined2P2P(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
CHECK(p2p_group.runtime_stream == kStream1);
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
p2p_group.GetChild());
}
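// Walks `computation` in post order to (a) record whether it may invoke a
// collective op, (b) build a P2P group per channel id, and (c) attach while
// ops to pipelined groups. Groups that end up incomplete or with inconsistent
// runtime streams are dropped; remaining stream-1 groups are paired with
// their stream-0 complements.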
absl::Status GatherP2PGroupsAndCollectiveInfo(
const HloComputation* computation, P2PInComputation& p2p_in_computation,
P2PGroupMap& p2p_group_map,
CollectiveInComputation& collective_in_computation) {
collective_in_computation[computation] = false;
std::vector<HloInstruction*> while_ops;
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
collective_in_computation[computation] = true;
}
if (hlo->opcode() == HloOpcode::kWhile) {
while_ops.push_back(hlo);
continue;
}
if (!IsP2POp(hlo)) {
continue;
}
HloSendRecvInstruction* p2p = Cast<HloSendRecvInstruction>(hlo);
int64_t channel = p2p->channel_id().value();
auto p2p_group = p2p_group_map.find(channel);
if (p2p_group == p2p_group_map.end()) {
P2PGroup group;
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
p2p_group_map[channel] = group;
} else {
P2PGroup& group = p2p_group->second;
if (group.ChildComputation() == computation) {
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
} else {
TF_RETURN_IF_ERROR(group.RecordP2POpForPipelinedGroup(p2p));
}
}
auto p2p_in_comp = p2p_in_computation.find(computation);
if (p2p_in_comp == p2p_in_computation.end()) {
p2p_in_computation[computation] = {channel};
} else {
p2p_in_comp->second.insert(channel);
}
}
for (auto hlo : while_ops) {
TF_RETURN_IF_ERROR(
MayAddWhileOpToPipelinedGroup(hlo, p2p_in_computation, p2p_group_map));
}
for (auto& [channel, p2p_group] : p2p_group_map) {
if (p2p_group.kind == kUnpipelined) {
if (p2p_group.nodes[kUnpipelinedNodeIdx].Incomplete() ||
!p2p_group.RecordRuntimeStream()) {
p2p_group.kind = kUnrecognized;
}
} else if (p2p_group.kind == kPipelined) {
if (p2p_group.nodes[kPipelinedChildNodeIdx].Incomplete() ||
p2p_group.nodes[kPipelinedParentNodeIdx]
.IncompletePipelinedParent() ||
!p2p_group.RecordRuntimeStream()) {
p2p_group.kind = kUnrecognized;
}
}
}
absl::erase_if(p2p_group_map, [](const auto& p2p_group) {
return p2p_group.second.kind == kUnrecognized;
});
for (auto& [channel, p2p_group] : p2p_group_map) {
if ((p2p_group.kind == kPipelined &&
p2p_group.ParentComputation() != computation) ||
p2p_group.InCycle() || p2p_group.runtime_stream != kStream1) {
continue;
}
TF_RETURN_IF_ERROR(p2p_group.RecordComplementGroup(p2p_group_map));
}
return absl::OkStatus();
}
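// Adds intra-chain control dependencies for every P2P group whose channel
// appears in `p2p_channels`. Returns the number of chains seen and, if this
// computation is the while-body child of a pipelined group, a pointer to that
// (unique) group.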
absl::StatusOr<std::pair<int, const P2PGroup*>> ConnectP2PChain(
HloComputation* computation, const P2PGroupMap& p2p_group_map,
const std::set<int64_t>& p2p_channels) {
const P2PGroup* pipelined_group = nullptr;
int num_p2p_chains = 0;
for (int64_t channel : p2p_channels) {
auto it = p2p_group_map.find(channel);
if (it == p2p_group_map.end()) {
continue;
}
num_p2p_chains++;
const P2PGroup& p2p_group = it->second;
P2PGroupKind kind = p2p_group.kind;
if (kind == P2PGroupKind::kUnpipelined) {
if (!p2p_group.InCycle()) {
TF_RETURN_IF_ERROR(ConnectUnpipelinedP2P(p2p_group));
} else if (p2p_group.runtime_stream == kStream1) {
TF_RETURN_IF_ERROR(ConnectUnpipelined2P2P(p2p_group, p2p_group_map));
}
continue;
}
if (!p2p_group.InCycle()) {
if (computation == p2p_group.ParentComputation()) {
TF_RETURN_IF_ERROR(ConnectPipelined1P2PParent(p2p_group));
} else {
if (pipelined_group != nullptr) {
return Internal("Expected <=1 pipelined group in a while-body");
}
pipelined_group = &p2p_group;
TF_RETURN_IF_ERROR(ConnectPipelined1P2PChild(p2p_group));
}
continue;
}
if (p2p_group.runtime_stream != kStream1) {
continue;
}
if (computation == p2p_group.ParentComputation()) {
TF_RETURN_IF_ERROR(ConnectPipelined2P2PParent(p2p_group, p2p_group_map));
} else {
if (pipelined_group != nullptr) {
return Internal(
"Expected only two pipelined groups forming a cycle in a "
"while-body");
}
pipelined_group = &p2p_group;
TF_RETURN_IF_ERROR(ConnectPipelined2P2PChild(p2p_group, p2p_group_map));
}
}
return std::make_pair(num_p2p_chains, pipelined_group);
}
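// Orders `a` before `b` with a control dependency unless `a` already reaches
// `b`, updating the reachability map incrementally.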
absl::Status OrderBefore(HloReachabilityMap* reachability, HloInstruction* a,
HloInstruction* b) {
VLOG(10) << "OrderBefore " << a->ToString() << " " << b->ToString();
if (!reachability->IsReachable(a, b)) {
TF_RETURN_IF_ERROR(a->AddControlDependencyTo(b));
VLOG(10) << "add control predecessor " << b->ToString();
reachability->UpdateReachabilityThroughInstruction(b);
}
return absl::OkStatus();
}
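// Given the P2P chain for `group` starting at `chain_start_iter`, orders each
// other collective (and each other P2P chain appearing later in the
// post-order) entirely before or entirely after this chain, choosing the
// direction based on current reachability.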
absl::Status LinearizeCollectivesWithOtherP2P(
const P2PGroupMap& p2p_group_map, const P2PGroup& group,
const CollectiveInComputation& collective_in_computation,
const std::vector<HloInstruction*>::iterator& chain_start_iter,
const std::vector<HloInstruction*>::iterator& begin_iter,
const std::vector<HloInstruction*>::iterator& end_iter,
HloReachabilityMap* reachability) {
HloComputation* computation = (*chain_start_iter)->parent();
ChainStartEnd start_end = group.GetChainStartEnd(computation, p2p_group_map);
for (auto it = begin_iter; it != end_iter; ++it) {
HloInstruction* hlo = *it;
if (IsP2POp(hlo)) {
auto group_it = p2p_group_map.find(hlo->channel_id().value());
if (group_it == p2p_group_map.end()) {
continue;
}
const P2PGroup& cur_group = group_it->second;
P2PGroupKind kind = cur_group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == cur_group.ChildComputation()) {
continue;
}
ChainStartEnd cur_start_end =
cur_group.GetChainStartEnd(computation, p2p_group_map);
if (cur_start_end.first != hlo) {
continue;
}
if (it <= chain_start_iter) {
continue;
}
if (reachability->IsReachable(start_end.first, cur_start_end.second)) {
TF_RETURN_IF_ERROR(
OrderBefore(reachability, start_end.second, cur_start_end.first));
} else {
TF_RETURN_IF_ERROR(
OrderBefore(reachability, cur_start_end.second, start_end.first));
}
continue;
}
if (!MayInvokeCollectiveOp(hlo, collective_in_computation)) {
continue;
}
if (hlo->opcode() == HloOpcode::kWhile &&
group.kind == P2PGroupKind::kPipelined && group.GetWhileOp() == hlo) {
continue;
}
if (hlo_query::IsAsyncCollectiveDoneOp(hlo, false)) {
if (reachability->IsReachable(start_end.first, hlo)) {
TF_RETURN_IF_ERROR(OrderBefore(reachability, start_end.second,
GetStartOpForDoneOp(hlo)));
} else {
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
}
if (reachability->IsReachable(start_end.first, hlo)) {
TF_RETURN_IF_ERROR(OrderBefore(reachability, start_end.second, hlo));
} else {
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
}
return absl::OkStatus();
}
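// In a while-body that hosts a pipelined P2P chain, orders every other
// collective-invoking instruction (and the end of every other P2P chain)
// before the start of the pipelined chain so nothing can be scheduled in
// between.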
absl::Status LinearizeCollectivesWithPipelinedP2PChild(
const P2PGroupMap& p2p_group_map, const P2PGroup& group,
const CollectiveInComputation& collective_in_computation,
HloComputation* computation, HloReachabilityMap* reachability) {
ChainStartEnd start_end = group.GetChainStartEnd(computation, p2p_group_map);
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (!MayInvokeCollectiveOp(hlo, collective_in_computation)) {
continue;
}
HloOpcode opcode = hlo->opcode();
if (IsP2POp(hlo) && opcode != HloOpcode::kSendDone) {
continue;
}
if (hlo->opcode() == HloOpcode::kSendDone) {
auto group_it = p2p_group_map.find(hlo->channel_id().value());
if (group_it == p2p_group_map.end()) {
continue;
}
const P2PGroup& cur_group = group_it->second;
P2PGroupKind kind = cur_group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == cur_group.ChildComputation()) {
continue;
}
ChainStartEnd cur_start_end =
cur_group.GetChainStartEnd(computation, p2p_group_map);
TF_RETURN_IF_ERROR(
OrderBefore(reachability, cur_start_end.second, start_end.first));
continue;
}
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
return absl::OkStatus();
}
}
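// Runs the pass: gathers P2P groups and collective info across all
// computations, adds intra-chain control dependencies for each recognized
// chain, and then linearizes any other collectives against those chains.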
absl::StatusOr<bool> P2PSchedulePreparation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
P2PGroupMap p2p_group_map;
P2PInComputation p2p_in_computation;
CollectiveInComputation collective_in_computation;
std::vector<HloComputation*> all_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto iter = all_computations.begin(); iter != all_computations.end();
++iter) {
VLOG(10) << "Gathering P2P groups and collective info for computation "
<< (*iter)->name();
TF_RETURN_IF_ERROR(GatherP2PGroupsAndCollectiveInfo(
*iter, p2p_in_computation, p2p_group_map, collective_in_computation));
}
if (p2p_group_map.empty()) {
return false;
}
for (auto iter = all_computations.rbegin(); iter != all_computations.rend();
++iter) {
HloComputation* computation = *iter;
auto p2p_in_comp = p2p_in_computation.find(computation);
if (p2p_in_comp == p2p_in_computation.end()) {
continue;
}
std::set<int64_t>& p2p_channels = p2p_in_comp->second;
TF_ASSIGN_OR_RETURN(
auto result, ConnectP2PChain(computation, p2p_group_map, p2p_channels));
if (result.first == 0) {
continue;
}
VLOG(10) << "Processing computation " << computation->name()
<< " num_p2p_chains " << result.first;
std::unique_ptr<HloReachabilityMap> reachability =
HloReachabilityMap::Build(computation);
if (result.second != nullptr) {
TF_RETURN_IF_ERROR(LinearizeCollectivesWithPipelinedP2PChild(
p2p_group_map, *result.second, collective_in_computation, computation,
reachability.get()));
}
std::vector<HloInstruction*> all_instructions =
computation->MakeInstructionPostOrder();
std::vector<HloInstruction*>::iterator begin = all_instructions.begin();
std::vector<HloInstruction*>::iterator end = all_instructions.end();
for (auto instr_it = begin; instr_it != end; ++instr_it) {
HloInstruction* hlo = *instr_it;
if (!IsP2POp(hlo)) {
continue;
}
HloSendRecvInstruction* p2p = Cast<HloSendRecvInstruction>(hlo);
int64_t channel = p2p->channel_id().value();
auto group_it = p2p_group_map.find(channel);
if (group_it == p2p_group_map.end()) {
continue;
}
P2PGroup& group = group_it->second;
P2PGroupKind kind = group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == group.ChildComputation()) {
continue;
}
ChainStartEnd start_end =
group.GetChainStartEnd(computation, p2p_group_map);
if (start_end.first != hlo) {
continue;
}
VLOG(10) << "linearize other collectives with respect to channel "
<< hlo->ToString();
TF_RETURN_IF_ERROR(LinearizeCollectivesWithOtherP2P(
p2p_group_map, group, collective_in_computation, instr_it, begin, end,
reachability.get()));
VLOG(10) << "finish connect other collectives with channel ";
}
}
return true;
}
} | #include "xla/service/p2p_schedule_preparation.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class P2PSchedulePreparationTest : public HloTestBase {
public:
void VerifyP2PNotTransformed(HloModule* module,
const std::string& suffix = "") {
HloInstruction* recv = FindInstruction(module, "recv" + suffix);
HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
EXPECT_EQ(recv->control_predecessors().size(), 0);
EXPECT_EQ(recv_done->control_predecessors().size(), 0);
EXPECT_EQ(send_done->control_predecessors().size(), 0);
}
void VerifyP2P1GroupChain(HloModule* module, const std::string& suffix) {
HloInstruction* send = FindInstruction(module, "send" + suffix);
HloInstruction* recv = FindInstruction(module, "recv" + suffix);
HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
EXPECT_EQ(send->control_predecessors()[0], recv);
EXPECT_EQ(recv_done->control_predecessors()[0], send);
EXPECT_EQ(send_done->control_predecessors()[0], recv_done);
}
void VerifyUnpipelinedP2P(HloModule* module, const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyPipelinedP2PChild(HloModule* module,
const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyPipelinedP2PParent(HloModule* module,
const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyP2P2GroupChain(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
HloInstruction* send0 = FindInstruction(module, "send" + suffix0);
HloInstruction* recv0 = FindInstruction(module, "recv" + suffix0);
HloInstruction* recv_done0 = FindInstruction(module, "recv-done" + suffix0);
HloInstruction* send_done0 = FindInstruction(module, "send-done" + suffix0);
HloInstruction* send1 = FindInstruction(module, "send" + suffix1);
HloInstruction* recv1 = FindInstruction(module, "recv" + suffix1);
HloInstruction* recv_done1 = FindInstruction(module, "recv-done" + suffix1);
HloInstruction* send_done1 = FindInstruction(module, "send-done" + suffix1);
EXPECT_EQ(recv_done1->control_predecessors()[0], recv_done0);
EXPECT_EQ(send_done0->control_predecessors()[0], recv_done1);
EXPECT_EQ(send_done1->control_predecessors()[0], send_done0);
EXPECT_EQ(send0->control_predecessors()[0], recv0);
EXPECT_EQ(recv1->control_predecessors()[0], send0);
EXPECT_EQ(send1->control_predecessors()[0], recv1);
EXPECT_EQ(recv_done0->control_predecessors()[0], send1);
}
void VerifyPipelined2P2PChild(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
VerifyP2P2GroupChain(module, suffix0, suffix1);
}
void VerifyPipelined2P2PParent(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
VerifyP2P2GroupChain(module, suffix0, suffix1);
}
};
constexpr char kEmpty[] = "";
constexpr char kHostTransfer[] = ", is_host_transfer=true";
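// Builds an HLO module whose entry computation contains a single unnested
// Recv/Send chain on channel 2. `is_host` marks the ops as host transfers;
// `incomplete` drops the Send/SendDone pair.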
std::string GetUnnestedP2PModuleString(bool is_host = false,
bool incomplete = false) {
constexpr char kSend[] = R"(
send = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
} %s
send-done = token[] send-done(send), channel_id=2 %s
)";
constexpr char kSimpleModule[] = R"(
HloModule test
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
} %s
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2 %s
%s
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
}
)";
const char* is_host_str = is_host ? kHostTransfer : kEmpty;
if (incomplete) {
return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, kEmpty);
}
std::string send_str = absl::StrFormat(kSend, is_host_str, is_host_str);
return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, send_str);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainHostNotTransformed) {
  std::string kModuleStr = GetUnnestedP2PModuleString(/*is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainIncompleteNotTransformed) {
  std::string kModuleStr =
      GetUnnestedP2PModuleString(/*is_host=*/false, /*incomplete=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainTransformed) {
std::string kModuleStr = GetUnnestedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyUnpipelinedP2P(module.get());
}
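// Builds an HLO module with one P2P chain inside a while-body (channel 1) and
// another in the entry computation (channel 2); each chain can independently
// be marked as a host transfer.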
std::string GetNestedP2PModuleString(bool while_p2p_is_host = false,
bool main_p2p_is_host = false) {
constexpr char kModuleTemplate[] = R"(
HloModule test
while-cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}"
} %s
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1 %s
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
send-done = token[] send-done(send), channel_id=1 %s
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
ROOT body-result = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=2 %s
send-done.1 = token[] send-done(send.1), channel_id=2 %s
recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1), index=0
while-init = (u32[], f32[1, 1024, 1024]) tuple(c0, recv-data.1)
while-result = (u32[], f32[1, 1024, 1024]) while(while-init),
body=while-body, condition=while-cond
while-result-data = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
ROOT entry-result = f32[1, 1024, 1024] add(while-result-data, recv-data.1)
}
)";
const char* while_p2p = while_p2p_is_host ? kHostTransfer : kEmpty;
const char* main_p2p = main_p2p_is_host ? kHostTransfer : kEmpty;
return absl::StrFormat(kModuleTemplate, while_p2p, while_p2p, while_p2p,
while_p2p, main_p2p, main_p2p, main_p2p, main_p2p);
}
TEST_F(P2PSchedulePreparationTest, WhileP2PIsHostNotMainTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString(/*while_p2p_is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyP2PNotTransformed(module.get());
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
HloInstruction* while_loop = FindInstruction(module.get(), "while-result");
EXPECT_EQ(while_loop->control_predecessors()[0], send_done);
}
TEST_F(P2PSchedulePreparationTest, MainP2PIsHostNotWhileTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString(
      /*while_p2p_is_host=*/false, /*main_p2p_is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyUnpipelinedP2P(module.get());
VerifyP2PNotTransformed(module.get(), ".1");
}
TEST_F(P2PSchedulePreparationTest, NestedP2PChainTransformed) {
std::string kModuleStr = GetNestedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyUnpipelinedP2P(module.get());
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
HloInstruction* recv_user = FindInstruction(module.get(), "while-result");
EXPECT_EQ(recv_user->control_predecessors()[0], send_done);
}
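// Builds an HLO module with a pipelined P2P chain (channel 1) threaded
// through a while loop. Optionally adds a nested while with its own chain in
// main, another P2P chain inside the pipelined while-body, or a custom-call
// in main.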
std::string GetPipelinedP2PModuleString(bool nested_p2p_in_main = false,
bool other_p2p_in_while = false,
bool test_custom_call = false) {
constexpr char kWhileForMain[] = R"(
while-cond-2 {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result-2 = pred[] compare(count, ub), direction=LT
}
while-body-2 {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.3 = token[] after-all()
recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=3,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}"
}
send.3 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.3),
channel_id=3, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}
recv-done.3 = (f32[1, 1024, 1024], token[]) recv-done(recv.3), channel_id=3
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.3), index=0
send-done.3 = token[] send-done(send.3), channel_id=3
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
ROOT body-result-2 = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
}
)";
constexpr char kUnnestedResult[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
collective-permute.2 = f32[1, 1024, 1024] collective-permute(init),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, collective-permute.2)
)";
constexpr char kUnnestedResultWithCustomCall[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
custom-call = f32[1, 1024, 1024] custom-call(init),
custom_call_target="my_custom_call"
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, custom-call)
)";
constexpr char kNestedResult[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
while-init-2 = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while-2 = (u32[], f32[1, 1024, 1024]) while(while-init-2),
body=while-body-2, condition=while-cond-2,
backend_config={"known_trip_count":{"n":"25"}}
while-result-2 = f32[1, 1024, 1024] get-tuple-element(while-2), index=1
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, while-result-2)
)";
constexpr char kPipelinedWhileBodyWithoutOtherP2P[] = R"(
while-body {
param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.1.q = (f32[1, 1024, 1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
new-data = f32[1, 1024, 1024] add(c, collective-permute.1)
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.1 = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(new-count, recv-done.1, send-done.1)
}
)";
constexpr char kPipelinedWhileBodyWithOtherP2P[] = R"(
while-body {
param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
    recv-done.1.q = (f32[1, 1024, 1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
send-data = f32[1, 1024, 1024] add(c, collective-permute.1)
after-all.4 = token[] after-all()
send.4 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.4),
channel_id=4, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
}
send-done.4 = token[] send-done(send.4), channel_id=4
recv.4 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.4), channel_id=4,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
}
recv-done.4 = (f32[1, 1024, 1024], token[]) recv-done(recv.4), channel_id=4
new-data = f32[1, 1024, 1024] get-tuple-element(recv-done.4), index=0
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.1 = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(new-count, recv-done.1, send-done.1)
}
)";
constexpr char kModuleTemplate[] = R"(
HloModule test
while-cond {
    param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
%s
%s
ENTRY test-computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (f32[1, 1024, 1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], token[]) send(init, after-all.2),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(c0, recv-done.2, send-done.2)
while-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
while(while-init),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1, 1024, 1024], token[]) get-tuple-element(while-result), index=1
recv-data.2.q = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
%s
}
)";
const char* while_str = nested_p2p_in_main ? kWhileForMain : kEmpty;
const char* pipelined_while_body_str =
other_p2p_in_while ? kPipelinedWhileBodyWithOtherP2P
: kPipelinedWhileBodyWithoutOtherP2P;
const char* result_str =
nested_p2p_in_main ? kNestedResult
: (test_custom_call ? kUnnestedResultWithCustomCall
: kUnnestedResult);
return absl::StrFormat(kModuleTemplate, while_str, pipelined_while_body_str,
result_str);
}
TEST_F(P2PSchedulePreparationTest, UnnestedPipelinedP2PChainTransformed) {
std::string kModuleStr = GetPipelinedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
HloInstruction* recv_1 = FindInstruction(module.get(), "recv.1");
HloInstruction* collective_1 =
FindInstruction(module.get(), "collective-permute.1");
EXPECT_EQ(recv_1->control_predecessors()[0], collective_1);
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* collective_2 =
FindInstruction(module.get(), "collective-permute.2");
EXPECT_TRUE((!collective_2->control_predecessors().empty() &&
collective_2->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == collective_2));
}
TEST_F(P2PSchedulePreparationTest, NestedPipelinedP2PChainTransformed) {
  std::string kModuleStr =
      GetPipelinedP2PModuleString(/*nested_p2p_in_main=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
VerifyUnpipelinedP2P(module.get(), ".3");
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* while_2 = FindInstruction(module.get(), "while-2");
EXPECT_TRUE((!while_2->control_predecessors().empty() &&
while_2->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == while_2));
}
TEST_F(P2PSchedulePreparationTest,
UnnestedPipelinedP2PChainWithOtherP2PTransformed) {
  std::string kModuleStr = GetPipelinedP2PModuleString(
      /*nested_p2p_in_main=*/false, /*other_p2p_in_while=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
VerifyUnpipelinedP2P(module.get(), ".4");
HloInstruction* pipelined_recv = FindInstruction(module.get(), "recv.1");
HloInstruction* other_send_done =
FindInstruction(module.get(), "send-done.4");
EXPECT_EQ(1, absl::c_count(pipelined_recv->control_predecessors(),
other_send_done));
}
TEST_F(P2PSchedulePreparationTest,
UnnestedPipelinedP2PChainWithCustomCallTransformed) {
  std::string kModuleStr = GetPipelinedP2PModuleString(
      /*nested_p2p_in_main=*/false, /*other_p2p_in_while=*/false,
      /*test_custom_call=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
EXPECT_TRUE((!custom_call->control_predecessors().empty() &&
custom_call->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == custom_call));
}
TEST_F(P2PSchedulePreparationTest, PipelinedP2PChain2Transformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.f = (u32[2], token[]) get-tuple-element(param), index=1
recv-data.0 = u32[2] get-tuple-element(recv-done.0.f), index=0
recv-done.1.f = (u32[2], token[]) get-tuple-element(param), index=2
recv-data.1 = u32[2] get-tuple-element(recv-done.1.f), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(s, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1.n = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1.n), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (u32[2], u32[], token[]) send(s, after-all.1.n),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT result = (u32[], (u32[2], token[]), (u32[2], token[]), token[], token[])
tuple(new_count, recv-done.0, recv-done.1, send-done.0, send-done.1)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
after-all.0.p = token[] after-all()
recv.2 = (u32[2], u32[], token[]) recv(after-all.0.p), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.2 = (u32[2], u32[], token[]) send(init, after-all.0.p),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (u32[2], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1.p = token[] after-all()
recv.3 = (u32[2], u32[], token[]) recv(after-all.1.p), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.3 = (u32[2], u32[], token[]) send(init, after-all.1.p),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.3 = (u32[2], token[]) recv-done(recv.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.3 = token[] send-done(send.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
while_init = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) tuple(c0, recv-done.2, recv-done.3, send-done.2, send-done.3)
    while_result = (u32[], (u32[2], token[]), (u32[2], token[]),
      token[], token[]) while(while_init), body=body, condition=cond,
      backend_config={"known_trip_count":{"n":"10"}}
    recv-done.0.q = (u32[2], token[]) get-tuple-element(while_result), index=1
    recv-data.0.q = u32[2] get-tuple-element(recv-done.0.q), index=0
    recv-done.1.q = (u32[2], token[]) get-tuple-element(while_result), index=2
recv-data.1.q = u32[2] get-tuple-element(recv-done.1.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0.q, recv-data.1.q)
s = u32[2] add(c1, recv-data)
ROOT result = u32[2] add(s, recv-data)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
VerifyPipelined2P2PChild(module.get(), ".0", ".1");
VerifyPipelined2P2PParent(module.get(), ".2", ".3");
}
TEST_F(P2PSchedulePreparationTest, UnpipelinedP2PChain2Transformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyP2P2GroupChain(module.get(), ".0", ".1");
}
TEST_F(P2PSchedulePreparationTest, Unpipelined2SeparatedChainTransformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2}}"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2}}"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2
send-done.1 = token[] send-done(send.1), channel_id=2
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kModuleStr)));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyUnpipelinedP2P(module.get(), ".0");
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* recv0 = FindInstruction(module.get(), "recv.0");
if (!recv0->control_predecessors().empty()) {
HloInstruction* send_done1 = FindInstruction(module.get(), "send-done.1");
EXPECT_EQ(recv0->control_predecessors()[0], send_done1);
} else {
HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
HloInstruction* send_done0 = FindInstruction(module.get(), "send-done.0");
EXPECT_TRUE(!recv1->control_predecessors().empty());
EXPECT_EQ(recv1->control_predecessors()[0], send_done0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/p2p_schedule_preparation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/p2p_schedule_preparation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1fad3ef6-ae0a-42aa-9a5c-bfca8baf0107 | cpp | tensorflow/tensorflow | while_loop_unroller | third_party/xla/xla/service/while_loop_unroller.cc | third_party/xla/xla/service/while_loop_unroller_test.cc | #include "xla/service/while_loop_unroller.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/overflow_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using hlo_query::ContainsInstrWithOpcode;
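// Builds a trivial while-condition computation: induction variable <=
// `init_value` (kLe), so a loop wrapping the fully unrolled body executes
// exactly once.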
std::unique_ptr<HloComputation> MakeTrivialLoopCondition(
HloInstruction* while_op, std::string_view name, int64_t induction_idx,
int64_t init_value) {
auto condition_builder = HloComputation::Builder(name);
absl::StatusOr<HloInstruction*> param_instruction =
condition_builder.AddParameter(
while_op->while_condition()->parameter_instruction(0)->Clone());
HloInstruction* indvar_instruction =
condition_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
param_instruction.value(), induction_idx));
HloInstruction* init_value_constant = condition_builder.AddInstruction(
MakeScalarConstantWithShape(indvar_instruction->shape(), init_value));
return condition_builder.Build(
condition_builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PrimitiveType::PRED, {}), indvar_instruction,
init_value_constant, ComparisonDirection::kLe)));
}
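// Rewrites DynamicGte/DynamicTuple custom-calls, whose index operand has
// become a compile-time constant after unrolling, into plain
// get-tuple-element/tuple instructions.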
absl::Status HandleDynamicGteOrTuple(HloInstruction* instr) {
if (instr->IsCustomCall("DynamicGte")) {
    HloEvaluator evaluator(/*max_loop_iterations=*/0);
TF_ASSIGN_OR_RETURN(
Literal index_lit,
evaluator.Evaluate(instr->mutable_operand(1),
{},
true));
auto index = LiteralUtil::LiteralAsScalarInt64(std::move(index_lit));
TF_RET_CHECK(index.has_value());
return instr->parent()->ReplaceInstruction(
instr, instr->AddInstruction(HloInstruction::CreateGetTupleElement(
instr->mutable_operand(0), index.value())));
} else if (instr->IsCustomCall("DynamicTuple")) {
    HloEvaluator evaluator(/*max_loop_iterations=*/0);
std::vector<HloInstruction*> tuple_operands;
TF_ASSIGN_OR_RETURN(
Literal index_lit,
evaluator.Evaluate(instr->mutable_operand(2),
{},
true));
auto index = LiteralUtil::LiteralAsScalarInt64(std::move(index_lit));
TF_RET_CHECK(index.has_value());
for (int64_t i = 0; i < instr->operand(0)->shape().tuple_shapes_size();
i++) {
if (i == index.value()) {
tuple_operands.push_back(instr->mutable_operand(1));
} else {
HloInstruction* slice =
instr->AddInstruction(HloInstruction::CreateGetTupleElement(
instr->mutable_operand(0), i));
tuple_operands.push_back(slice);
}
}
return instr->parent()->ReplaceInstruction(
instr,
instr->AddInstruction(HloInstruction::CreateTuple(tuple_operands)));
}
return absl::OkStatus();
}
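// Replaces uses of the induction-variable get-tuple-element inside `body`
// with `induction_value_constant`, leaving the canonical indvar + constant
// increment intact.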
absl::Status ReplaceInductionVarUses(HloComputation* body,
HloInstruction* induction_value_constant,
int64_t induction_var_idx) {
for (HloInstruction* body_inst : body->instructions()) {
if (!Match(body_inst,
match::GetTupleElement(match::Parameter().WithParameterNum(0))
.WithTupleIndex(induction_var_idx))) {
continue;
}
std::vector<HloInstruction*> indvar_uses;
indvar_uses.reserve(body_inst->users().size());
for (HloInstruction* indvar_use : body_inst->users()) {
indvar_uses.push_back(indvar_use);
}
for (HloInstruction* indvar_use : indvar_uses) {
if (Match(indvar_use, match::Add(match::GetTupleElement().WithTupleIndex(
induction_var_idx),
match::Constant()))) {
continue;
}
for (int64_t i = 0; i < indvar_use->operand_count(); ++i) {
const HloInstruction* indvar_use_operand = indvar_use->operand(i);
if (indvar_use_operand == body_inst) {
TF_RETURN_IF_ERROR(
indvar_use->ReplaceOperandWith(i, induction_value_constant));
}
}
}
}
return absl::OkStatus();
}
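// Clones the while body for one concrete `induction_value`: substitutes the
// constant for induction-variable uses, gives collectives fresh channel ids,
// and lowers DynamicGte/DynamicTuple custom-calls.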
absl::StatusOr<std::unique_ptr<HloComputation>>
UnrollSingleIterationOfTrivialLoop(HloInstruction* while_op,
WhileLoopConfig config,
const int64_t induction_value) {
std::unique_ptr<HloComputation> while_body_clone =
while_op->while_body()->Clone(
absl::StrCat(while_op->name(), induction_value));
HloInstruction* induction_var_hlo =
while_op->mutable_operand(0)->mutable_operand(config.induction_var_idx);
int64_t unique_channel_id = hlo_query::NextChannelId(*while_op->GetModule());
HloInstruction* induction_value_constant = while_body_clone->AddInstruction(
MakeScalarConstantWithShape(induction_var_hlo->shape(), induction_value));
TF_RETURN_IF_ERROR(ReplaceInductionVarUses(while_body_clone.get(),
induction_value_constant,
config.induction_var_idx));
for (HloInstruction* body_inst : while_body_clone->instructions()) {
HloInstruction* collective = IsOrHasCollectiveWithChannelId(body_inst);
if (collective != nullptr) {
collective->set_channel_id(unique_channel_id++);
}
TF_RETURN_IF_ERROR(HandleDynamicGteOrTuple(body_inst));
}
return while_body_clone;
}
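// Cheap feasibility gate on unrolling: rejects loops whose body is too large,
// whose trip count exceeds the threshold, or whose unrolled instruction count
// would blow past the expansion limit.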
bool InitialFeasibilityCheck(const HloInstruction* while_op,
const WhileLoopConfig config,
const UnrollConfig unroll_config) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
VLOG(5) << "Trying to unroll " << while_op->ToShortString();
if (while_op->while_body()->instruction_count() >
unroll_config.instruction_count_threshold) {
VLOG(5) << absl::StrCat(
"Cannot unroll while loop. Too many instructions in the body: ",
while_op->while_body()->instruction_count());
return false;
}
if (config.trip_count > unroll_config.trip_count_threshold) {
VLOG(5) << absl::StrCat(
"Cannot unroll while loop. The trip count is greater "
"than the threshold: ",
config.trip_count, " vs ", unroll_config.trip_count_threshold);
return false;
}
if (config.trip_count * while_op->while_body()->instruction_count() >
unroll_config.expand_factor_threshold) {
VLOG(5) << absl::StrCat(
"Not attempting to unroll due to instruction count "
"increase explosion. New instruction count: ",
config.trip_count * while_op->while_body()->instruction_count(), " vs ",
unroll_config.expand_factor_threshold);
return false;
}
return true;
}
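// Fully unrolls `while_op` by chaining one kCall per iteration, replaces the
// while with the final call, and flattens the call graph.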
absl::StatusOr<bool> UnrollInternal(HloInstruction* while_op,
WhileLoopConfig config) {
VLOG(3) << "Unrolling while instruction " << while_op->ToShortString()
<< " with body instruction count "
<< while_op->while_body()->instruction_count();
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
HloInstruction* unrolled_body_call_op;
std::vector<HloInstruction*> call_operands = {while_op->operands().at(0)};
for (int64_t i = config.init; i < config.trip_count + config.init; ++i) {
CHECK(OverflowSafeAdd(i, (int64_t)1).has_value());
HloComputation* unrolled_body = module->AddEmbeddedComputation(
UnrollSingleIterationOfTrivialLoop(while_op, config, i).value());
unrolled_body_call_op =
computation->AddInstruction(HloInstruction::CreateCall(
while_op->shape(), call_operands, unrolled_body));
call_operands.clear();
call_operands.emplace_back(unrolled_body_call_op);
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(while_op, unrolled_body_call_op));
TF_RETURN_IF_ERROR(FlattenCallGraph().Run(module).status());
return true;
}
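// Like UnrollInternal, but wraps the chained per-iteration calls in a new
// trivial while loop (see MakeTrivialLoopCondition) so downstream passes
// still see a loop; returns the replacement while op alongside the unrolled
// flag.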
absl::StatusOr<UnrollResult> UnrollInternalWrappedAndReturnReplacement(
HloInstruction* while_op, WhileLoopConfig config) {
VLOG(3) << "Unrolling (wrapped) while instruction "
<< while_op->ToShortString() << " with body instruction count "
<< while_op->while_body()->instruction_count();
HloModule* module = while_op->GetModule();
HloComputation* computation = while_op->parent();
HloInstruction* unrolled_body_call_op;
std::vector<HloInstruction*> call_operands;
auto body_builder =
HloComputation::Builder(absl::StrCat("unrolled-body-", while_op->name()));
absl::StatusOr<HloInstruction*> p = body_builder.AddParameter(
while_op->while_body()->parameter_instruction(0)->Clone());
call_operands.emplace_back(std::move(p.value()));
for (int64_t i = config.init; i < config.trip_count + config.init; ++i) {
CHECK(OverflowSafeAdd(i, (int64_t)1).has_value());
HloComputation* unrolled_body = module->AddEmbeddedComputation(
UnrollSingleIterationOfTrivialLoop(while_op, config, i).value());
unrolled_body_call_op = body_builder.AddInstruction(
HloInstruction::CreateCall(while_op->shape(), call_operands,
unrolled_body),
absl::StrCat(while_op->name(), "-unrolled-body-call-", i));
call_operands.clear();
call_operands.emplace_back(unrolled_body_call_op);
}
HloComputation* new_body =
module->AddEmbeddedComputation(body_builder.Build(unrolled_body_call_op));
HloComputation* new_cond =
module->AddEmbeddedComputation(MakeTrivialLoopCondition(
while_op, absl::StrCat("unrolled", while_op->name(), "-cond"),
config.induction_var_idx, config.init));
HloInstruction* new_while_op =
computation->AddInstruction(HloInstruction::CreateWhile(
while_op->shape(), new_cond, new_body, while_op->mutable_operand(0)));
while_op->SetupDerivedInstruction(new_while_op);
CHECK_OK(computation->ReplaceInstruction(while_op, new_while_op));
TF_RETURN_IF_ERROR(FlattenCallGraph().Run(module).status());
UnrollResult result;
result.unrolled = true;
result.new_while_op = new_while_op;
return result;
}
absl::StatusOr<bool> UnrollInternalWrapped(HloInstruction* while_op,
WhileLoopConfig config) {
TF_ASSIGN_OR_RETURN(
UnrollResult result,
UnrollInternalWrappedAndReturnReplacement(while_op, config));
return result.unrolled;
}
}  // namespace
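// Returns true if `instr` is the get-tuple-element of the loop's induction
// variable, looking through fusion parameters when needed.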
bool IsLoopInductionVar(const HloInstruction* instr,
const WhileLoopConfig& config) {
if (!instr->parent()->IsFusionComputation()) {
return Match(instr, match::GetTupleElement(match::Parameter(),
config.induction_var_idx));
} else {
if (!Match(instr, match::Parameter())) {
return false;
}
HloInstruction* caller_fusion = instr->parent()->FusionInstruction();
return IsLoopInductionVar(caller_fusion->operand(instr->parameter_number()),
config);
}
}
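// Returns true if `instr` depends only on constants and the loop's induction
// variable, i.e. it becomes statically known once the loop is unrolled.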
bool IsEffectivelyStatic(const HloInstruction* instr,
const WhileLoopConfig& config) {
switch (instr->opcode()) {
case HloOpcode::kConstant:
return true;
case HloOpcode::kParameter: {
if (instr->parent()->IsFusionComputation()) {
HloInstruction* caller_fusion = instr->parent()->FusionInstruction();
return IsEffectivelyStatic(
caller_fusion->operand(instr->parameter_number()), config);
}
return false;
}
case HloOpcode::kGetTupleElement: {
if (instr->parent() != config.while_instr->while_body()) {
return false;
}
if (!Match(instr, match::GetTupleElement(match::Parameter(),
config.induction_var_idx))) {
return false;
}
return true;
}
default: {
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (!IsEffectivelyStatic(instr->operand(i), config)) {
return false;
}
}
return true;
}
}
}
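// Matches a dynamic-slice of `input` whose single non-zero start index is
// effectively static with respect to the loop (see above); returns the
// dimension of that index.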
std::optional<int64_t> MatchEffectivelyStaticDynamicSliceInsideLoop(
const HloInstruction* instr, const HloInstruction* input,
const WhileLoopConfig& config) {
if (instr->opcode() != HloOpcode::kDynamicSlice) {
return std::nullopt;
}
int64_t start_indices_offset = 1;
const HloInstruction* operand = instr->operand(0);
if (operand != input) {
VLOG(3) << "Input of dynamic index instruction is not the given operand.";
return std::nullopt;
}
int64_t dynamic_index = -1;
for (int64_t start_index = start_indices_offset;
start_index < instr->operand_count(); ++start_index) {
const HloInstruction* index = instr->operand(start_index);
if (Match(index, match::ConstantScalar())) {
std::optional<int64_t> offset =
LiteralUtil::LiteralAsScalarInt64(index->literal());
if (offset.has_value() && offset.value() != 0) {
VLOG(3) << "Constant index " << start_index << " is not zero.";
return std::nullopt;
}
continue;
}
if (IsEffectivelyStatic(index, config)) {
if (dynamic_index != -1) {
VLOG(3) << "Multiple non-constant indices.";
return std::nullopt;
}
dynamic_index = start_index - start_indices_offset;
}
}
if (dynamic_index == -1) {
VLOG(3) << "No dynamic index found.";
return std::nullopt;
}
return dynamic_index;
}
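// Matches a dynamic-slice or dynamic-update-slice on `input` whose only
// non-zero start index is the induction variable and whose indexed dimension
// equals the trip count, so the loop's iterations collectively cover the
// whole dimension.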
std::optional<int64_t> MatchShapeCoveringDynamicIndexInstruction(
const HloInstruction* instr, const HloInstruction* input, HloOpcode opcode,
const WhileLoopConfig& config) {
if (instr->opcode() != opcode) {
return std::nullopt;
}
int64_t start_indices_offset;
if (instr->opcode() == HloOpcode::kDynamicSlice) {
start_indices_offset = 1;
} else if (instr->opcode() == HloOpcode::kDynamicUpdateSlice) {
start_indices_offset = 2;
} else {
return std::nullopt;
}
const HloInstruction* operand = instr->operand(0);
if (input != nullptr && operand != input) {
VLOG(3) << "Input of dynamic index instruction is not the given operand.";
return std::nullopt;
}
int64_t dynamic_index = -1;
for (int64_t start_index = start_indices_offset;
start_index < instr->operand_count(); ++start_index) {
const HloInstruction* index = instr->operand(start_index);
if (Match(index, match::ConstantScalar())) {
std::optional<int64_t> offset =
LiteralUtil::LiteralAsScalarInt64(index->literal());
if (offset.has_value() && offset.value() != 0) {
VLOG(3) << "Constant index " << start_index << " is not zero.";
return std::nullopt;
}
continue;
}
if (IsLoopInductionVar(index, config)) {
if (dynamic_index != -1) {
VLOG(3) << "Multiple non-constant indices.";
return std::nullopt;
}
dynamic_index = start_index - start_indices_offset;
}
}
if (dynamic_index == -1) {
VLOG(3) << "No dynamic index found.";
return std::nullopt;
}
if (operand->shape().dimensions(dynamic_index) != config.trip_count) {
VLOG(3) << "The shape's broadcast_dim must be exactly equal to the loop "
"trip count.";
return std::nullopt;
}
return dynamic_index;
}
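// Verifies the structural preconditions for unrolling (single tuple operand,
// no send/recv, no control dependency on the loop, side-effect-free
// condition, statically known trip count) and returns the loop config.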
std::optional<WhileLoopConfig> WhileLoopUnroller::IsLoopUnrollable(
HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  if (while_op->operands().size() != 1) {
VLOG(5) << absl::StrCat(
"Cannot unroll while loop ", while_op->name(),
". While loop must have a single "
"tuple operand, instead has more than one operand: ",
while_op->operands().size());
return std::nullopt;
}
std::vector<HloInstruction*> while_dependees;
for (HloComputation* comp : while_op->GetModule()->computations()) {
for (HloInstruction* instr : comp->instructions()) {
for (HloInstruction* control_dep : instr->control_predecessors()) {
if (control_dep->opcode() == HloOpcode::kWhile) {
while_dependees.push_back(control_dep);
}
}
}
}
if (absl::linear_search(while_dependees.begin(), while_dependees.end(),
while_op)) {
VLOG(2) << "Not attempting to unroll " << while_op->name()
<< " due to control dependency: " << while_op->ToShortString();
return std::nullopt;
}
if (ContainsInstrWithOpcode(while_op->while_body(),
{HloOpcode::kSend, HloOpcode::kSendDone,
HloOpcode::kRecv, HloOpcode::kRecvDone}) ||
ContainsInstrWithOpcode(while_op->while_condition(),
{HloOpcode::kSend, HloOpcode::kSendDone,
HloOpcode::kRecv, HloOpcode::kRecvDone})) {
VLOG(2) << "Not attempting to unroll " << while_op->name()
<< " because it contains a send/recv node: "
<< while_op->ToShortString();
return std::nullopt;
}
if (while_op->operand(0)->opcode() != HloOpcode::kTuple) {
VLOG(2) << "Not attempting to unroll " << while_op->name()
<< " because the operand is not a tuple: "
<< while_op->ToShortString();
return std::nullopt;
}
if (while_op->while_condition()->HasSideEffect()) {
VLOG(2) << "Not attempting to remove while loop whose condition contains "
"side-effecting instructions: "
<< while_op->ToShortString();
return std::nullopt;
}
std::optional<int64_t> indvar_tuple_idx =
GetLoopInductionVarTupleIdx(while_op);
if (!indvar_tuple_idx.has_value()) {
VLOG(2) << "Not attempting to unroll because induction variable could not "
"be found.";
return std::nullopt;
}
  HloEvaluator evaluator(/*max_loop_iterations=*/0);
const HloInstruction* while_init = while_op->operand(0);
const HloInstruction* indvar_init = while_init->operand(*indvar_tuple_idx);
absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
if (!indvar_init_result.ok()) {
VLOG(2) << "Couldn't evaluate induction variable init, "
<< indvar_init_result.status() << ", " << indvar_init->ToString();
return std::nullopt;
}
Literal indvar_iter_val = std::move(indvar_init_result).value();
std::optional<int64_t> trip_count =
MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx, indvar_iter_val);
if (!trip_count.has_value()) {
VLOG(3) << "Loop doesn't have trivial trip count";
return std::nullopt;
}
VLOG(3) << "Loop trip count " << trip_count.value();
WhileLoopConfig config;
config.while_instr = while_op;
config.init =
LiteralUtil::LiteralAsScalarInt64(std::move(indvar_iter_val)).value();
config.trip_count = trip_count.value();
config.induction_var_idx = *indvar_tuple_idx;
return config;
}
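// Runs a set of preparatory simplifications (scalar-only CSE, tuple
// simplification, and while-loop constant sinking) that expose more
// statically analyzable loops. Returns true if the module changed.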
absl::StatusOr<bool> WhileLoopUnroller::PrepareModuleForUnrolling(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(
bool applied_cse,
      HloCSE(/*is_layout_sensitive=*/true, /*only_fusion_computations=*/false,
             /*ignore_control_dependencies=*/false, /*only_scalars=*/true)
          .Run(module, execution_threads));
if (applied_cse) {
changed = true;
VLOG(3) << "Applied hlo cse to module " << module->name();
}
TF_ASSIGN_OR_RETURN(bool applied_tuple_simplifier,
TupleSimplifier{}.Run(module, execution_threads));
if (applied_tuple_simplifier) {
changed = true;
VLOG(3) << "Applied tuple simplifier to module " << module->name();
}
HloPassFix<WhileLoopConstantSinking> constant_sinking(
      /*sink_broadcast_of_constants=*/true,
      /*sink_only_scalar_constants=*/true);
TF_ASSIGN_OR_RETURN(bool applied_constant_sinking,
constant_sinking.Run(module, execution_threads));
if (applied_constant_sinking) {
changed = true;
VLOG(3) << "Applied constant sinking to module " << module->name();
}
return changed;
}
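// Collects every while loop in `module` that passes IsLoopUnrollable and,
// when an UnrollConfig is supplied, the initial feasibility check.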
std::vector<std::pair<HloInstruction*, WhileLoopConfig>>
WhileLoopUnroller::GetUnrollableLoops(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
std::optional<UnrollConfig> unroll_config) {
std::vector<HloInstruction*> all_while_ops;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(all_while_ops),
HloPredicateIsOp<HloOpcode::kWhile>);
}
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> while_loop_configs;
for (HloInstruction* instr : all_while_ops) {
std::optional<WhileLoopConfig> config = IsLoopUnrollable(instr);
if (!config.has_value()) {
continue;
}
if (unroll_config.has_value() &&
!InitialFeasibilityCheck(instr, config.value(),
unroll_config.value())) {
VLOG(3) << "Initial feasibility check failed for " << instr->name();
continue;
}
while_loop_configs.emplace_back(instr, config.value());
}
return while_loop_configs;
}
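// Unrolls a single while loop and returns the replacement information. Only
// full unrolling (unroll_factor == -1) is supported; the unrolled body can
// optionally be wrapped in a trivial single-iteration loop.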
absl::StatusOr<UnrollResult>
WhileLoopUnroller::UnrollAndReturnReplacement(
HloInstruction* while_op, int64_t unroll_factor, bool wrap_in_trivial_loop,
bool force_unroll, bool prepare, const UnrollConfig& unroll_config) {
UnrollResult result;
HloModule* module = while_op->GetModule();
if (unroll_factor != -1) {
VLOG(5) << absl::StrCat(
"Currently, only full unrolling is supported, unroll factor: ",
unroll_factor);
return result;
}
if (prepare) {
    TF_RETURN_IF_ERROR(
        PrepareModuleForUnrolling(module, /*execution_threads=*/{}).status());
}
std::optional<WhileLoopConfig> config = IsLoopUnrollable(while_op);
if (!config.has_value()) {
VLOG(5) << "Not attempting to unroll " << while_op->name()
<< " because it is not unrollable.";
return result;
}
if (!force_unroll &&
!InitialFeasibilityCheck(while_op, config.value(), unroll_config)) {
return result;
}
if (wrap_in_trivial_loop) {
TF_ASSIGN_OR_RETURN(result, UnrollInternalWrappedAndReturnReplacement(
while_op, config.value()));
} else {
TF_ASSIGN_OR_RETURN(result.unrolled,
UnrollInternal(while_op, config.value()));
}
if (result.unrolled) {
TF_RETURN_IF_ERROR(CallInliner().Run(module).status());
}
return result;
}
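// Pass entry point: prepares the module, fully unrolls every unrollable while
// loop, and inlines the calls produced by unrolling.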
absl::StatusOr<bool> WhileLoopUnroller::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (unroll_factor_ != -1) {
return false;
}
XLA_VLOG_LINES(3, "WhileLoopUnroller::Run(), before:\n" + module->ToString());
bool changed = false;
TF_ASSIGN_OR_RETURN(changed,
PrepareModuleForUnrolling(module, execution_threads));
std::vector<HloInstruction*> all_while_ops;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(all_while_ops),
HloPredicateIsOp<HloOpcode::kWhile>);
}
std::vector<std::pair<HloInstruction*, WhileLoopConfig>>
unrollable_while_ops = GetUnrollableLoops(
module, execution_threads, unroll_config_);
VLOG(3) << "Number of while instructions in the module to unroll: "
<< unrollable_while_ops.size();
bool unrolled = false;
for (auto& [while_op, config] : unrollable_while_ops) {
if (wrap_in_trivial_loop_) {
TF_ASSIGN_OR_RETURN(unrolled, UnrollInternalWrapped(while_op, config));
} else {
TF_ASSIGN_OR_RETURN(unrolled, UnrollInternal(while_op, config));
}
changed |= unrolled;
}
if (changed) {
TF_RETURN_IF_ERROR(CallInliner().Run(module, execution_threads).status());
}
XLA_VLOG_LINES(3, "WhileLoopUnroller::Run(), after:\n" + module->ToString());
return changed;
}
}  // namespace xla
#include "xla/service/while_loop_unroller.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class WhileLoopUnrollerTest : public HloTestBase {
protected:
[[nodiscard]] std::unique_ptr<VerifiedHloModule> MakeModuleWithSimpleLoop(
int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithLoopBodyIndirectInc(int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithNestedLoopBodyIndirectInc(int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithWhileFeedingAnotherWhile(int num_iters);
[[nodiscard]] std::unique_ptr<VerifiedHloModule>
MakeModuleWithSimpleLoopAllReduce(int num_iters);
public:
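  // Executes `module` before and after unrolling with the given arguments and
  // checks that the two runs produce (near-)identical results.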
void UnrollAndCompare(std::unique_ptr<HloModule> module,
absl::Span<Literal* const> arguments,
int64_t unroll_factor = -1, bool wrap_in_loop = false) {
Literal before_unroll = ExecuteAndTransfer(module->Clone(), arguments);
VLOG(2) << "before unroll value: " << before_unroll.ToString();
EXPECT_TRUE(WhileLoopUnroller(unroll_factor, wrap_in_loop)
.Run(module.get())
.value());
Literal after_unroll = ExecuteAndTransfer(std::move(module), arguments);
VLOG(2) << "after unroll value: " << after_unroll.ToString();
    ASSERT_TRUE(LiteralTestUtil::NearOrEqual(/*expected=*/before_unroll,
                                             /*actual=*/after_unroll,
                                             /*error=*/std::nullopt));
}
};
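// Returns a module with a loop that runs `num_iters` iterations, incrementing
// an s32 counter by one and doubling a 3-element vector each iteration.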
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithSimpleLoop(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(loop_var.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
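// Like the simple loop above, but the induction variable is incremented by a
// loop-carried tuple element (a sunk constant 1) rather than a literal inside
// the body.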
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithLoopBodyIndirectInc(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple = (s32[], s32[], s32[3]{0}) tuple(inc, get-tuple-element.2, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.1 = s32[] constant(1)
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, constant.4)
ROOT while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
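// Returns a module with an outer loop whose body runs an inner while loop of
// the same indirect-increment form.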
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithNestedLoopBodyIndirectInc(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple = (s32[], s32[], s32[3]{0}) tuple(inc, get-tuple-element.2, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop {
constant.1 = s32[] constant(1)
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, constant.4)
ROOT while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
OuterLoop.body {
loop_var.1 = (s32[], s32[], s32[3]{0}, s32[10]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
get-tuple-element.22 = s32[3]{0} get-tuple-element(loop_var.1), index=2
get-tuple-element.3 = s32[10]{0} get-tuple-element(loop_var.1), index=3
output = s32[10]{0} add(get-tuple-element.3, get-tuple-element.3)
constant.1 = s32[] constant(1)
constant.3 = s32[] constant(0)
tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, get-tuple-element.22)
inner-while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
get-tuple-element.6 = s32[3]{0} get-tuple-element(inner-while), index=2
inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
ROOT tuple = (s32[], s32[], s32[3]{0}, s32[10]{0}) tuple(inc, get-tuple-element.2, get-tuple-element.6, output)
}
OuterLoop.condition {
loop_var.2 = (s32[], s32[], s32[3]{0}, s32[10]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY OuterLoop {
constant.1 = s32[] constant(1)
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
constant.5 = s32[10]{0} constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
tuple.1 = (s32[], s32[], s32[3]{0}, s32[10]{0}) tuple(constant.3, constant.1, constant.4, constant.5)
ROOT while = (s32[], s32[], s32[3]{0}, s32[10]{0}) while(tuple.1), condition=
OuterLoop.condition, body=OuterLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
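// Returns a module in which the result of one while loop feeds the init tuple
// of a second while loop.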
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithWhileFeedingAnotherWhile(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
const1 = s32[] constant(1)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=1
output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
inc = s32[] add(get-tuple-element.1, const1)
ROOT tuple = (s32[], s32[3]{0}) tuple(inc, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
OuterLoop.body {
loop_var.1 = (s32[], s32[3]{0}, s32[10]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.22 = s32[3]{0} get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s32[10]{0} get-tuple-element(loop_var.1), index=2
output1 = s32[3]{0} add(get-tuple-element.22, get-tuple-element.22)
output2 = s32[10]{0} add(get-tuple-element.3, get-tuple-element.3)
one = s32[] constant(1)
inc = s32[] add(get-tuple-element.1, one)
ROOT tuple = (s32[], s32[3]{0}, s32[10]{0}) tuple(inc, output1, output2)
}
OuterLoop.condition {
loop_var.2 = (s32[], s32[3]{0}, s32[10]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY entry.comp {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
constant.5 = s32[10]{0} constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
inner-while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
get-tuple-element.6 = s32[3]{0} get-tuple-element(inner-while), index=1
tuple.2 = (s32[], s32[3]{0}, s32[10]{0}) tuple(constant.3, get-tuple-element.6, constant.5)
ROOT while = (s32[], s32[3]{0}, s32[10]{0}) while(tuple.2), condition=
OuterLoop.condition, body=OuterLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
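// Returns a module whose loop body all-reduces a buffer and accumulates the
// result across `num_iters` iterations.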
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithSimpleLoopAllReduce(int num_iters) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
%reduction {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
SimpleLoop.body {
loop_var.1 = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = f32[1024, 1024] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = f32[1024, 1024] get-tuple-element(loop_var.1), index=2
%all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] get-tuple-element.2), channel_id=1, replica_groups={{0}}, to_apply=%reduction
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] get-tuple-element.3)
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(add, get-tuple-element.2, %accumulation)
}
SimpleLoop.condition {
loop_var.2 = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
%param.1 = f32[1024, 1024] parameter(0)
constant.3 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
tuple.1 = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(constant.3, %param.1, %accumulation_buffer)
ROOT while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
return ParseAndReturnVerifiedModule(hlo_string).value();
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnroll) {
  UnrollAndCompare(MakeModuleWithSimpleLoop(5), /*arguments=*/{},
                   /*unroll_factor=*/-1, /*wrap_in_loop=*/false);
  UnrollAndCompare(MakeModuleWithSimpleLoop(5), /*arguments=*/{},
                   /*unroll_factor=*/-1, /*wrap_in_loop=*/true);
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnrollNeedPrepare) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}, s64[]) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s64[] get-tuple-element(loop_var.1), index=2
add = s64[] add(get-tuple-element.1, get-tuple-element.3)
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}, s64[]) tuple(add, multiply, get-tuple-element.3)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}, s64[]) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(0)
one = s64[] constant(1)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}, s64[]) tuple(constant.3, constant.4, one)
while = (s64[], s32[3]{0}, s64[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT result = s32[3]{0} get-tuple-element(while), index=1
}
)";
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
false);
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
true);
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnrollNeedPrepare2) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}, s64[]) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = s64[] get-tuple-element(loop_var.1), index=2
add = s64[] add(get-tuple-element.1, get-tuple-element.3)
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}, s64[]) tuple(add, multiply, get-tuple-element.3)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}, s64[]) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(0)
one = s64[] constant(1)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}, s64[]) tuple(constant.3, constant.4, one)
gte1 = s64[] get-tuple-element(tuple.1), index=0
gte2 = s32[3]{0} get-tuple-element(tuple.1), index=1
gte3 = s64[] get-tuple-element(tuple.1), index=2
tuple = (s64[], s32[3]{0}, s64[]) tuple(gte1, gte2, gte3)
while = (s64[], s32[3]{0}, s64[]) while(tuple), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT result = s32[3]{0} get-tuple-element(while), index=1
}
)";
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
false);
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
true);
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopNotRoot) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT result = s32[3]{0} get-tuple-element(while), index=1
}
)";
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
false);
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
true);
}
TEST_F(WhileLoopUnrollerTest, GetUnrollableLoops) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body.2 {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition.2 {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body.3 {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] multiply(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition.3 {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
while1 = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
while3 = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition.3, body=SimpleLoop.body.3
while2 = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition.2, body=SimpleLoop.body.2
o1 = s32[3]{0} get-tuple-element(while1), index=1
o2 = s32[3]{0} get-tuple-element(while2), index=1
ROOT result = (s32[3]{0}, s32[3]{0}) tuple(o1,o2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
  auto unrollable_loops = WhileLoopUnroller::GetUnrollableLoops(
      module.get(), /*execution_threads=*/{}, /*unroll_config=*/std::nullopt);
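  // SimpleLoop.body.3 updates its induction variable with a multiply instead
  // of an add, so its trip count cannot be matched; only while1 and while2
  // qualify.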
EXPECT_EQ(unrollable_loops.size(), 2);
}
TEST_F(WhileLoopUnrollerTest, UnrollMultipleLoops) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body.2 {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition.2 {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
while1 = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
input = s32[3]{0} get-tuple-element(while1), index=1
tuple.2 = (s64[], s32[3]{0}) tuple(constant.3, input)
while2 = (s64[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition.2, body=SimpleLoop.body.2
o1 = s32[3]{0} get-tuple-element(while1), index=1
o2 = s32[3]{0} get-tuple-element(while2), index=1
ROOT result = (s32[3]{0}, s32[3]{0}) tuple(o1,o2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
UnrollResult unrolled_result,
WhileLoopUnroller::UnrollAndReturnReplacement(
module->entry_computation()->GetInstructionWithName("while1")));
bool unrolled1 = unrolled_result.unrolled;
EXPECT_TRUE(unrolled1);
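  // UnrollAndReturnReplacement runs CallInliner on success, so no kCall
  // instructions should remain anywhere in the module.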
std::vector<HloInstruction*> call_instrs_1;
for (auto* comp : module->MakeComputationPostOrder()) {
absl::c_copy_if(comp->instructions(), std::back_inserter(call_instrs_1),
HloPredicateIsOp<HloOpcode::kCall>);
}
EXPECT_EQ(call_instrs_1.size(), 0);
TF_ASSERT_OK_AND_ASSIGN(
UnrollResult unrolled_result2,
WhileLoopUnroller::UnrollAndReturnReplacement(
module->entry_computation()->GetInstructionWithName("while2")));
bool unrolled2 = unrolled_result2.unrolled;
EXPECT_TRUE(unrolled2);
std::vector<HloInstruction*> call_instrs_2;
for (auto* comp : module->MakeComputationPostOrder()) {
absl::c_copy_if(comp->instructions(), std::back_inserter(call_instrs_2),
HloPredicateIsOp<HloOpcode::kCall>);
}
EXPECT_EQ(call_instrs_2.size(), 0);
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopNonZeroInit) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
constant.1 = s64[] constant(1)
add = s64[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s64[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
constant.2 = s64[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s64[] constant(4)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s64[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT result = s32[3]{0} get-tuple-element(while), index=1
}
)";
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
false);
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
true);
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopS16IndVar) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s16[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s16[] get-tuple-element(loop_var.1), index=0
constant.1 = s16[] constant(1)
add = s16[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s16[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s16[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s16[] get-tuple-element(loop_var.2), index=0
constant.2 = s16[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s16[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s16[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s16[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
false);
UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
true);
}
TEST_F(WhileLoopUnrollerTest, LoopWithControlDep) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s16[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s16[] get-tuple-element(loop_var.1), index=0
constant.1 = s16[] constant(1)
add = s16[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s16[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s16[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s16[] get-tuple-element(loop_var.2), index=0
constant.2 = s16[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s16[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s16[], s32[3]{0}) tuple(constant.3, constant.4)
while1 = (s16[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
copy1 = copy(constant.3), control-predecessors={while1}
ROOT add = add(copy1, constant.3)
}
)";
EXPECT_FALSE(WhileLoopUnroller()
.Run(ParseAndReturnVerifiedModule(hlo_string).value().get())
.value());
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopPartialUnroll) {
auto m = MakeModuleWithSimpleLoop(5);
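  // Partial unrolling is not supported yet, so an unroll factor other than -1
  // leaves the module unchanged.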
EXPECT_FALSE(WhileLoopUnroller(3).Run(m.get()).value());
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopNoUnrollDueToTripCountThreshold) {
auto m = MakeModuleWithSimpleLoop(5);
UnrollConfig config;
config.trip_count_threshold = 0;
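  // With a threshold of zero, any loop's trip count exceeds the configured
  // limit and the feasibility check rejects it.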
  EXPECT_FALSE(WhileLoopUnroller(/*unroll_factor=*/-1,
                                 /*wrap_in_trivial_loop=*/false, config)
.Run(m.get())
.value());
}
TEST_F(WhileLoopUnrollerTest, IndirectBodyInc) {
  UnrollAndCompare(MakeModuleWithLoopBodyIndirectInc(5), {}, -1, false);
  UnrollAndCompare(MakeModuleWithLoopBodyIndirectInc(5), {}, -1, true);
}
TEST_F(WhileLoopUnrollerTest, NestedIndirectBodyInc) {
  UnrollAndCompare(MakeModuleWithNestedLoopBodyIndirectInc(5), {}, -1, false);
  UnrollAndCompare(MakeModuleWithNestedLoopBodyIndirectInc(5), {}, -1, true);
}
TEST_F(WhileLoopUnrollerTest, WhileFeedingWhile) {
  UnrollAndCompare(MakeModuleWithWhileFeedingAnotherWhile(5), {}, -1, false);
  UnrollAndCompare(MakeModuleWithWhileFeedingAnotherWhile(5), {}, -1, true);
}
TEST_F(WhileLoopUnrollerTest, LoopWithCollective) {
int64_t num_iters = 5;
auto module = MakeModuleWithSimpleLoopAllReduce(num_iters);
  EXPECT_TRUE(
      WhileLoopUnroller(/*unroll_factor=*/-1).Run(module.get()).value());
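  // Full unrolling clones the loop body once per iteration, so the all-reduce
  // now appears num_iters times in the entry computation.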
EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return instruction->opcode() ==
HloOpcode::kAllReduce;
}),
num_iters);
}
TEST_F(WhileLoopUnrollerTest, LoopWithCollective2) {
std::string hlo_string = R"(
HloModule module, entry_computation_layout={(s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)})->(s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)})}
fused_computation.70.clone.clone.clone {
param_0.10545 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} parameter(0)
ROOT bitcast.7213 = s8[32,2048,1]{1,0,2:T(8,128)(4,1)} bitcast(param_0.10545)
}
fused_computation.68.clone.clone.clone {
param_1.12561 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} parameter(1)
constant.26622 = s8[]{:T(512)} constant(0)
pad.3783 = s8[1,2048,2,4096]{3,1,2,0:T(8,128)(4,1)} pad(param_1.12561, constant.26622), padding=0_0x0_0x0_1x0_0
constant.26621 = s32[]{:T(128)} constant(0)
param_2.10214 = s32[]{:T(128)S(6)} parameter(2)
dynamic-slice.5474 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} dynamic-slice(pad.3783, constant.26621, constant.26621, constant.26621, param_2.10214), dynamic_slice_sizes={1,2048,2,256}
pad.3782 = s8[1,2048,2,4096]{3,1,2,0:T(8,128)(4,1)} pad(param_1.12561, constant.26622), padding=0_0x0_0x1_0x0_0
param_0.10544 = s32[]{:T(128)S(6)} parameter(0)
dynamic-slice.5473 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} dynamic-slice(pad.3782, constant.26621, constant.26621, constant.26621, param_0.10544), dynamic_slice_sizes={1,2048,2,256}
add.10207 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} add(dynamic-slice.5474, dynamic-slice.5473)
ROOT bitcast.7212 = s8[2048,2,256]{2,0,1:T(8,128)(4,1)} bitcast(add.10207)
}
fused_computation.71.clone {
param_3.7588 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} parameter(3)
fusion.4288 = s8[32,2048,1]{1,0,2:T(8,128)(4,1)} fusion(param_3.7588), kind=kLoop, calls=fused_computation.70.clone.clone.clone
param_0.10546 = s32[]{:T(128)S(6)} parameter(0)
param_1.12562 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} parameter(1)
param_2.10215 = s32[]{:T(128)S(6)} parameter(2)
fusion.4287 = s8[2048,2,256]{2,0,1:T(8,128)(4,1)} fusion(param_0.10546, param_1.12562, param_2.10215), kind=kLoop, calls=fused_computation.68.clone.clone.clone
convolution.802 = s32[32,2,256]{2,0,1:T(8,128)} convolution(fusion.4288, fusion.4287), window={size=2 pad=1_1 rhs_reversal=1}, dim_labels=bf0_i0o->b0f
ROOT bitcast.7214 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} bitcast(convolution.802)
}
fused_computation.76.clone {
param_0.10547 = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(0)
param_1.12563 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} parameter(1)
slice.12606 = s32[1,32,1,256]{3,1,2,0:T(8,128)} slice(param_1.12563), slice={[0:1], [0:32], [1:2], [0:256]}
bitcast.7215 = s32[1,32,256]{2,1,0:T(8,128)} bitcast(slice.12606)
add.10208 = s32[1,32,256]{2,1,0:T(8,128)S(1)} add(param_0.10547, bitcast.7215)
param_2.10216 = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(2)
slice.12000.clone.2 = s32[1,32,1,256]{3,1,2,0:T(8,128)} slice(param_1.12563), slice={[0:1], [0:32], [0:1], [0:256]}
bitcast.1776.clone.2 = s32[1,32,256]{2,1,0:T(8,128)} bitcast(slice.12000.clone.2)
add.6006.clone.2 = s32[1,32,256]{2,1,0:T(8,128)S(1)} add(param_2.10216, bitcast.1776.clone.2)
ROOT tuple.2892 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}) tuple(add.10208, add.6006.clone.2)
}
fused_computation.69.clone.clone.clone {
param_0.10549 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} parameter(0)
ROOT bitcast.7217 = s8[32,2048,1]{1,0,2:T(8,128)(4,1)} bitcast(param_0.10549)
}
fused_computation.66.clone.clone.clone {
param_1.12564 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} parameter(1)
constant.26625 = s8[]{:T(512)} constant(0)
pad.3785 = s8[1,2048,2,4096]{3,1,2,0:T(8,128)(4,1)} pad(param_1.12564, constant.26625), padding=0_0x0_0x0_1x0_0
constant.26624 = s32[]{:T(128)} constant(0)
param_2.10217 = s32[]{:T(128)S(6)} parameter(2)
dynamic-slice.5476 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} dynamic-slice(pad.3785, constant.26624, constant.26624, constant.26624, param_2.10217), dynamic_slice_sizes={1,2048,2,256}
pad.3784 = s8[1,2048,2,4096]{3,1,2,0:T(8,128)(4,1)} pad(param_1.12564, constant.26625), padding=0_0x0_0x1_0x0_0
param_0.10548 = s32[]{:T(128)S(6)} parameter(0)
dynamic-slice.5475 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} dynamic-slice(pad.3784, constant.26624, constant.26624, constant.26624, param_0.10548), dynamic_slice_sizes={1,2048,2,256}
add.10212 = s8[1,2048,2,256]{3,1,2,0:T(8,128)(4,1)} add(dynamic-slice.5476, dynamic-slice.5475)
ROOT bitcast.7216 = s8[2048,2,256]{2,0,1:T(8,128)(4,1)} bitcast(add.10212)
}
fused_computation.72.clone {
param_3.7589 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} parameter(3)
fusion.4292 = s8[32,2048,1]{1,0,2:T(8,128)(4,1)} fusion(param_3.7589), kind=kLoop, calls=fused_computation.69.clone.clone.clone
param_0.10550 = s32[]{:T(128)S(6)} parameter(0)
param_1.12565 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} parameter(1)
param_2.10218 = s32[]{:T(128)S(6)} parameter(2)
fusion.4291 = s8[2048,2,256]{2,0,1:T(8,128)(4,1)} fusion(param_0.10550, param_1.12565, param_2.10218), kind=kLoop, calls=fused_computation.66.clone.clone.clone
convolution.803 = s32[32,2,256]{2,0,1:T(8,128)} convolution(fusion.4292, fusion.4291), window={size=2 pad=1_1 rhs_reversal=1}, dim_labels=bf0_i0o->b0f
ROOT bitcast.7218 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} bitcast(convolution.803)
}
fused_computation.74.clone {
param_0.10551 = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(0)
param_1.12566 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} parameter(1)
slice.12607 = s32[1,32,1,256]{3,1,2,0:T(8,128)} slice(param_1.12566), slice={[0:1], [0:32], [1:2], [0:256]}
bitcast.7219 = s32[1,32,256]{2,1,0:T(8,128)} bitcast(slice.12607)
add.10213 = s32[1,32,256]{2,1,0:T(8,128)S(1)} add(param_0.10551, bitcast.7219)
param_2.10219 = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(2)
slice.11997.clone.2 = s32[1,32,1,256]{3,1,2,0:T(8,128)} slice(param_1.12566), slice={[0:1], [0:32], [0:1], [0:256]}
bitcast.1773.clone.2 = s32[1,32,256]{2,1,0:T(8,128)} bitcast(slice.11997.clone.2)
add.6005.clone.2 = s32[1,32,256]{2,1,0:T(8,128)S(1)} add(param_2.10219, bitcast.1773.clone.2)
ROOT tuple.2893 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}) tuple(add.10213, add.6005.clone.2)
}
wide.windowed_dot_general_body {
wide_param.41 = (s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}) parameter(0)
get-tuple-element.29000 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} get-tuple-element(wide_param.41), index=0
get-tuple-element.29001 = s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)} get-tuple-element(wide_param.41), index=1
get-tuple-element.28990 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(wide_param.41), index=3
collective-permute-start = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:S(2)}, u32[]{:S(2)}) collective-permute-start(get-tuple-element.28990), channel_id=18, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,16},{32,33},{33,34},{34,35},{35,36},{36,37},{37,38},{38,39},{39,40},{40,41},{41,42},{42,43},{43,44},{44,45},{45,46},{46,47},{47,32},{48,49},{49,50},{50,51},{51,52},{52,53},{53,54},{54,55},{55,56},{56,57},{57,58},{58,59},{59,60},{60,61},{61,62},{62,63},{63,48},{64,65},{65,66},{66,67},{67,68},{68,69},{69,70},{70,71},{71,72},{72,73},{73,74},{74,75},{75,76},{76,77},{77,78},{78,79},{79,64},{80,81},{81,82},{82,83},{83,84},{84,85},{85,86},{86,87},{87,88},{88,89},{89,90},{90,91},{91,92},{92,93},{93,94},{94,95},{95,80},{96,97},{97,98},{98,99},{99,100},{100,101},{101,102},{102,103},{103,104},{104,105},{105,106},{106,107},{107,108},{108,109},{109,110},{110,111},{111,96},{112,113},{113,114},{114,115},{115,116},{116,117},{117,118},{118,119},{119,120},{120,121},{121,122},{122,123},{123,124},{124,125},{125,126},{126,127},{127,112},{128,129},{129,130},{130,131},{131,132},{132,133},{133,134},{134,135},{135,136},{136,137},{137,138},{138,139},{139,140},{140,141},{141,142},{142,143},{143,128},{144,145},{145,146},{146,147},{147,148},{148,149},{149,150},{150,151},{151,152},{152,153},{153,154},{154,155},{155,156},{156,157},{157,158},{158,159},{159,144},{160,161},{161,162},{162,163},{163,164},{164,165},{165,166},{166,167},{167,168},{168,169},{169,170},{170,171},{171,172},{172,173},{173,174},{174,175},{175,160},{176,177},{177,178},{178,179},{179,180},{180,181},{181,182},{182,183},{183,184},{184,185},{185,186},{186,187},{187,188},{188,189},{189,190},{190,191},{191,176},{192,193},{193,194},{194,195},{195,196},{196,197},{197,198},{198,199},{199,200},{200,201},{201,202},{202,203},{203,204},{204,205},{205,206},{206,207},{207,192},{208,209},{209,210},{210,211},{211,212},{212,213},{213,214},{214,215},{215,216},{216,217},{217,218},{218,219},{219,220},{220,221},{221,222},{222,223},{223,208},{224,225},{225,226},{226,227},{227,228},{228,229},{229,230},{230,231},{231,232},{232,233},{233,234},{234,235},{235,236},{236,237},{237,238},{238,239},{239,224},{240,241},{241,242},{242,243},{243,244},{244,245},{245,246},{246,247},{247,248},{248,249},{249,250},{250,251},{251,252},{252,253},{253,254},{254,255},{255,240}}
collective-permute-done = s32[1,32,256]{2,1,0:T(8,128)S(1)} collective-permute-done(collective-permute-start)
get-tuple-element.29005 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=5
get-tuple-element.29006 = u32[256]{0:T(256)} get-tuple-element(wide_param.41), index=6
partition-id.101 = u32[] partition-id()
dynamic-slice.5472 = u32[1]{0:T(128)} dynamic-slice(get-tuple-element.29006, partition-id.101), dynamic_slice_sizes={1}
bitcast.7210 = u32[]{:T(128)} bitcast(dynamic-slice.5472)
get-tuple-element.29007 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=7
add.10204 = u32[]{:T(128)S(6)} add(bitcast.7210, get-tuple-element.29007)
get-tuple-element.28991 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=4
subtract.2863 = u32[]{:T(128)S(6)} subtract(add.10204, get-tuple-element.28991)
get-tuple-element.29008 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=8
and.400 = u32[]{:T(128)S(6)} and(subtract.2863, get-tuple-element.29008)
clamp.1712 = u32[]{:T(128)S(6)} clamp(get-tuple-element.29005, and.400, get-tuple-element.29008)
convert.8615 = s32[]{:T(128)S(6)} convert(clamp.1712)
get-tuple-element.29009 = s32[]{:T(128)} get-tuple-element(wide_param.41), index=9
multiply.14830 = s32[]{:T(128)S(6)} multiply(convert.8615, get-tuple-element.29009)
bitcast.8823 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} bitcast(get-tuple-element.29001)
add.10205 = u32[]{:T(128)S(6)} add(get-tuple-element.28991, bitcast.7210)
get-tuple-element.29010 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=10
add.10206 = u32[]{:T(128)S(6)} add(add.10205, get-tuple-element.29010)
and.401 = u32[]{:T(128)S(6)} and(add.10206, get-tuple-element.29008)
clamp.1713 = u32[]{:T(128)S(6)} clamp(get-tuple-element.29005, and.401, get-tuple-element.29008)
convert.8616 = s32[]{:T(128)S(6)} convert(clamp.1713)
multiply.14831 = s32[]{:T(128)S(6)} multiply(convert.8616, get-tuple-element.29009)
fusion.4289 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} fusion(multiply.14830, bitcast.8823, multiply.14831, get-tuple-element.29000), kind=kOutput, calls=fused_computation.71.clone
get-tuple-element.28989 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(wide_param.41), index=2
collective-permute-start.1 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:S(2)}, u32[]{:S(2)}) collective-permute-start(get-tuple-element.28989), channel_id=17, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,31},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30},{32,47},{33,32},{34,33},{35,34},{36,35},{37,36},{38,37},{39,38},{40,39},{41,40},{42,41},{43,42},{44,43},{45,44},{46,45},{47,46},{48,63},{49,48},{50,49},{51,50},{52,51},{53,52},{54,53},{55,54},{56,55},{57,56},{58,57},{59,58},{60,59},{61,60},{62,61},{63,62},{64,79},{65,64},{66,65},{67,66},{68,67},{69,68},{70,69},{71,70},{72,71},{73,72},{74,73},{75,74},{76,75},{77,76},{78,77},{79,78},{80,95},{81,80},{82,81},{83,82},{84,83},{85,84},{86,85},{87,86},{88,87},{89,88},{90,89},{91,90},{92,91},{93,92},{94,93},{95,94},{96,111},{97,96},{98,97},{99,98},{100,99},{101,100},{102,101},{103,102},{104,103},{105,104},{106,105},{107,106},{108,107},{109,108},{110,109},{111,110},{112,127},{113,112},{114,113},{115,114},{116,115},{117,116},{118,117},{119,118},{120,119},{121,120},{122,121},{123,122},{124,123},{125,124},{126,125},{127,126},{128,143},{129,128},{130,129},{131,130},{132,131},{133,132},{134,133},{135,134},{136,135},{137,136},{138,137},{139,138},{140,139},{141,140},{142,141},{143,142},{144,159},{145,144},{146,145},{147,146},{148,147},{149,148},{150,149},{151,150},{152,151},{153,152},{154,153},{155,154},{156,155},{157,156},{158,157},{159,158},{160,175},{161,160},{162,161},{163,162},{164,163},{165,164},{166,165},{167,166},{168,167},{169,168},{170,169},{171,170},{172,171},{173,172},{174,173},{175,174},{176,191},{177,176},{178,177},{179,178},{180,179},{181,180},{182,181},{183,182},{184,183},{185,184},{186,185},{187,186},{188,187},{189,188},{190,189},{191,190},{192,207},{193,192},{194,193},{195,194},{196,195},{197,196},{198,197},{199,198},{200,199},{201,200},{202,201},{203,202},{204,203},{205,204},{206,205},{207,206},{208,223},{209,208},{210,209},{211,210},{212,211},{213,212},{214,213},{215,214},{216,215},{217,216},{218,217},{219,218},{220,219},{221,220},{222,221},{223,222},{224,239},{225,224},{226,225},{227,226},{228,227},{229,228},{230,229},{231,230},{232,231},{233,232},{234,233},{235,234},{236,235},{237,236},{238,237},{239,238},{240,255},{241,240},{242,241},{243,242},{244,243},{245,244},{246,245},{247,246},{248,247},{249,248},{250,249},{251,250},{252,251},{253,252},{254,253},{255,254}}
collective-permute-done.1 = s32[1,32,256]{2,1,0:T(8,128)S(1)} collective-permute-done(collective-permute-start.1)
fusion.4290 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}) fusion(collective-permute-done, fusion.4289, collective-permute-done.1), kind=kLoop, calls=fused_computation.76.clone
get-tuple-element.22079 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(fusion.4290), index=0
collective-permute-start.2 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:S(2)}, u32[]{:S(2)}) collective-permute-start(get-tuple-element.22079), channel_id=20, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,16},{32,33},{33,34},{34,35},{35,36},{36,37},{37,38},{38,39},{39,40},{40,41},{41,42},{42,43},{43,44},{44,45},{45,46},{46,47},{47,32},{48,49},{49,50},{50,51},{51,52},{52,53},{53,54},{54,55},{55,56},{56,57},{57,58},{58,59},{59,60},{60,61},{61,62},{62,63},{63,48},{64,65},{65,66},{66,67},{67,68},{68,69},{69,70},{70,71},{71,72},{72,73},{73,74},{74,75},{75,76},{76,77},{77,78},{78,79},{79,64},{80,81},{81,82},{82,83},{83,84},{84,85},{85,86},{86,87},{87,88},{88,89},{89,90},{90,91},{91,92},{92,93},{93,94},{94,95},{95,80},{96,97},{97,98},{98,99},{99,100},{100,101},{101,102},{102,103},{103,104},{104,105},{105,106},{106,107},{107,108},{108,109},{109,110},{110,111},{111,96},{112,113},{113,114},{114,115},{115,116},{116,117},{117,118},{118,119},{119,120},{120,121},{121,122},{122,123},{123,124},{124,125},{125,126},{126,127},{127,112},{128,129},{129,130},{130,131},{131,132},{132,133},{133,134},{134,135},{135,136},{136,137},{137,138},{138,139},{139,140},{140,141},{141,142},{142,143},{143,128},{144,145},{145,146},{146,147},{147,148},{148,149},{149,150},{150,151},{151,152},{152,153},{153,154},{154,155},{155,156},{156,157},{157,158},{158,159},{159,144},{160,161},{161,162},{162,163},{163,164},{164,165},{165,166},{166,167},{167,168},{168,169},{169,170},{170,171},{171,172},{172,173},{173,174},{174,175},{175,160},{176,177},{177,178},{178,179},{179,180},{180,181},{181,182},{182,183},{183,184},{184,185},{185,186},{186,187},{187,188},{188,189},{189,190},{190,191},{191,176},{192,193},{193,194},{194,195},{195,196},{196,197},{197,198},{198,199},{199,200},{200,201},{201,202},{202,203},{203,204},{204,205},{205,206},{206,207},{207,192},{208,209},{209,210},{210,211},{211,212},{212,213},{213,214},{214,215},{215,216},{216,217},{217,218},{218,219},{219,220},{220,221},{221,222},{222,223},{223,208},{224,225},{225,226},{226,227},{227,228},{228,229},{229,230},{230,231},{231,232},{232,233},{233,234},{234,235},{235,236},{236,237},{237,238},{238,239},{239,224},{240,241},{241,242},{242,243},{243,244},{244,245},{245,246},{246,247},{247,248},{248,249},{249,250},{250,251},{251,252},{252,253},{253,254},{254,255},{255,240}}
collective-permute-done.2 = s32[1,32,256]{2,1,0:T(8,128)S(1)} collective-permute-done(collective-permute-start.2)
get-tuple-element.29011 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=11
add.10209 = u32[]{:T(128)S(6)} add(get-tuple-element.28991, get-tuple-element.29011)
subtract.2864 = u32[]{:T(128)S(6)} subtract(add.10204, add.10209)
and.402 = u32[]{:T(128)S(6)} and(subtract.2864, get-tuple-element.29008)
clamp.1714 = u32[]{:T(128)S(6)} clamp(get-tuple-element.29005, and.402, get-tuple-element.29008)
convert.8617 = s32[]{:T(128)S(6)} convert(clamp.1714)
multiply.14832 = s32[]{:T(128)S(6)} multiply(convert.8617, get-tuple-element.29009)
bitcast.8824 = s8[1,2048,1,4096]{3,1,2,0:T(8,128)(4,1)S(1)} bitcast(get-tuple-element.29001)
add.10210 = u32[]{:T(128)S(6)} add(add.10209, bitcast.7210)
add.10211 = u32[]{:T(128)S(6)} add(add.10210, get-tuple-element.29010)
and.403 = u32[]{:T(128)S(6)} and(add.10211, get-tuple-element.29008)
clamp.1715 = u32[]{:T(128)S(6)} clamp(get-tuple-element.29005, and.403, get-tuple-element.29008)
convert.8618 = s32[]{:T(128)S(6)} convert(clamp.1715)
multiply.14833 = s32[]{:T(128)S(6)} multiply(convert.8618, get-tuple-element.29009)
fusion.4293 = s32[1,32,2,256]{3,1,2,0:T(8,128)S(1)} fusion(multiply.14832, bitcast.8824, multiply.14833, get-tuple-element.29000), kind=kOutput, calls=fused_computation.72.clone
get-tuple-element.22080 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(fusion.4290), index=1
collective-permute-start.3 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:S(2)}, u32[]{:S(2)}) collective-permute-start(get-tuple-element.22080), channel_id=19, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,31},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30},{32,47},{33,32},{34,33},{35,34},{36,35},{37,36},{38,37},{39,38},{40,39},{41,40},{42,41},{43,42},{44,43},{45,44},{46,45},{47,46},{48,63},{49,48},{50,49},{51,50},{52,51},{53,52},{54,53},{55,54},{56,55},{57,56},{58,57},{59,58},{60,59},{61,60},{62,61},{63,62},{64,79},{65,64},{66,65},{67,66},{68,67},{69,68},{70,69},{71,70},{72,71},{73,72},{74,73},{75,74},{76,75},{77,76},{78,77},{79,78},{80,95},{81,80},{82,81},{83,82},{84,83},{85,84},{86,85},{87,86},{88,87},{89,88},{90,89},{91,90},{92,91},{93,92},{94,93},{95,94},{96,111},{97,96},{98,97},{99,98},{100,99},{101,100},{102,101},{103,102},{104,103},{105,104},{106,105},{107,106},{108,107},{109,108},{110,109},{111,110},{112,127},{113,112},{114,113},{115,114},{116,115},{117,116},{118,117},{119,118},{120,119},{121,120},{122,121},{123,122},{124,123},{125,124},{126,125},{127,126},{128,143},{129,128},{130,129},{131,130},{132,131},{133,132},{134,133},{135,134},{136,135},{137,136},{138,137},{139,138},{140,139},{141,140},{142,141},{143,142},{144,159},{145,144},{146,145},{147,146},{148,147},{149,148},{150,149},{151,150},{152,151},{153,152},{154,153},{155,154},{156,155},{157,156},{158,157},{159,158},{160,175},{161,160},{162,161},{163,162},{164,163},{165,164},{166,165},{167,166},{168,167},{169,168},{170,169},{171,170},{172,171},{173,172},{174,173},{175,174},{176,191},{177,176},{178,177},{179,178},{180,179},{181,180},{182,181},{183,182},{184,183},{185,184},{186,185},{187,186},{188,187},{189,188},{190,189},{191,190},{192,207},{193,192},{194,193},{195,194},{196,195},{197,196},{198,197},{199,198},{200,199},{201,200},{202,201},{203,202},{204,203},{205,204},{206,205},{207,206},{208,223},{209,208},{210,209},{211,210},{212,211},{213,212},{214,213},{215,214},{216,215},{217,216},{218,217},{219,218},{220,219},{221,220},{222,221},{223,222},{224,239},{225,224},{226,225},{227,226},{228,227},{229,228},{230,229},{231,230},{232,231},{233,232},{234,233},{235,234},{236,235},{237,236},{238,237},{239,238},{240,255},{241,240},{242,241},{243,242},{244,243},{245,244},{246,245},{247,246},{248,247},{249,248},{250,249},{251,250},{252,251},{253,252},{254,253},{255,254}}
collective-permute-done.3 = s32[1,32,256]{2,1,0:T(8,128)S(1)} collective-permute-done(collective-permute-start.3)
fusion.4294 = (s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}) fusion(collective-permute-done.2, fusion.4293, collective-permute-done.3), kind=kLoop, calls=fused_computation.74.clone
get-tuple-element.29002 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(fusion.4294), index=1
get-tuple-element.29003 = s32[1,32,256]{2,1,0:T(8,128)S(1)} get-tuple-element(fusion.4294), index=0
get-tuple-element.29012 = u32[]{:T(128)} get-tuple-element(wide_param.41), index=12
constant.28871 = u32[]{:T(128)} constant(2)
add.10214 = u32[]{:T(128)} add(get-tuple-element.28991, constant.28871)
ROOT tuple.3341 = (s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}) tuple(get-tuple-element.29000, get-tuple-element.29001, get-tuple-element.29002, get-tuple-element.29003, add.10214, get-tuple-element.29005, get-tuple-element.29006, get-tuple-element.29007, get-tuple-element.29008, get-tuple-element.29009, get-tuple-element.29010, get-tuple-element.29011, get-tuple-element.29012)
}
wide.windowed_dot_general_cond {
wide_param.40 = (s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}) parameter(0)
get-tuple-element.22055 = u32[]{:T(128)} get-tuple-element(wide_param.40), index=4
constant.26614 = u32[]{:T(128)} constant(8)
ROOT compare.2683 = pred[]{:T(512)} compare(get-tuple-element.22055, constant.26614), direction=LT
}
ENTRY test {
fusion.4456 = s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)} parameter(0)
fusion.4457 = s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)} parameter(1)
broadcast.26239 = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(2)
broadcast.26239.clone = s32[1,32,256]{2,1,0:T(8,128)S(1)} parameter(3)
constant.28863 = u32[]{:T(128)} constant(0)
constant.28864 = u32[]{:T(128)} constant(0)
constant.28865 = u32[256]{0:T(256)} constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255})
constant.28866 = u32[]{:T(128)} constant(8)
constant.28867 = u32[]{:T(128)} constant(15)
constant.28868 = s32[]{:T(128)} constant(256)
constant.28869 = u32[]{:T(128)} constant(9)
constant.28870 = u32[]{:T(128)} constant(1)
constant.28871 = u32[]{:T(128)} constant(2)
tuple.3339 = (s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}) tuple(fusion.4456, fusion.4457, broadcast.26239, broadcast.26239.clone, constant.28863, constant.28864, constant.28865, constant.28866, constant.28867, constant.28868, constant.28869, constant.28870, constant.28871)
ROOT while.636 = (s8[1,32,2048]{2,1,0:T(8,128)(4,1)S(1)}, s8[1,2048,4096]{2,1,0:T(8,128)(4,1)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, s32[1,32,256]{2,1,0:T(8,128)S(1)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[256]{0:T(256)}, u32[]{:T(128)}, u32[]{:T(128)}, s32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}, u32[]{:T(128)}) while(tuple.3339), condition=wide.windowed_dot_general_cond, body=wide.windowed_dot_general_body
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
int64_t fusion_instr_count = absl::c_count_if(
module->GetComputationWithName("wide.windowed_dot_general_body")
->instructions(),
[](const HloInstruction* instr) {
return (instr->IsLoopFusion() || instr->IsOutputFusion());
});
  EXPECT_TRUE(
      WhileLoopUnroller(/*unroll_factor=*/-1).Run(module.get()).value());
int64_t fusion_instr_count_after_unroll = absl::c_count_if(
module->entry_computation()->instructions(),
[](const HloInstruction* instr) {
return (instr->IsLoopFusion() || instr->IsOutputFusion());
});
EXPECT_EQ(fusion_instr_count * 4, fusion_instr_count_after_unroll);
}
TEST_F(WhileLoopUnrollerTest, MatchShapeCoveringDS) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3,10]{1,0}) parameter(0)
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(loop_var.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3,10]{1,0} get-tuple-element(loop_var.1), index=1
zero = s32[] constant(0)
slice = s32[1,10] dynamic-slice(get-tuple-element.2, get-tuple-element.1, zero), dynamic_slice_sizes={1,10}
output = s32[3,10]{1,0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[]{:T(128)}, s32[3,10]{1,0}) tuple(idx, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3,10]{1,0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3,10]{1,0} constant({...})
tuple.1 = (s32[]{:T(128)}, s32[3,10]{1,0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3,10]{1,0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(3)}});
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* loop = module->entry_computation()->root_instruction();
auto config = WhileLoopUnroller::IsLoopUnrollable(loop);
EXPECT_TRUE(config.has_value());
HloComputation* body = module->GetComputationWithName("SimpleLoop.body");
HloInstruction* input = body->GetInstructionWithName("get-tuple-element.2");
HloInstruction* instr = body->GetInstructionWithName("slice");
EXPECT_TRUE(MatchShapeCoveringDynamicIndexInstruction(
instr, input, HloOpcode::kDynamicSlice, config.value())
.has_value());
}
TEST_F(WhileLoopUnrollerTest, MatchShapeCoveringDSNested) {
std::string hlo_string_template = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s32[3,10], p1: s32[]) -> s32[10] {
%param_0.51117 = s32[3,10] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
slice = s32[1,10] dynamic-slice(s32[3,10] %param_0.51117, p1, s32[] %constant.85694), dynamic_slice_sizes={1,10}
ROOT %bitcast.31250 = s32[10] bitcast(s32[1,10] slice)
}
%fused_computation.outer (param_1.30691: s32[3,10], p2: s32[]) -> s32[10] {
%param_1.30691 = s32[3,10] parameter(0)
p2 = s32[] parameter(1)
inner.fusion = s32[10] fusion(s32[3,10] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT out = s32[10] add(inner.fusion, inner.fusion)
}
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3,10]{1,0}) parameter(0)
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(loop_var.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3,10]{1,0} get-tuple-element(loop_var.1), index=1
zero = s32[] constant(0)
outer.fusion = s32[10] fusion(get-tuple-element.2, get-tuple-element.1), kind=kOutput, calls=%fused_computation.outer
output = s32[3,10]{1,0} add(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[]{:T(128)}, s32[3,10]{1,0}) tuple(idx, output)
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3,10]{1,0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant({{LOOP_BOUND}})
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3,10]{1,0} constant({...})
tuple.1 = (s32[]{:T(128)}, s32[3,10]{1,0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3,10]{1,0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
std::string hlo_string = absl::StrReplaceAll(
hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(3)}});
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* loop = module->entry_computation()->root_instruction();
auto config = WhileLoopUnroller::IsLoopUnrollable(loop);
EXPECT_TRUE(config.has_value());
HloComputation* inner_fusion_comp =
module->GetComputationWithName("fused_computation.slice");
HloInstruction* instr = inner_fusion_comp->GetInstructionWithName("slice");
EXPECT_TRUE(MatchShapeCoveringDynamicIndexInstruction(
instr, inner_fusion_comp->parameter_instruction(0),
HloOpcode::kDynamicSlice, config.value())
.has_value());
}
TEST_F(WhileLoopUnrollerTest, UnrollLoopWithDynamicGte) {
std::string hlo_string = R"(
HloModule SimpleLoop, entry_computation_layout={(s8[6,128,128]{2,1,0}, bf16[8,128]{1,0})->bf16[8,128]{1,0}}
%fused_computation (param_0: s8[1,128,128]) -> s8[128,128] {
%param_0 = s8[1,128,128]{2,1,0} parameter(0)
ROOT %bitcast.1 = s8[128,128]{1,0} bitcast(s8[1,128,128]{2,1,0} %param_0)
}
%fused_computation.inner (param_0.34523: bf16[8,128], sliced: s8[1,128,128]) -> bf16[8,128] {
%sliced = s8[1,128,128]{2,1,0} parameter(1)
%param_0.34523 = bf16[8,128]{1,0} parameter(0)
%fusion = s8[128,128]{1,0} fusion(s8[1,128,128]{2,1,0} %sliced), kind=kLoop, calls=%fused_computation
ROOT %convolution.3447 = bf16[8,128]{1,0} convolution(bf16[8,128]{1,0} %param_0.34523, s8[128,128]{1,0} %fusion), dim_labels=bf_io->bf
}
%while.body (unstacked: (s32[], bf16[8,128], (s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128]))) -> (s32[], bf16[8,128], (s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128])) {
%unstacked = (s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) parameter(0)
%i = s32[] get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %unstacked), index=0
%one = s32[] constant(1)
%inc = s32[] add(s32[] %i, s32[] %one)
%p0 = bf16[8,128]{1,0} get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %unstacked), index=1
%p1.1 = (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %unstacked), index=2
%two = s32[] constant(2)
%mult = s32[] multiply(s32[] %i, s32[] %two)
%custom-call = s8[1,128,128]{2,1,0} custom-call((s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) %p1.1, s32[] %mult), custom_call_target="DynamicGte"
%fusion.conv = bf16[8,128]{1,0} fusion(bf16[8,128]{1,0} %p0, s8[1,128,128]{2,1,0} %custom-call), kind=kOutput, calls=%fused_computation.inner
ROOT %out = (s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) tuple(s32[] %inc, bf16[8,128]{1,0} %fusion.conv, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) %p1.1)
}
%while.cond (unstacked.1: (s32[], bf16[8,128], (s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128], s8[1,128,128]))) -> pred[] {
%unstacked.1 = (s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) parameter(0)
%i.1 = s32[] get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %unstacked.1), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] %i.1, s32[] %constant.12857), direction=LT
}
ENTRY %main (p0.1: s8[6,128,128], p1.2: bf16[8,128]) -> bf16[8,128] {
%init = s32[] constant(0)
%p1.2 = bf16[8,128]{1,0} parameter(1)
%p0.1 = s8[6,128,128]{2,1,0} parameter(0)
%while.input = (s32[], bf16[8,128]{1,0}, s8[6,128,128]{2,1,0}) tuple(s32[] %init, bf16[8,128]{1,0} %p1.2, s8[6,128,128]{2,1,0} %p0.1)
%slice = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[0:1], [0:128], [0:128]}
%slice.1 = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[1:2], [0:128], [0:128]}
%slice.2 = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[2:3], [0:128], [0:128]}
%slice.3 = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[3:4], [0:128], [0:128]}
%slice.4 = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[4:5], [0:128], [0:128]}
%slice.5 = s8[1,128,128]{2,1,0} slice(s8[6,128,128]{2,1,0} %p0.1), slice={[5:6], [0:128], [0:128]}
%tuple = (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) tuple(s8[1,128,128]{2,1,0} %slice, s8[1,128,128]{2,1,0} %slice.1, s8[1,128,128]{2,1,0} %slice.2, s8[1,128,128]{2,1,0} %slice.3, s8[1,128,128]{2,1,0} %slice.4, s8[1,128,128]{2,1,0} %slice.5)
%tuple.1 = (s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) tuple(s32[] %init, bf16[8,128]{1,0} %p1.2, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) %tuple)
%while.out = (s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) while((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %tuple.1), condition=%while.cond, body=%while.body
%while_use = (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}) get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %while.out), index=2
ROOT %out.1 = bf16[8,128]{1,0} get-tuple-element((s32[], bf16[8,128]{1,0}, (s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0}, s8[1,128,128]{2,1,0})) %while.out), index=1
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
  TF_ASSERT_OK_AND_ASSIGN(UnrollResult unrolled_result,
                          WhileLoopUnroller::UnrollAndReturnReplacement(
                              loop, /*unroll_factor=*/-1,
                              /*wrap_in_trivial_loop=*/false,
                              /*force_unroll=*/true, /*prepare=*/true));
bool unrolled = unrolled_result.unrolled;
EXPECT_TRUE(unrolled);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
EXPECT_FALSE(instr->IsCustomCall("DynamicGte"));
EXPECT_FALSE(instr->IsCustomCall("DynamicTuple"));
}
}
TEST_F(WhileLoopUnrollerTest, IsEffectivelyStaticDynamicSlice) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[6,128,128] parameter(0)
static.p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.static = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, static.p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.static)
}
%fused_computation.slice.2 (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[6,128,128] parameter(0)
dynamic.p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.dynamic = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, dynamic.p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.dynamic)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[6,128,128], p2: s32[], p3: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[6,128,128] parameter(1)
static.p2 = s32[] parameter(2)
%fusion.1 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, static.p2), kind=kLoop, calls=%fused_computation.slice
dynamic.p3 = s32[] parameter(3)
%fusion.2 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, dynamic.p3), kind=kLoop, calls=%fused_computation.slice.2
out = s8[128,128] add(%fusion.1, %fusion.2)
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] out), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[6,128,128], s32[])) -> (s32[], bf16[8,128], s8[6,128,128], s32[]) {
wide_p = (s32[], bf16[8,128], s8[6,128,128], s32[]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[6,128,128] get-tuple-element(wide_p), index=2
dynamic.p2 = s32[] get-tuple-element(wide_p), index=3
one = s32[] constant(1)
inc = s32[] add(i, one)
two = s32[] constant(2)
mult = s32[] multiply(i, two)
fusion.conv = bf16[8,128] fusion(p0, p1, mult, dynamic.p2), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[6,128,128], s32[]) tuple(inc, fusion.conv, p1, dynamic.p2)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[6,128,128], s32[])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[6,128,128], s32[]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[6,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
p2 = s32[] parameter(2)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[6,128,128], s32[]) tuple(init, p1, p0, p2)
while.out = (s32[], bf16[8,128], s8[6,128,128], s32[]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[6,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<WhileLoopConfig> config =
WhileLoopUnroller::IsLoopUnrollable(loop);
EXPECT_TRUE(config.has_value());
for (HloComputation* comp : module->MakeComputationPostOrder()) {
HloInstruction* static_slice =
comp->GetInstructionWithName("dynamic-slice.static");
if (static_slice != nullptr) {
auto index = MatchEffectivelyStaticDynamicSliceInsideLoop(
static_slice, static_slice->operand(0), *config);
EXPECT_TRUE(index.has_value());
}
HloInstruction* dynamic_slice =
comp->GetInstructionWithName("dynamic-slice.dynamic");
if (dynamic_slice != nullptr) {
auto index = MatchEffectivelyStaticDynamicSliceInsideLoop(
dynamic_slice, dynamic_slice->operand(0), *config);
EXPECT_FALSE(index.has_value());
}
}
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopWithCustomCallNoTuple) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(get-tuple-element.1, get-tuple-element.2), custom_call_target="CustomCallStart"
get-tuple-element.3 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.3, constant.1)
get-tuple-element.4 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.4, get-tuple-element.4)
tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(idx, output), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.5, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
UnrollConfig config;
  EXPECT_FALSE(WhileLoopUnroller(/*unroll_factor=*/-1,
                                 /*wrap_in_trivial_loop=*/false, config)
                   .Run(m.get())
                   .value());
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopWithCustomCallNonTupleForRoot) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(idx, output), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.5, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
UnrollConfig config;
  EXPECT_TRUE(WhileLoopUnroller(/*unroll_factor=*/-1,
                                /*wrap_in_trivial_loop=*/false, config)
                  .Run(m.get())
                  .value());
}
TEST_F(WhileLoopUnrollerTest, SimpleLoopWithCustomCall) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(tuple), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
UnrollConfig config;
  EXPECT_TRUE(WhileLoopUnroller(/*unroll_factor=*/-1,
                                /*wrap_in_trivial_loop=*/false, config)
                  .Run(m.get())
                  .value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_unroller.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_unroller_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
773b7270-3bf2-4c44-8db0-c80349766cfe | cpp | tensorflow/tensorflow | shape_inference | tensorflow/compiler/jit/shape_inference.cc | tensorflow/compiler/jit/shape_inference_test.cc | #include "tensorflow/compiler/jit/shape_inference.h"
#include <cstdint>
#include <map>
#include <vector>
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
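// Converts an inference-context shape handle into a PartialTensorShape. If
// the handle's rank is unknown, `shape` is left untouched (unknown rank).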
Status ShapeHandleToTensorShape(shape_inference::InferenceContext* context,
const shape_inference::ShapeHandle& handle,
PartialTensorShape* shape) {
if (!context->RankKnown(handle)) return absl::OkStatus();
std::vector<int64_t> dims(context->Rank(handle));
for (int32_t i = 0, end = dims.size(); i < end; ++i) {
dims[i] = context->Value(context->Dim(handle, i));
}
return PartialTensorShape::MakePartialShape(dims.data(), dims.size(), shape);
}
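// Runs shape inference over `graph` in reverse post-order. `arg_shapes` seeds
// the shapes of _Arg nodes (and Placeholders carrying an "_index" attr);
// `back_edges` lets Merge nodes be paired with their NextIteration sources so
// loop-invariant resource shapes can be propagated through while loops.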
Status PropagateShapes(Graph* graph,
const std::map<int, InferredShape>& arg_shapes,
const std::vector<BackEdgeHelper::BackEdge>& back_edges,
ShapeRefiner* shape_refiner) {
std::map<const Node*, const Node*> merge_to_next_iteration;
for (const auto& e : back_edges) {
if (e.src->IsNextIteration() && e.dst->IsMerge()) {
merge_to_next_iteration[e.dst] = e.src;
}
}
std::vector<Node*> order;
GetReversePostOrder(*graph, &order);
for (Node* n : order) {
VLOG(4) << "Propagating shape for node " << n->name()
<< ", type: " << n->type_string();
Status status = shape_refiner->AddNode(n);
if (!status.ok()) {
VLOG(1) << "Shape inference failed for node " << n->name() << ": "
<< status;
} else {
shape_inference::InferenceContext* context = shape_refiner->GetContext(n);
for (int i = 0; i < n->num_outputs(); i++) {
shape_inference::ShapeHandle handle = context->output(i);
VLOG(4) << "Output " << i << " for node " << n->name() << ": "
<< context->DebugString(handle);
}
}
int index = -1;
if (n->type_string() == "_Arg") {
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
} else if (n->type_string() == "Placeholder") {
if (const auto s = GetNodeAttr(n->attrs(), "_index", &index); !s.ok()) {
VLOG(1) << "Failed to get node index for node " << n->name();
}
}
if (index >= 0) {
if (auto it = arg_shapes.find(index); it != arg_shapes.end()) {
const InferredShape& arg_shape = it->second;
shape_inference::InferenceContext* context =
shape_refiner->GetContext(n);
if (arg_shape.handle_type != DT_INVALID) {
shape_inference::ShapeHandle handle;
TF_RETURN_IF_ERROR(context->MakeShapeFromPartialTensorShape(
arg_shape.handle_shape, &handle));
context->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{
{handle, arg_shape.handle_type}});
}
shape_inference::ShapeHandle handle;
TF_RETURN_IF_ERROR(
context->MakeShapeFromPartialTensorShape(arg_shape.shape, &handle));
TF_RETURN_IF_ERROR(shape_refiner->SetShape(n, 0, handle));
}
}
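    // If the resource feeding a VariableShape node has a fully known rank,
    // replace the node with an equivalent Const, rewiring its data and
    // control edges and copying any underscore-prefixed attributes.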
if (n->type_string() == "VariableShape") {
shape_inference::InferenceContext* context = shape_refiner->GetContext(n);
auto handle_shapes_and_types = context->input_handle_shapes_and_types(0);
if (handle_shapes_and_types && !handle_shapes_and_types->empty()) {
shape_inference::ShapeHandle handle =
handle_shapes_and_types->at(0).shape;
TensorShapeProto shape_proto;
context->ShapeHandleToProto(handle, &shape_proto);
if (!shape_proto.unknown_rank()) {
NodeDef const_def;
const_def.set_op("Const");
Node* var_node;
TF_RETURN_IF_ERROR(n->input_node(0, &var_node));
const_def.set_name(
graph->NewName(absl::StrCat("var_shape_", var_node->name())));
DataType dtype = n->output_type(0);
AddNodeAttr("dtype", dtype, &const_def);
TensorProto value;
value.set_dtype(dtype);
value.mutable_tensor_shape()->add_dim()->set_size(
shape_proto.dim_size());
for (const auto& dim : shape_proto.dim()) {
if (dtype == DT_INT32) {
value.add_int_val(dim.size());
} else {
value.add_int64_val(dim.size());
}
}
AddNodeAttr("value", value, &const_def);
for (auto const& attr : n->attrs()) {
if (*attr.first.begin() == '_') {
AddNodeAttr(attr.first, attr.second, &const_def);
}
}
TF_ASSIGN_OR_RETURN(Node * const_node, graph->AddNode(const_def));
graph->AddControlEdge(var_node, const_node);
std::vector<const Edge*> out_edges(n->out_edges().begin(),
n->out_edges().end());
for (const Edge* e : out_edges) {
if (e->IsControlEdge()) {
graph->AddControlEdge(const_node, e->dst());
graph->RemoveEdge(e);
} else {
Node* dst = e->dst();
int dst_input = e->dst_input();
graph->RemoveEdge(e);
graph->AddEdge(const_node, 0, dst, dst_input);
}
}
}
}
}
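    // A resource Merge whose back edge comes straight from its own Switch
    // (possibly through Identity nodes) is loop invariant, so the resource
    // shape/type of its first available input can be forwarded to the output.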
if (n->IsMerge() && n->output_type(0) == DT_RESOURCE) {
auto iter = merge_to_next_iteration.find(n);
if (iter != merge_to_next_iteration.end()) {
const Node *next_iter = iter->second, *node = next_iter;
do {
TF_RETURN_IF_ERROR(node->input_node(0, &node));
} while (node->IsIdentity());
const Node* switch_input;
bool is_loop_invariant = node->IsSwitch() &&
node->input_node(0, &switch_input).ok() &&
switch_input == n;
if (is_loop_invariant) {
shape_inference::InferenceContext* context =
shape_refiner->GetContext(n);
for (int i = 0; i < n->num_inputs(); i++) {
const Node* input_node;
if (n->input_node(i, &input_node).ok()) {
auto shapes_and_types = context->input_handle_shapes_and_types(i);
if (shapes_and_types) {
context->set_output_handle_shapes_and_types(0,
*shapes_and_types);
}
break;
}
}
}
}
}
}
return absl::OkStatus();
}
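// Copies every node's inferred output shapes out of `shape_refiner` into
// `shape_info`, including the handle shape/type when an output carries
// exactly one resource shape-and-type entry.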
Status StoreOutputShapes(const Graph& graph, const ShapeRefiner& shape_refiner,
GraphShapeInfo* shape_info) {
for (const Node* node : graph.nodes()) {
shape_inference::InferenceContext* context = shape_refiner.GetContext(node);
if (!context) continue;
auto& outputs = (*shape_info)[node->name()];
outputs.resize(context->num_outputs());
for (int i = 0; i < context->num_outputs(); ++i) {
auto& output = outputs[i];
TF_RETURN_IF_ERROR(
ShapeHandleToTensorShape(context, context->output(i), &output.shape));
const auto* handle_shapes_and_types =
context->output_handle_shapes_and_types(i);
if (handle_shapes_and_types != nullptr) {
if (handle_shapes_and_types->size() == 1) {
TF_RETURN_IF_ERROR(ShapeHandleToTensorShape(
context, (*handle_shapes_and_types)[0].shape,
&output.handle_shape));
output.handle_type = (*handle_shapes_and_types)[0].dtype;
} else {
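          // Multiple shape-and-type entries for one output are not recorded;
          // the handle shape/type keeps its default (unknown) value.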
}
}
      VLOG(4) << node->name() << " output " << i << " shape "
              << output.shape.DebugString() << " handle_type "
              << DataTypeString(output.handle_type) << " handle_shape "
              << output.handle_shape.DebugString();
}
}
return absl::OkStatus();
}
}
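// Entry point: temporarily removes back edges so the refiner sees an acyclic
// graph, propagates shapes, restores the edges, then records the results.
// Minimal usage sketch (arguments as in the unit tests below):
//   GraphShapeInfo info;
//   TF_RETURN_IF_ERROR(InferShapes(graph, /*arg_shapes=*/{},
//                                  /*fnlib_def=*/nullptr, &info));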
Status InferShapes(Graph* graph, const std::map<int, InferredShape>& arg_shapes,
const tensorflow::FunctionLibraryDefinition* fnlib_def,
GraphShapeInfo* shape_info) {
ShapeRefiner shape_refiner(graph->versions(), graph->op_registry());
shape_refiner.set_require_shape_inference_fns(false);
BackEdgeHelper back_edge;
TF_RETURN_IF_ERROR(back_edge.Remove(graph));
TF_RETURN_IF_ERROR(PropagateShapes(graph, arg_shapes,
back_edge.RemovedEdges(), &shape_refiner));
TF_RETURN_IF_ERROR(back_edge.Replace());
return StoreOutputShapes(*graph, shape_refiner, shape_info);
}
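// Merges two inferred shapes, failing if the tensor or handle shapes are
// incompatible or if the resource handle types disagree.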
absl::StatusOr<InferredShape> MergeInferredShapes(const InferredShape& a,
const InferredShape& b) {
InferredShape result;
TF_RETURN_IF_ERROR(a.shape.MergeWith(b.shape, &result.shape));
if (a.handle_type == DT_INVALID) {
result.handle_type = b.handle_type;
} else if (b.handle_type == DT_INVALID) {
result.handle_type = a.handle_type;
} else if (a.handle_type == b.handle_type) {
result.handle_type = a.handle_type;
} else {
return errors::InvalidArgument(
"Mismatched resource types: ", DataTypeString(a.handle_type), " vs. ",
DataTypeString(b.handle_type));
}
TF_RETURN_IF_ERROR(
a.handle_shape.MergeWith(b.handle_shape, &result.handle_shape));
return result;
}
} | #include "tensorflow/compiler/jit/shape_inference.h"
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
TEST(ShapeInferenceTest, Basics) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({2, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({3}));
auto c = ops::Placeholder(root.WithOpName("C"), DT_FLOAT);
auto d = ops::Add(root.WithOpName("D"), a, b);
auto e = ops::Add(root.WithOpName("E"), d, c);
auto f = ops::Neg(root.WithOpName("F"), e);
auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f});
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
GraphShapeInfo shape_info;
  TF_ASSERT_OK(InferShapes(graph.get(), /*arg_shapes=*/{},
                           /*fnlib_def=*/nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}}, {"B", {PartialTensorShape({3})}},
{"C", {PartialTensorShape()}}, {"D", {PartialTensorShape({2, 3})}},
{"E", {PartialTensorShape()}}, {"F", {PartialTensorShape()}},
{"G", {PartialTensorShape()}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, UseArgShapesForVariableBatchSize) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto c = ops::Add(root.WithOpName("C"), a, b);
auto d = ops::Neg(root.WithOpName("D"), c);
a.node()->AddAttr("_index", 0);
b.node()->AddAttr("_index", 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
std::map<int, InferredShape> arg_shapes;
arg_shapes[0].shape = TensorShape({2, 3});
arg_shapes[1].shape = TensorShape({2, 3});
GraphShapeInfo shape_info;
  TF_ASSERT_OK(InferShapes(graph.get(), arg_shapes,
                           /*fnlib_def=*/nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}},
{"B", {PartialTensorShape({2, 3})}},
{"C", {PartialTensorShape({2, 3})}},
{"D", {PartialTensorShape({2, 3})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, UseArgShapesForVariableBatchSizeIncompleteUserArgs) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto c = ops::Add(root.WithOpName("C"), a, b);
auto d = ops::Neg(root.WithOpName("D"), c);
a.node()->AddAttr("_index", 0);
b.node()->AddAttr("_index", 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
std::map<int, InferredShape> arg_shapes;
arg_shapes[0].shape = TensorShape({2, 3});
GraphShapeInfo shape_info;
  TF_ASSERT_OK(InferShapes(graph.get(), arg_shapes,
                           /*fnlib_def=*/nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}},
{"B", {PartialTensorShape({2, 3})}},
{"C", {PartialTensorShape({2, 3})}},
{"D", {PartialTensorShape({2, 3})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, WhileLoop) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32,
ops::Placeholder::Shape({}));
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32,
ops::Placeholder::Shape({}));
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto enter2 =
ops::internal::Enter(scope.WithOpName("while/Enter2"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_node =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_node.output_false);
auto identity = ops::Identity(scope.WithOpName("while/Identity"),
switch_node.output_true);
auto identity_shape =
ops::Const<int32>(scope.WithOpName("while/Identity/shape"), {});
auto identity_reshaped = ops::Reshape(
scope.WithOpName("while/Identity/reshaped"), identity, identity_shape);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity_reshaped, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
GraphShapeInfo shape_info;
  TF_ASSERT_OK(InferShapes(&graph, /*arg_shapes=*/{}, /*fnlib_def=*/nullptr,
                           &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"while/Identity", {PartialTensorShape()}},
{"while/add", {PartialTensorShape({})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(graph, shape_info, expected));
}
TEST(ShapeInferenceTest, WhileLoopWithResource) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x =
ops::VarHandleOp(scope.WithOpName("x"), DT_FLOAT, TensorShape({2, 3}));
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), x, "aloop");
auto dummy = ops::Placeholder(scope.WithOpName("dummy"), DT_RESOURCE);
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto false_value = ops::Const<bool>(scope.WithOpName("false"), false);
auto loop_cond =
ops::LoopCond(scope.WithOpName("while/LoopCond"), false_value);
auto switch_node =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_node.output_false);
auto identity = ops::Identity(scope.WithOpName("while/Identity"),
switch_node.output_true);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), identity);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
GraphShapeInfo shape_info;
  TF_ASSERT_OK(InferShapes(&graph, /*arg_shapes=*/{}, /*fnlib_def=*/nullptr,
                           &shape_info));
auto iter = shape_info.find("sink");
EXPECT_NE(iter, shape_info.end());
EXPECT_EQ(iter->second.size(), 1);
EXPECT_EQ(iter->second.at(0).handle_type, DT_FLOAT);
TensorShape resource_shape;
EXPECT_TRUE(iter->second.at(0).handle_shape.AsTensorShape(&resource_shape));
EXPECT_EQ(resource_shape, TensorShape({2, 3}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/shape_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/shape_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f350a9ab-b4e3-4c7d-ac03-af724def09d0 | cpp | tensorflow/tensorflow | scan_loop_accumulator_input_unification | third_party/xla/xla/service/scan_loop_accumulator_input_unification.cc | third_party/xla/xla/service/scan_loop_accumulator_input_unification_test.cc | #include "xla/service/scan_loop_accumulator_input_unification.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
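// Returns true if tuple element `idx` of `while_instr` holds a single value
// that is the same before and after the loop, i.e. the element is read-only.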
bool LoopIndexIsReadOnly(const HloAliasAnalysis& alias_analysis,
HloInstruction* while_instr, int64_t idx) {
const HloDataflowAnalysis& dataflow_analysis =
alias_analysis.dataflow_analysis();
return !(
dataflow_analysis.GetValueSet(while_instr->while_init(), {idx})
.values()
.size() > 1 ||
dataflow_analysis.GetValueSet(while_instr, {idx}).values().size() > 1 ||
dataflow_analysis.GetUniqueValueAt(while_instr, {idx}) !=
dataflow_analysis.GetUniqueValueAt(while_instr->while_init(), {idx}));
}
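// Given an unrollable scan loop nested inside an outer while body, pairs each
// accumulator (a tuple element updated by a shape-covering
// dynamic-update-slice that feeds the loop root) with a read-only input tuple
// element read by a shape-covering dynamic-slice, tracing both through the
// outer body to confirm they line up.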
std::vector<std::pair<HloInstruction*, HloInstruction*>>
FindAccumulatorInputPairs(const HloAliasAnalysis& alias_analysis,
HloInstruction* while_instr,
const WhileLoopConfig& config) {
HloComputation* computation = while_instr->while_body();
HloInstruction* body_param = computation->parameter_instruction(0);
std::vector<HloInstruction*> possible_acc;
for (int64_t param_idx = 0;
param_idx < while_instr->while_init()->operand_count(); ++param_idx) {
for (HloInstruction* gte : body_param->users()) {
if (!Match(gte, match::GetTupleElement().WithTupleIndex(param_idx))) {
continue;
}
if (gte->operand(0) != body_param) {
continue;
}
      if (gte->user_count() != 1) {
continue;
}
HloInstruction* gte_user = gte->users().at(0);
if (MatchShapeCoveringDynamicIndexInstruction(
gte_user, gte, HloOpcode::kDynamicUpdateSlice, config)
.has_value()) {
if (computation->root_instruction()->mutable_operand(param_idx) ==
gte_user) {
possible_acc.push_back(gte);
VLOG(3) << "accumulator index: " << param_idx << " = " << gte->name();
}
}
}
}
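  // Returns the operand index of `operand` within `instr`, or -1 if absent.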
auto operand_index = [](HloInstruction* instr,
HloInstruction* operand) -> int64_t {
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (operand == instr->operand(i)) {
return i;
}
}
return -1;
};
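  // Returns the get-tuple-element of `tuple` at `idx` within the enclosing
  // computation, or nullptr if none exists.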
auto find_gte_instr = [](HloInstruction* tuple,
int64_t idx) -> HloInstruction* {
for (HloInstruction* instr : tuple->parent()->MakeInstructionPostOrder()) {
HloInstruction* operand;
if (Match(instr, match::GetTupleElement()
.WithOperand(0, match::Op(&operand))
.WithTupleIndex(idx))) {
if (operand != tuple) {
continue;
}
return instr;
}
}
return nullptr;
};
  auto check_single_user_not_null = [](HloInstruction* instr) -> bool {
    return instr != nullptr && instr->user_count() == 1;
  };
std::vector<std::pair<HloInstruction*, HloInstruction*>> acc_input_pairs;
HloComputation* outer_while_body = while_instr->parent();
for (HloInstruction* acc : possible_acc) {
VLOG(3) << "Looking for corresponding input for " << acc->name();
HloInstruction* acc_gte_outer_body =
find_gte_instr(while_instr, acc->tuple_index());
if (acc_gte_outer_body == nullptr) {
continue;
}
int64_t idx =
operand_index(outer_while_body->root_instruction(), acc_gte_outer_body);
VLOG(3) << "Accumulator output of the scan in the outer body = "
<< acc_gte_outer_body->name() << ", index = " << idx;
if (idx == -1) {
continue;
}
HloInstruction* input_gte_outer =
find_gte_instr(outer_while_body->parameter_instruction(0), idx);
if (!check_single_user_not_null(input_gte_outer)) {
continue;
}
if (input_gte_outer->users().at(0) != while_instr->while_init()) {
continue;
}
VLOG(3) << "Input parameter outer body = " << input_gte_outer->name()
<< ", index = " << input_gte_outer->tuple_index();
int64_t input_idx_inner =
operand_index(while_instr->while_init(), input_gte_outer);
HloInstruction* input_gte_inner =
find_gte_instr(computation->parameter_instruction(0), input_idx_inner);
if (!LoopIndexIsReadOnly(alias_analysis, while_instr, input_idx_inner)) {
continue;
}
VLOG(3) << "Input parameter scan body = " << input_gte_inner->name()
<< ", index = " << input_gte_inner->tuple_index();
if (input_gte_inner->user_count() != 2) {
continue;
}
HloInstruction* gte_user = input_gte_inner->users().at(0);
VLOG(3) << "User of the inner loop input = " << gte_user->ToString();
if (MatchShapeCoveringDynamicIndexInstruction(
gte_user, input_gte_inner, HloOpcode::kDynamicSlice, config)
.has_value()) {
acc_input_pairs.emplace_back(acc, input_gte_inner);
}
}
return acc_input_pairs;
}
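// For every unrollable loop whose parent is itself a while body, replaces
// each input with its paired accumulator so the two share one buffer, and
// deletes the input element once it has no remaining users.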
absl::StatusOr<bool> UnifyAccumulatorWithInput(
const HloAliasAnalysis& alias_analysis,
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(&alias_analysis.dataflow_analysis().module());
auto is_while_body = [&](HloComputation* comp) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(comp);
return !callers.empty() && callers.at(0)->opcode() == HloOpcode::kWhile;
};
std::vector<HloInstruction*> changed_loops;
bool unified = false;
for (auto& [while_instr, loop_config] : unrollable_loops) {
if (!is_while_body(while_instr->parent())) {
continue;
}
auto acc_input_pairs =
FindAccumulatorInputPairs(alias_analysis, while_instr, loop_config);
for (const auto& [acc, input] : acc_input_pairs) {
if (Match(while_instr->while_init()->mutable_operand(acc->tuple_index()),
match::GetTupleElement(match::Parameter()))) {
continue;
}
      VLOG(3) << while_instr->name() << " -> <accumulator_@"
              << acc->tuple_index() << ": " << acc->name() << ", input_@"
              << input->tuple_index() << ": " << input->name() << ">";
TF_RETURN_IF_ERROR(input->ReplaceAllUsesWith(acc));
TF_RETURN_IF_ERROR(while_instr->while_init()->ReplaceOperandWith(
acc->tuple_index(),
while_instr->while_init()->mutable_operand(input->tuple_index())));
if (input->user_count() == 0) {
TF_RETURN_IF_ERROR(while_instr->while_body()->RemoveInstruction(input));
}
unified = true;
}
}
return unified;
}
}
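// Pass entry point: builds alias analysis, collects unrollable loops, unifies
// accumulator/input pairs, and on success removes dead loop parameters,
// simplifies tuples, and drops unused computations. A minimal usage sketch
// (assuming the usual pass-pipeline setup):
//   HloPassPipeline pipeline("unify");
//   pipeline.AddPass<ScanLoopAccumulatorInputUnification>();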
absl::StatusOr<bool> ScanLoopAccumulatorInputUnification::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before ScanLoopAccumulatorInputUnification:";
XLA_VLOG_LINES(2, module->ToString());
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops =
WhileLoopUnroller::GetUnrollableLoops(module, execution_threads,
std::nullopt);
TF_ASSIGN_OR_RETURN(bool changed, UnifyAccumulatorWithInput(
*alias_analysis, unrollable_loops));
if (changed) {
for (auto& [while_instr, loop_config] : unrollable_loops) {
TF_RETURN_IF_ERROR(TryRemoveDeadWhileParams(while_instr).status());
}
TF_RETURN_IF_ERROR(TupleSimplifier{}.Run(module).status());
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
VLOG(2) << "HLO module after ScanLoopAccumulatorInputUnification:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after ScanLoopAccumulatorInputUnification";
}
return changed;
}
} | #include "xla/service/scan_loop_accumulator_input_unification.h"
#include <memory>
#include <optional>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ScanLoopAccumulatorInputUnificationTest = HloTestBase;
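// Returns the first kWhile instruction in the module's entry computation, or
// nullptr if there is none.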
HloInstruction* GetTopLevelWhileInstruction(HloModule* module) {
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
return instr;
}
}
return nullptr;
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
tuple.8 = (s32[], s32[], s32[8]) tuple(constant.3, init, array)
while = (s32[], s32[], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module),
                                      std::move(module_clone), {},
                                      std::nullopt, /*run_hlo_passes=*/true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput2) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
add.2 = s32[] multiply(get-tuple-element.47, reshape.4)
reshape.5 = s32[1] reshape(add.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, broadcast2, get-tuple-element.54)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
EXPECT_EQ(instr->while_init()->operand(3)->opcode(),
HloOpcode::kConstant);
}
}
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module),
                                      std::move(module_clone), {},
                                      std::nullopt, /*run_hlo_passes=*/true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, AccumulatorAllocateOutside) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, get-tuple-element.54, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.48, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
buffer = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, buffer)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=3
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
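// No rewrite: the scanned input (s32[8,10]) and the accumulator (s32[8])
// have different shapes, so they cannot share a buffer.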
TEST_F(ScanLoopAccumulatorInputUnificationTest, InputDifferentShape) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8,10] get-tuple-element(wide.arg_tuple.8), index=3
zero = s32[] constant(0)
dynamic-slice.0 = s32[1,10] dynamic-slice(get-tuple-element.54, get-tuple-element.46, zero), dynamic_slice_sizes={1,10}
reshape.2 = s32[10] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(reshape.2, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(get-tuple-element.47, reshape.3)
reshape.4 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8,10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8,10] parameter(0)
broadcast.5 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8,10]) tuple(constant.3, init, broadcast.5, array)
while = (s32[], s32[], s32[8], s32[8,10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.39 = s32[] get-tuple-element(while), index=1
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
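// One scanned input is read by an extra user (`mult`) inside the inner loop
// body; the pass is still expected to simplify the loop, as checked below.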
TEST_F(ScanLoopAccumulatorInputUnificationTest, MultipleUsersInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
mult = s32[8] multiply(get-tuple-element.54, get-tuple-element.54)
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
    mult.2 = s32[] multiply(get-tuple-element.47, reshape.4)
    reshape.5 = s32[1] reshape(mult.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.54, broadcast2, get-tuple-element.56)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
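// Checks that unification also removes the copy of the accumulator that
// CopyInsertion would otherwise insert: the unmodified clone gets a kCopy at
// the while-body root, while the simplified module does not.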
TEST_F(ScanLoopAccumulatorInputUnificationTest,
UnifyAccumulatorInputCheckCopy) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=4
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.55, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(reshape.3, reshape.2)
add.2 = s32[] add(add.1, get-tuple-element.47)
reshape.4 = s32[1] reshape(add.2)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, get-tuple-element.55)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, get-tuple-element.55)
while = (s32[], s32[], s32[8], s32[8], s32[10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[10]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.55)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
other_input = s32[10] constant({10,20,30,40,50,60,70,80,90,100})
tuple.8 = (s32[], s32[], s32[8], s32[10]) tuple(constant.3, init, array, other_input)
while = (s32[], s32[], s32[8], s32[10]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.39 = s32[8] get-tuple-element(while), index=2
get-tuple-element.40 = s32[10] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[10]) tuple(get-tuple-element.39, get-tuple-element.40)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool clone_copy_inserted,
CopyInsertion().Run(module_clone.get()));
EXPECT_TRUE(clone_copy_inserted);
HloInstruction* while_instruction =
GetTopLevelWhileInstruction(module_clone.get());
EXPECT_EQ(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
TF_ASSERT_OK_AND_ASSIGN(bool copy_inserted,
CopyInsertion().Run(module.get()));
EXPECT_TRUE(copy_inserted);
VLOG(3) << "After copy_insertion:\n" << module->ToString();
while_instruction = GetTopLevelWhileInstruction(module.get());
EXPECT_NE(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scan_loop_accumulator_input_unification.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scan_loop_accumulator_input_unification_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ceab836-2f6f-4d6c-85b5-579dbed57ba0 | cpp | tensorflow/tensorflow | dot_dimension_merger | third_party/xla/xla/service/dot_dimension_merger.cc | third_party/xla/xla/service/dot_dimension_merger_test.cc | #include "xla/service/dot_dimension_merger.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
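// Shifts dimension numbers that come after the merged batch dimensions:
// every entry >= `start` is decreased by `shift`, earlier entries are kept.
// Worked example (matching the first test case in the companion test file):
// ShiftDimensions({0, 4}, /*start=*/1, /*shift=*/1) returns {0, 3}.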
std::vector<int64_t> ShiftDimensions(absl::Span<const int64_t> dimensions,
const int64_t start, const int64_t shift) {
std::vector<int64_t> new_dimensions;
new_dimensions.reserve(dimensions.size());
for (const int64_t i : dimensions) {
if (i < start) {
new_dimensions.push_back(i);
} else {
new_dimensions.push_back(i - shift);
}
}
return new_dimensions;
}
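// Rewrites a dot whose operands carry two or more consecutive batch
// dimensions into reshape(dot(reshape(lhs), reshape(rhs))): the batch
// dimensions are collapsed into a single one and the contracting dimension
// numbers are renumbered accordingly.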
class BatchDimensionMerger : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* dot) override {
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
const Shape& lhs_shape = dot->operand(0)->shape();
const Shape& rhs_shape = dot->operand(1)->shape();
CHECK_EQ(dnums.lhs_batch_dimensions_size(),
dnums.rhs_batch_dimensions_size());
const int64_t batch_dimension_count = dnums.lhs_batch_dimensions_size();
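    // Bail out unless there are at least two batch dimensions per side and
    // they are sorted, logically consecutive, and physically consecutive in
    // each operand's layout.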
if (batch_dimension_count < 2 ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.lhs_batch_dimensions()) ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.rhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.lhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.rhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(lhs_shape.layout(),
dnums.lhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(rhs_shape.layout(),
dnums.rhs_batch_dimensions())) {
return absl::OkStatus();
}
const int64_t lhs_batch_dimension =
*absl::c_min_element(dnums.lhs_batch_dimensions());
const int64_t rhs_batch_dimension =
*absl::c_min_element(dnums.rhs_batch_dimensions());
int64_t batch_size = 1;
for (const int64_t dimension_number : dnums.lhs_batch_dimensions()) {
batch_size *= lhs_shape.dimensions(dimension_number);
}
auto merge_batch_dims = [&](Shape old_shape, int64_t batch_dim) {
Shape new_shape = old_shape;
for (int64_t i = 1; i < batch_dimension_count; ++i) {
new_shape.DeleteDimension(batch_dim + 1);
}
new_shape.set_dimensions(batch_dim, batch_size);
return new_shape;
};
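    // Example (the shapes from the MergeConsecutiveBatchDimensions test in
    // the companion test file): merging bf16[79,2,4,12,11] at batch_dim=1
    // with batch_size = 2 * 4 = 8 yields bf16[79,8,12,11].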
Shape new_lhs_shape = merge_batch_dims(lhs_shape, lhs_batch_dimension);
Shape new_rhs_shape = merge_batch_dims(rhs_shape, rhs_batch_dimension);
DotDimensionNumbers new_dot_dimension_numbers;
new_dot_dimension_numbers.add_lhs_batch_dimensions(lhs_batch_dimension);
new_dot_dimension_numbers.add_rhs_batch_dimensions(rhs_batch_dimension);
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.lhs_contracting_dimensions(),
lhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_lhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.rhs_contracting_dimensions(),
rhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_rhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
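    // For sparse dots, re-home each sparsity descriptor whose dimension lies
    // past the merged batch dimension and reshape the trailing metadata
    // operands so they match the merged shape.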
auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
std::vector<SparsityDescriptor> new_sparsity(sparsity.begin(),
sparsity.end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
SparsityDescriptor& descriptor = new_sparsity[i];
int64_t sparse_batch_dim =
descriptor.index() == 0 ? lhs_batch_dimension : rhs_batch_dimension;
if (descriptor.dimension() > sparse_batch_dim)
descriptor.set_dimension(descriptor.dimension() -
(batch_dimension_count - 1));
HloInstruction* meta =
dot->mutable_operand(HloDotInstruction::kOperands + i);
Shape new_meta_shape = merge_batch_dims(meta->shape(), sparse_batch_dim);
TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeReshapeHlo(new_meta_shape, meta));
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_lhs,
MakeReshapeHlo(new_lhs_shape, dot->mutable_operand(0)));
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_rhs,
MakeReshapeHlo(new_rhs_shape, dot->mutable_operand(1)));
Shape new_dot_shape = merge_batch_dims(dot->shape(), 0);
HloInstruction* new_dot = dot->parent()->AddInstruction(
HloInstruction::CreateDot(new_dot_shape, reshaped_lhs, reshaped_rhs,
new_dot_dimension_numbers,
dot->precision_config(), new_sparsity,
sparse_meta),
&dot->metadata());
dot->SetupDerivedInstruction(new_dot);
std::unique_ptr<HloInstruction> out_reshape =
HloInstruction::CreateReshape(dot->shape(), new_dot);
return ReplaceWithNewInstruction(dot, std::move(out_reshape));
}
};
}
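// Entry point: delegates to the rewrite visitor above. A minimal usage
// sketch (assumed driver code, not part of this file):
//
//   DotDimensionMerger merger;
//   TF_ASSIGN_OR_RETURN(bool changed,
//                       merger.Run(module, /*execution_threads=*/{}));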
absl::StatusOr<bool> DotDimensionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return BatchDimensionMerger().RunOnModule(module, execution_threads);
}
} | #include "xla/service/dot_dimension_merger.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DotDimensionMergerTest = HloTestBase;
TEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11] parameter(0)
p1 = bf16[79,2,4,11,44] parameter(1)
ROOT d = bf16[2,4,12,44] dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{3,2,1,0} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,1,0} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,2,1,0} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest,
MergeConsecutiveBatchDimensionsNonDefaultLayouts) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11]{4,0,3,2,1} parameter(0)
p1 = bf16[79,2,4,11,44]{3,0,4,2,1} parameter(1)
ROOT d = bf16[2,4,12,44]{3,1,0,2} dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,0,2,1} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{2,0,3,1} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,0,1} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,1,0,2} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest, SkipPhysicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,4,12,13]{3,1,2,0} parameter(0)
p1 = bf16[2,4,13,55]{3,2,1,0} parameter(1)
ROOT d = bf16[2,4,12,55]{3,2,1,0} dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipUnsortedBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[4,2,12,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={1,0}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipLogicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,12,4,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={0,2}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SparseDotUpdatesDescriptor) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[3,4,5,6,16] parameter(0)
p1 = bf16[3,4,5,32,6] parameter(1)
meta = u16[3,4,5,6,2] parameter(2)
ROOT d = bf16[4,5,6,6] dot(p0, p1, meta), sparsity=L.4@2:4,
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[3,20,6,16]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[3,20,32,6]{3,2,1,0} reshape(%p1)
; CHECK: %[[R2:.*]] = u16[3,20,6,2]{3,2,1,0} reshape(%meta)
; CHECK: %[[DOT:.*]] = bf16[20,6,6]{2,1,0} dot(%[[R0]], %[[R1]], %[[R2]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-SAME: sparsity=L.3@2:4
; CHECK-NEXT: ROOT {{.+}} = bf16[4,5,6,6]{3,2,1,0} reshape(%[[DOT]])
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1838a903-3723-4d83-9f01-335ffe87693a | cpp | tensorflow/tensorflow | hlo_module_config | third_party/xla/xla/service/hlo_module_config.cc | third_party/xla/xla/service/hlo_module_config_test.cc | #include "xla/service/hlo_module_config.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_layout.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrAppend;
HloModuleConfig::HloModuleConfig(const ProgramShape& program_shape,
bool ignore_layouts)
: entry_computation_layout_(
ComputationLayout(program_shape, ignore_layouts)) {}
HloModuleConfig::HloModuleConfig(ComputationLayout entry_computation_layout)
: entry_computation_layout_(std::move(entry_computation_layout)) {}
void HloModuleConfig::SetDefaultComputationLayout(
const ProgramShape& program_shape) {
entry_computation_layout_ = ComputationLayout(program_shape);
}
void HloModuleConfig::SetComputationLayoutIfExists(
const ProgramShape& program_shape) {
  entry_computation_layout_ = ComputationLayout(program_shape,
                                                /*ignore_layouts=*/false);
}
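// Builds a human-readable key under which compilations may be cached: two
// configs that can reuse each other's compilation should produce the same
// key. A nonzero seed deliberately defeats caching by appending a
// process-wide counter.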
std::string HloModuleConfig::compilation_cache_key() const {
std::string key = absl::StrCat("profiling=", hlo_profiling_enabled());
StrAppend(&key, "::(");
std::vector<std::string> params;
if (entry_computation_layout_.has_value()) {
for (const ShapeLayout& param_layout :
entry_computation_layout_->parameter_layouts()) {
params.push_back(param_layout.shape().DebugString());
}
StrAppend(&key, absl::StrJoin(params, ", "), ") => ",
entry_computation_layout_->result_shape().SerializeAsString());
}
if (seed() != 0) {
static std::atomic<int> counter{0};
StrAppend(&key, "forcing recompile ", counter++);
}
if (replica_count() != 1) {
StrAppend(&key, "::replica_count=", replica_count());
}
StrAppend(&key, debug_options_.DebugString());
if (intra_op_parallelism_threads() > 0) {
StrAppend(&key, "::intra_op_parallelism_threads=",
intra_op_parallelism_threads());
}
if (!device_type().empty()) {
StrAppend(&key, device_type());
}
StrAppend(&key, "::alias_passthrough_params=", alias_passthrough_params_);
StrAppend(&key, "::allow_spmd_sharding_propagation_to_parameters={",
absl::StrJoin(allow_spmd_sharding_propagation_to_parameters_, ","),
"}");
StrAppend(&key, "::allow_spmd_sharding_propagation_to_output={",
absl::StrJoin(allow_spmd_sharding_propagation_to_output_, ","),
"}");
if (!fdo_profile().empty()) {
StrAppend(&key, "::fdo_profile=", absl::BytesToHexString(fdo_profile()));
}
if (device_memory_size() != 0) {
StrAppend(&key, "::device_memory_size=", device_memory_size());
}
StrAppend(&key, "::use_shardy_partitioner=", use_shardy_partitioner());
return key;
}
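// The AssignProto*/AssignStruct* helpers below convert individual config
// fields to and from HloModuleConfigProto; each pair is intended to round
// trip losslessly (see the ShardableValueUpdatePairProtoRoundTrip test in
// the companion test file).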
void HloModuleConfig::AssignProtoShardableValueUpdatePairs(
tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>*
proto_update_pairs,
const std::vector<HloModuleConfig::ShardableValueUpdatePair>&
update_pairs) {
using ProtoShard = std::decay_t<decltype(proto_update_pairs->at(0))>;
proto_update_pairs->Reserve(update_pairs.size());
for (const auto& pair : update_pairs) {
ProtoShard shard;
shard.set_input_parameter_number(pair.input_parameter_number);
for (int64_t val : pair.parameter_shape_index) {
shard.add_parameter_shape_index(val);
}
for (int64_t val : pair.output_shape_index) {
shard.add_output_shape_index(val);
}
proto_update_pairs->Add(std::move(shard));
}
}
static HloModuleConfigProto::BoolList BoolVectorToProto(
const std::vector<bool>& vals) {
HloModuleConfigProto::BoolList list;
for (int i = 0; i < vals.size(); ++i) {
list.add_vals(vals[i]);
}
return list;
}
static void AssignProtoFusionConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<bool>>& fusion_config) {
auto* proto_config = proto.mutable_fusion_config();
proto_config->Reserve(fusion_config.size());
for (const auto& vals : fusion_config) {
proto_config->Add(BoolVectorToProto(vals));
}
}
static void AssignProtoDotConfig(
HloModuleConfigProto& proto,
const absl::flat_hash_map<std::string, std::vector<int64_t>>& dot_config) {
std::map<std::string, std::vector<int64_t>> sorted_dot_config;
sorted_dot_config.insert(dot_config.begin(), dot_config.end());
for (const auto& [key, list_vector] : sorted_dot_config) {
HloModuleConfigProto::Int64List list;
for (int64_t val : list_vector) {
list.add_vals(val);
}
proto.mutable_dot_config()->try_emplace(key, std::move(list));
}
}
static void AssignProtoLayoutConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<std::vector<int64_t>>>& layout_config) {
auto* proto_layout_config = proto.mutable_layout_config();
proto_layout_config->Reserve(layout_config.size());
for (const auto& config_row : layout_config) {
HloModuleConfigProto::Int64ListList proto_list_list;
proto_list_list.mutable_lists()->Reserve(config_row.size());
for (const auto& cell : config_row) {
HloModuleConfigProto::Int64List list;
for (int64_t val : cell) {
list.add_vals(val);
}
*proto_list_list.add_lists() = std::move(list);
}
proto_layout_config->Add(std::move(proto_list_list));
}
}
static void AssignProtoPhaseOrderingConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<bool>>& phase_config) {
auto* proto_config = proto.mutable_phase_ordering_config();
proto_config->Reserve(phase_config.size());
for (const auto& vals : phase_config) {
proto_config->Add(BoolVectorToProto(vals));
}
}
void HloModuleConfig::AssignStructShardableValueUpdatePairs(
HloModuleConfig& config,
const tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>&
pairs) {
std::vector<HloModuleConfig::ShardableValueUpdatePair> cfg_pairs;
cfg_pairs.reserve(pairs.size());
for (const auto& proto_pair : pairs) {
HloModuleConfig::ShardableValueUpdatePair pair;
pair.input_parameter_number = proto_pair.input_parameter_number();
const auto param_idx = proto_pair.parameter_shape_index();
pair.parameter_shape_index.assign(param_idx.begin(), param_idx.end());
const auto output_idx = proto_pair.output_shape_index();
pair.output_shape_index.assign(output_idx.begin(), output_idx.end());
cfg_pairs.push_back(pair);
}
config.set_shardable_value_update_pairs(std::move(cfg_pairs));
}
static void AssignStructFusionConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<bool>> module_config;
auto& proto_config = proto.fusion_config();
module_config.reserve(proto_config.size());
for (auto& list : proto_config) {
std::vector<bool> temp;
for (bool val : list.vals()) {
temp.push_back(val);
}
module_config.push_back(std::move(temp));
}
*config.mutable_fusion_config() = std::move(module_config);
}
static void AssignStructDotConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
auto& proto_config = proto.dot_config();
for (auto& [key, int_list] : proto_config) {
std::vector<int64_t> value{int_list.vals().begin(), int_list.vals().end()};
config.mutable_dot_config()->insert(std::pair{key, value});
}
}
static void AssignStructLayoutConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<std::vector<int64_t>>> module_config;
auto proto_config = proto.layout_config();
module_config.reserve(proto_config.size());
for (const auto& proto_row_wrapper : proto_config) {
const auto& proto_row = proto_row_wrapper.lists();
std::vector<std::vector<int64_t>> module_row;
module_row.reserve(proto_row.size());
for (const auto& proto_cell : proto_row) {
const auto& cell = proto_cell.vals();
module_row.push_back(std::vector<int64_t>(cell.begin(), cell.end()));
}
module_config.push_back(std::move(module_row));
}
*config.mutable_layout_config() = std::move(module_config);
}
static void AssignStructPhaseOrderingConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<bool>> module_config;
auto& proto_config = proto.phase_ordering_config();
module_config.reserve(proto_config.size());
for (auto& list : proto_config) {
std::vector<bool> temp;
for (bool val : list.vals()) {
temp.push_back(val);
}
module_config.push_back(std::move(temp));
}
*config.mutable_phase_ordering_config() = std::move(module_config);
}
HloModuleConfigProto HloModuleConfig::ToProto() const {
HloModuleConfigProto proto;
if (has_entry_computation_layout()) {
*proto.mutable_entry_computation_layout() =
entry_computation_layout().ComputeProgramShape().ToProto();
}
proto.set_seed(seed_);
proto.set_launch_id(launch_id_);
proto.set_replica_count(replica_count_);
proto.set_num_partitions(num_partitions_);
for (bool requirement : param_requires_broadcast_via_collectives_) {
proto.add_param_requires_broadcast_via_collectives(requirement);
}
proto.set_use_spmd_partitioning(use_spmd_partitioning_);
proto.set_use_auto_spmd_partitioning(use_auto_spmd_partitioning_);
for (int64_t partitioning_shape : auto_spmd_partitioning_mesh_shape_) {
proto.add_auto_spmd_partitioning_mesh_shape(partitioning_shape);
}
for (int64_t partitioning_id : auto_spmd_partitioning_mesh_ids_) {
proto.add_auto_spmd_partitioning_mesh_ids(partitioning_id);
}
proto.set_deduplicate_hlo(deduplicate_hlo_);
proto.set_intra_op_parallelism_threads(intra_op_parallelism_threads_);
proto.set_device_type(device_type_);
*proto.mutable_debug_options() = debug_options_;
if (has_static_device_assignment()) {
auto proto_assignment = proto.mutable_static_device_assignment();
static_device_assignment_->Serialize(proto_assignment);
}
AssignProtoShardableValueUpdatePairs(
proto.mutable_shardable_value_update_pairs(),
shardable_value_update_pairs_);
proto.set_alias_passthrough_params(alias_passthrough_params_);
proto.set_content_aware_computation_sorting(
content_aware_computation_sorting_);
proto.set_fusion_config_collection(
static_cast<HloModuleConfigProto::FusionConfigCollection>(
fusion_config_collection_));
AssignProtoFusionConfig(proto, fusion_config_);
AssignProtoDotConfig(proto, dot_config_);
AssignProtoLayoutConfig(proto, layout_config_);
for (uint64_t cfg : memory_space_assignment_config_) {
proto.add_memory_space_assignment_config(cfg);
}
AssignProtoPhaseOrderingConfig(proto, phase_ordering_config_);
proto.set_phase_index(phase_index_);
for (bool value : allow_spmd_sharding_propagation_to_parameters_) {
proto.add_allow_spmd_sharding_propagation_to_parameters(value);
}
for (bool value : allow_spmd_sharding_propagation_to_output_) {
proto.add_allow_spmd_sharding_propagation_to_output(value);
}
auto proto_analysis_map = proto.mutable_analysis_allowance_map();
for (const auto& [key, value] : analysis_allowance_map_) {
proto_analysis_map->insert({std::string(key), value});
}
proto.set_matrix_unit_operand_precision(matrix_unit_operand_precision_);
proto.set_allow_separate_sharding_programs(allow_separate_sharding_programs_);
proto.set_fdo_profile(fdo_profile_);
proto.set_device_memory_size(device_memory_size_);
proto.set_use_shardy_partitioner(use_shardy_partitioner_);
return proto;
}
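// Inverse of ToProto(). A proto without an entry computation layout yields a
// config with the layout cleared; all other fields are copied across, with
// absent proto fields falling back to defaults.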
absl::StatusOr<std::unique_ptr<HloModuleConfig>>
HloModuleConfig::CreateFromProto(const HloModuleConfigProto& proto) {
auto config = std::make_unique<HloModuleConfig>();
if (proto.has_entry_computation_layout()) {
auto comp_layout = ProgramShape{proto.entry_computation_layout()};
config->SetComputationLayoutIfExists(comp_layout);
} else {
config->clear_entry_computation_layout();
}
config->seed_ = proto.seed();
config->launch_id_ = proto.launch_id();
config->replica_count_ = proto.replica_count();
config->num_partitions_ = proto.num_partitions();
config->param_requires_broadcast_via_collectives_.assign(
proto.param_requires_broadcast_via_collectives().begin(),
proto.param_requires_broadcast_via_collectives().end());
config->use_spmd_partitioning_ = proto.use_spmd_partitioning();
config->use_auto_spmd_partitioning_ = proto.use_auto_spmd_partitioning();
config->auto_spmd_partitioning_mesh_shape_.assign(
proto.auto_spmd_partitioning_mesh_shape().begin(),
proto.auto_spmd_partitioning_mesh_shape().end());
config->auto_spmd_partitioning_mesh_ids_.assign(
proto.auto_spmd_partitioning_mesh_ids().begin(),
proto.auto_spmd_partitioning_mesh_ids().end());
config->deduplicate_hlo_ = proto.deduplicate_hlo();
config->intra_op_parallelism_threads_ = proto.intra_op_parallelism_threads();
config->device_type_ = proto.device_type();
if (proto.has_debug_options()) {
config->debug_options_ = proto.debug_options();
}
if (proto.has_static_device_assignment()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<DeviceAssignment> device_assignment,
DeviceAssignment::Deserialize(proto.static_device_assignment()));
config->static_device_assignment_ = std::move(*device_assignment);
}
AssignStructShardableValueUpdatePairs(*config,
proto.shardable_value_update_pairs());
config->alias_passthrough_params_ = proto.alias_passthrough_params();
config->content_aware_computation_sorting_ =
proto.content_aware_computation_sorting();
config->fusion_config_collection_ =
static_cast<FusionConfigCollection>(proto.fusion_config_collection());
AssignStructFusionConfig(*config, proto);
AssignStructDotConfig(*config, proto);
AssignStructLayoutConfig(*config, proto);
config->memory_space_assignment_config_.assign(
proto.memory_space_assignment_config().begin(),
proto.memory_space_assignment_config().end());
AssignStructPhaseOrderingConfig(*config, proto);
config->phase_index_ = proto.phase_index();
config->allow_spmd_sharding_propagation_to_parameters_.assign(
proto.allow_spmd_sharding_propagation_to_parameters().begin(),
proto.allow_spmd_sharding_propagation_to_parameters().end());
config->allow_spmd_sharding_propagation_to_output_.assign(
proto.allow_spmd_sharding_propagation_to_output().begin(),
proto.allow_spmd_sharding_propagation_to_output().end());
config->analysis_allowance_map_.insert(proto.analysis_allowance_map().begin(),
proto.analysis_allowance_map().end());
config->matrix_unit_operand_precision_ =
proto.matrix_unit_operand_precision();
config->allow_separate_sharding_programs_ =
proto.allow_separate_sharding_programs();
config->fdo_profile_ = proto.fdo_profile();
config->device_memory_size_ = proto.device_memory_size();
config->use_shardy_partitioner_ = proto.use_shardy_partitioner();
return std::move(config);
}
} | #include "xla/service/hlo_module_config.h"
#include <string>
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(HloModuleConfigTest, ShardableValueUpdatePairProtoRoundTrip) {
const std::string text_proto = R"(
shardable_value_update_pairs {
input_parameter_number: 2
parameter_shape_index: 0
parameter_shape_index: 1
output_shape_index: 1
output_shape_index: 0
}
shardable_value_update_pairs {
input_parameter_number: 1
parameter_shape_index: 2
output_shape_index: 3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto input_proto,
ParseTextProto<HloModuleConfigProto>(text_proto));
HloModuleConfig config;
HloModuleConfig::AssignStructShardableValueUpdatePairs(
config, input_proto.shardable_value_update_pairs());
EXPECT_EQ(config.shardable_value_update_pairs().size(), 2);
HloModuleConfigProto output_proto;
HloModuleConfig::AssignProtoShardableValueUpdatePairs(
output_proto.mutable_shardable_value_update_pairs(),
config.shardable_value_update_pairs());
EXPECT_EQ(input_proto.SerializeAsString(), output_proto.SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e623aace-d631-4870-923c-4ed013853e68 | cpp | tensorflow/tensorflow | all_reduce_promotion | third_party/xla/xla/service/all_reduce_promotion.cc | third_party/xla/xla/service/all_reduce_promotion_test.cc | #include "xla/service/all_reduce_promotion.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
bool IsAllReduce(const HloInstruction* inst) {
return inst->opcode() == HloOpcode::kAllReduce ||
inst->opcode() == HloOpcode::kReduceScatter;
}
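// Clones the collective with the promoted shape and rebuilds its `to_apply`
// reduction computation at the wider element type, since the original
// computation still reduces at the narrow type.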
std::unique_ptr<HloInstruction> CloneAllReduce(
const HloInstruction* inst, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
std::unique_ptr<HloInstruction> new_inst =
inst->CloneWithNewOperands(shape, operands);
HloComputation* to_apply = new_inst->to_apply();
HloComputation* to_apply_promoted = [&]() {
PrimitiveType type = shape.element_type();
std::string name = absl::StrCat(to_apply->name(), "_promoted");
HloComputation::Builder promoted(name);
auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
promoted.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), to_apply->root_instruction()->opcode(),
x, y));
return inst->GetModule()->AddEmbeddedComputation(promoted.Build());
}();
new_inst->set_to_apply(to_apply_promoted);
to_apply_promoted->SetCollectiveCallInstruction(new_inst.get());
return new_inst;
}
}
AllReducePromotion::AllReducePromotion(
absl::Span<std::pair<PrimitiveType, PrimitiveType> const> from_to_types)
: pass_(from_to_types, IsAllReduce, CloneAllReduce) {}
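// A typical instantiation (matching the tests in the companion test file)
// promotes 16-bit integer reductions to 32 bits. Sketch, assuming the
// convenience Run(HloModule*) overload from HloPassInterface:
//
//   AllReducePromotion promotion({{U16, U32}, {S16, S32}});
//   TF_ASSIGN_OR_RETURN(bool changed, promotion.Run(module));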
absl::StatusOr<bool> AllReducePromotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return pass_.Run(module, execution_threads);
}
} | #include "xla/service/all_reduce_promotion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class AllReducePromotionTest : public HloTestBase {
public:
AllReducePromotion pass_{{{U16, U32}, {S16, S32}}};
};
TEST_F(AllReducePromotionTest, SimplePromotionAllReduce) {
absl::string_view hlo_text = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[2] all-reduce(a1), replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::AllReduce(m::Convert().WithShape(U32, {2}))
.WithShape(U32, {2}))
.WithShape(U16, {2})));
}
TEST_F(AllReducePromotionTest, SimplePromotionReduceScatter) {
absl::string_view hlo_text = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[1] reduce-scatter(a1), dimensions={0}, replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::ReduceScatter(m::Convert().WithShape(U32, {2}))
.WithShape(U32, {1}))
.WithShape(U16, {1})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_promotion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_promotion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52df2ebe-38c3-4314-9f53-753662d4aa23 | cpp | tensorflow/tensorflow | hlo_dce | third_party/xla/xla/service/hlo_dce.cc | third_party/xla/xla/service/hlo_dce_test.cc | #include "xla/service/hlo_dce.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <set>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
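// A dead while loop is removable only if nothing called from its condition
// or body has a side effect, except that layout-unconstrained collectives
// may be ignored when the caller opted into removing them.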
bool IsRemovableWhile(HloInstruction* instruction,
bool remove_cross_partition_collective_ops) {
if (instruction->opcode() != HloOpcode::kWhile) {
return false;
}
for (HloComputation* computation : instruction->called_computations()) {
for (HloInstruction* called_instr : computation->instructions()) {
auto maybe_collective_op =
DynCast<HloCollectiveInstruction>(called_instr);
if (called_instr->HasSideEffect() &&
(!remove_cross_partition_collective_ops || !maybe_collective_op ||
maybe_collective_op->constrain_layout())) {
return false;
}
}
}
return true;
}
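// Shrinks a multi-output fusion to the tuple elements that are actually
// consumed. Sketch: with a fusion root tuple(a, b, c) where only
// get-tuple-element indices 0 and 2 are used, the fusion's shape becomes a
// two-element tuple, the surviving GTEs are renumbered to 0 and 1, and the
// root is rebuilt as tuple(a, c); a single surviving element drops the
// tuple wrapper entirely.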
absl::StatusOr<bool> RemoveMultiOutputFusionsUnusedOutputs(
HloComputation* computation) {
HloInstruction* fusion_instruction = computation->FusionInstruction();
if (!fusion_instruction) {
return false;
}
if (computation->root_instruction()->opcode() != HloOpcode::kTuple ||
computation->root_instruction()->has_sharding() ||
!fusion_instruction->output_operand_aliasing().empty() ||
fusion_instruction->HasControlDependencies() ||
fusion_instruction->IsCustomFusion()) {
return false;
}
std::set<int64_t> used_tuple_elements;
if (fusion_instruction->users().empty()) {
return false;
}
for (HloInstruction* gte : fusion_instruction->users()) {
if (gte->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
used_tuple_elements.insert(gte->tuple_index());
}
if (used_tuple_elements.size() ==
computation->root_instruction()->operand_count()) {
return false;
}
std::vector<Shape> tuple_shapes;
tuple_shapes.reserve(used_tuple_elements.size());
for (int64_t tuple_index : used_tuple_elements) {
tuple_shapes.push_back(
fusion_instruction->shape().tuple_shapes(tuple_index));
}
Shape new_shape = tuple_shapes.size() == 1
? tuple_shapes[0]
: ShapeUtil::MakeTupleShape(tuple_shapes);
*fusion_instruction->mutable_shape() = std::move(new_shape);
if (tuple_shapes.size() > 1) {
for (HloInstruction* gte : fusion_instruction->users()) {
auto it = used_tuple_elements.lower_bound(gte->tuple_index());
int64_t new_tuple_index = std::distance(used_tuple_elements.begin(), it);
gte->set_tuple_index(new_tuple_index);
}
} else {
std::vector<HloInstruction*> users(fusion_instruction->users());
for (HloInstruction* gte : users) {
TF_ASSIGN_OR_RETURN(std::ignore, gte->parent()->ReplaceInstruction(
gte, fusion_instruction,
                                           /*preserve_sharding=*/true,
                                           /*relay_control_dependency=*/true));
}
}
if (tuple_shapes.size() > 1) {
std::vector<HloInstruction*> new_operands;
new_operands.reserve(used_tuple_elements.size());
for (int64_t tuple_index : used_tuple_elements) {
new_operands.push_back(
computation->root_instruction()->mutable_operand(tuple_index));
}
auto new_tuple =
computation->AddInstruction(HloInstruction::CreateTuple(new_operands));
TF_RETURN_IF_ERROR(computation->ReplaceInstructionWithDifferentShape(
computation->root_instruction(), new_tuple));
} else {
TF_RETURN_IF_ERROR(
computation->root_instruction()->ReplaceAllUsesWithDifferentShape(
computation->root_instruction()->mutable_operand(
*used_tuple_elements.begin())));
}
return true;
}
}
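// Removes dead instructions from `computation`. An instruction is dead when
// it has no users, is not the root, and either has no side effects or is a
// collective/while that the flags allow us to drop; "Sharding" custom-calls
// are only removed when their sole purpose was annotating a single-use,
// non-root, non-parameter operand.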
absl::StatusOr<bool> HloDCE::RunOnComputation(
HloComputation* computation, bool remove_cross_partition_collective_ops) {
TF_ASSIGN_OR_RETURN(bool changed,
RemoveMultiOutputFusionsUnusedOutputs(computation));
std::vector<HloInstruction*> dead_roots;
for (auto* instruction : computation->instructions()) {
auto maybe_collective_op = DynCast<HloCollectiveInstruction>(instruction);
if (instruction->IsDead() && computation->IsSafelyRemovable(instruction) &&
(!instruction->IsCustomCall("Sharding") ||
(!instruction->operand(0)->IsRoot() &&
instruction->operand(0)->opcode() != HloOpcode::kParameter &&
instruction->operand(0)->user_count() == 1)) &&
(!instruction->HasSideEffect() ||
(remove_cross_partition_collective_ops && maybe_collective_op &&
!maybe_collective_op->constrain_layout()) ||
IsRemovableWhile(instruction,
remove_cross_partition_collective_ops))) {
dead_roots.push_back(instruction);
}
}
for (HloInstruction* dead_root : dead_roots) {
VLOG(1) << "Removing dead root " << dead_root->ToString()
<< " and its unused operands";
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(dead_root));
changed = true;
}
return changed;
}
absl::Status HloDCE::RecursivelyRemoveDeadComputation(
HloModule* module, HloComputation* computation,
absl::flat_hash_map<HloComputation*, int>& live_call_counts) {
std::vector<HloComputation*> to_be_deleted;
for (HloInstruction* instruction : computation->instructions()) {
for (HloComputation* subcomp : instruction->called_computations()) {
auto iter = live_call_counts.find(subcomp);
if (iter == live_call_counts.end()) {
return tsl::errors::Internal(
"called computation %s not found in live_call_counts table during "
"HloDCE",
subcomp->name());
}
int live_call_count = --iter->second;
CHECK_GE(live_call_count, 0);
if (live_call_count == 0) {
to_be_deleted.push_back(subcomp);
live_call_counts.erase(iter);
}
}
}
VLOG(1) << "Removing dead computation " << computation->name();
TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(computation));
for (HloComputation* subcomp : to_be_deleted) {
TF_RETURN_IF_ERROR(
RecursivelyRemoveDeadComputation(module, subcomp, live_call_counts));
}
return absl::OkStatus();
}
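// Counts how many times each computation is called from every computation in
// the module (seeding the entry computation as live); any computation never
// referenced at all is removed, and removal recursively decrements the
// counts of its callees so the cleanup cascades.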
absl::StatusOr<bool> HloDCE::RecursivelyRemoveDeadComputations(
HloModule* module) {
bool module_contains_dead_code = false;
absl::flat_hash_map<HloComputation*, int> live_computation_call_count;
if (HloComputation* entry_computation = module->entry_computation()) {
++live_computation_call_count[entry_computation];
}
for (auto* computation : module->MakeComputationPostOrder()) {
for (auto* instruction : computation->instructions()) {
for (auto* subcomp : instruction->called_computations()) {
++live_computation_call_count[subcomp];
}
}
}
for (auto* computation : module->MakeComputationPostOrder()) {
if (!live_computation_call_count.contains(computation)) {
TF_RETURN_IF_ERROR(RecursivelyRemoveDeadComputation(
module, computation, live_computation_call_count));
module_contains_dead_code = true;
}
}
return module_contains_dead_code;
}
absl::StatusOr<bool> HloDCE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(2) << "Before dce:";
XLA_VLOG_LINES(2, module->ToString());
auto computations = module->MakeComputationPostOrder(execution_threads);
std::reverse(computations.begin(), computations.end());
for (auto* computation : computations) {
TF_ASSIGN_OR_RETURN(
bool changed_for_computation,
RunOnComputation(computation, remove_cross_partition_collective_ops_));
changed |= changed_for_computation;
}
TF_ASSIGN_OR_RETURN(bool module_contains_dead_code,
RecursivelyRemoveDeadComputations(module));
changed |= module_contains_dead_code;
if (changed) {
VLOG(2) << "After dce:";
XLA_VLOG_LINES(2, module->ToString());
}
return changed;
}
} | #include "xla/service/hlo_dce.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class HloDceTest : public HloTestBase {
protected:
HloDceTest() {}
bool HasInstruction(const HloComputation& computation,
const HloInstruction* instruction) {
return absl::c_linear_search(computation.instructions(), instruction);
}
};
TEST_F(HloDceTest, NoDeadCode) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
HloDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
}
TEST_F(HloDceTest, InstructionsWithSideEffect) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto send = builder.AddInstruction(
      HloInstruction::CreateSend(constant, token, /*channel_id=*/0));
builder.AddInstruction(HloInstruction::CreateSendDone(send));
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
HloDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloDceTest, CustomCallInstructionsWithSideEffect) {
auto builder = HloComputation::Builder(TestName());
auto instr = Cast<HloCustomCallInstruction>(builder.AddInstruction(
HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       /*operands=*/{},
                                       /*custom_call_target=*/"foo")));
instr->set_custom_call_has_side_effect(true);
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloDceTest, AsyncCustomCallInstructionsWithSideEffect) {
auto builder = HloComputation::Builder(TestName());
auto instr = Cast<HloCustomCallInstruction>(builder.AddInstruction(
HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       /*operands=*/{},
                                       /*custom_call_target=*/"foo")));
instr->set_custom_call_has_side_effect(true);
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN([[maybe_unused]] HloInstruction * async_done,
module->entry_computation()->CreateAsyncInstructions(
instr, {{ShapeUtil::MakeScalarShape(U32)}},
HloInstruction::kMainExecutionThread,
                              /*replace=*/true, /*override_names=*/true));
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloDceTest, CustomCallInstructionsWithoutSideEffect) {
auto builder = HloComputation::Builder(TestName());
builder.AddInstruction(
HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                   /*operands=*/{},
                                   /*custom_call_target=*/"foo"));
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
EXPECT_TRUE(result);
}
TEST_F(HloDceTest, AsyncCustomCallInstructionsWithoutSideEffect) {
auto builder = HloComputation::Builder(TestName());
  auto instr = Cast<HloCustomCallInstruction>(
      builder.AddInstruction(HloInstruction::CreateCustomCall(
          ShapeUtil::MakeShape(F32, {}), /*operands=*/{}, "foo")));
instr->set_custom_call_has_side_effect(false);
builder.AddInstruction(HloInstruction::CreateTuple({}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN([[maybe_unused]] HloInstruction * async_done,
module->entry_computation()->CreateAsyncInstructions(
                              instr, {{ShapeUtil::MakeScalarShape(U32)}},
                              HloInstruction::kMainExecutionThread,
                              /*replace=*/true, /*override_names=*/true));
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
EXPECT_TRUE(result);
}
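// A dangling "Sharding" custom-call is kept while its operand is still live
// elsewhere; contrast with the next test, where the operand is otherwise dead.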
TEST_F(HloDceTest, ShardingCustomCallInstruction) {
auto builder = HloComputation::Builder(TestName());
auto p0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {10, 10}), "p0"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(p0->shape(), HloOpcode::kAdd, p0, p0));
  auto dangling_sharding =
      builder.AddInstruction(HloInstruction::CreateCustomCall(
          p0->shape(), /*operands=*/{add}, "Sharding"));
dangling_sharding->set_sharding(HloSharding::Tile(TileAssignment({2, 1})));
builder.AddInstruction(HloInstruction::CreateBinary(
p0->shape(), HloOpcode::kMultiply, add, add));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
EXPECT_FALSE(result);
}
TEST_F(HloDceTest, ShardingCustomCallInstructionWithDeadOperand) {
auto builder = HloComputation::Builder(TestName());
auto p0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {10, 10}), "p0"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(p0->shape(), HloOpcode::kAdd, p0, p0));
  auto dangling_sharding =
      builder.AddInstruction(HloInstruction::CreateCustomCall(
          p0->shape(), /*operands=*/{add}, "Sharding"));
dangling_sharding->set_sharding(HloSharding::Tile(TileAssignment({2, 1})));
builder.AddInstruction(
HloInstruction::CreateBinary(p0->shape(), HloOpcode::kMultiply, p0, p0));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
}
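// Parameters are part of the computation's signature and are never removed;
// only the dead negate that uses dead_param1 is eliminated (5 -> 4).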
TEST_F(HloDceTest, DeadParameters) {
auto builder = HloComputation::Builder(TestName());
auto live_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "live_param"));
auto dead_param1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "dead_param1"));
builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(F32, {}), "dead_param2"));
builder.AddInstruction(HloInstruction::CreateUnary(
dead_param1->shape(), HloOpcode::kNegate, dead_param1));
builder.AddInstruction(HloInstruction::CreateUnary(
live_param->shape(), HloOpcode::kNegate, live_param));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
EXPECT_EQ(1, dead_param1->user_count());
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_EQ(0, dead_param1->user_count());
}
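// Dead instructions tied together by a control dependency are preserved;
// only the unconstrained dead negate/add pair is removed (7 -> 5 instructions).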
TEST_F(HloDceTest, ControlDependencies) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
auto dead_negate = builder.AddInstruction(HloInstruction::CreateUnary(
constant1->shape(), HloOpcode::kNegate, constant1));
auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto dead_negate_with_control_dep =
builder.AddInstruction(HloInstruction::CreateUnary(
constant1->shape(), HloOpcode::kNegate, constant1));
auto dead_add_with_control_dep =
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(dead_negate_with_control_dep->AddControlDependencyTo(
dead_add_with_control_dep));
EXPECT_EQ(7, computation->instruction_count());
EXPECT_TRUE(HasInstruction(*computation, dead_negate));
EXPECT_TRUE(HasInstruction(*computation, dead_add));
EXPECT_TRUE(HasInstruction(*computation, dead_negate_with_control_dep));
EXPECT_TRUE(HasInstruction(*computation, dead_add_with_control_dep));
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(5, computation->instruction_count());
EXPECT_FALSE(HasInstruction(*computation, dead_negate));
EXPECT_FALSE(HasInstruction(*computation, dead_add));
EXPECT_TRUE(HasInstruction(*computation, dead_negate_with_control_dep));
EXPECT_TRUE(HasInstruction(*computation, dead_add_with_control_dep));
}
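// A dead kCall is removable even though it references another computation.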
TEST_F(HloDceTest, DeadInstructionWithCalledComputation) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(F32, {});
auto callee_builder = HloComputation::Builder(TestName() + "-callee");
{
auto param = callee_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
callee_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
}
auto called_computation =
module->AddEmbeddedComputation(callee_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto dead_call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {param}, called_computation));
builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_EQ(2, param->user_count());
EXPECT_EQ(0, dead_call->user_count());
EXPECT_TRUE(HasInstruction(*computation, dead_call));
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
EXPECT_EQ(1, param->user_count());
EXPECT_FALSE(HasInstruction(*computation, dead_call));
}
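// The unused while loop must survive DCE because its body performs an infeed,
// which is a side effect.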
TEST_F(HloDceTest, CalledComputationWithSideEffect) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(F32, {});
auto cond_builder = HloComputation::Builder(TestName() + "-cond");
{
auto param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
auto constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
constant, ComparisonDirection::kLt));
}
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder(TestName() + "-body");
{
auto param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token = body_builder.AddInstruction(HloInstruction::CreateToken());
auto infeed = body_builder.AddInstruction(
HloInstruction::CreateInfeed(shape, token, ""));
auto infeed_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, infeed, 0));
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, param, infeed_data));
}
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto live_while = builder.AddInstruction(HloInstruction::CreateWhile(
shape, cond_computation, body_computation, param));
builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_EQ(2, param->user_count());
EXPECT_EQ(0, live_while->user_count());
EXPECT_TRUE(HasInstruction(*computation, live_while));
HloDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_EQ(2, param->user_count());
EXPECT_EQ(0, live_while->user_count());
EXPECT_TRUE(HasInstruction(*computation, live_while));
}
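// Side effects are found transitively: an outfeed two calls deep keeps the
// outer (otherwise unused) call alive.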
TEST_F(HloDceTest, CalledComputationWithNestedSideEffect) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(F32, {});
auto nested_callee_builder =
HloComputation::Builder(TestName() + "-nested_callee");
{
auto param = nested_callee_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token =
nested_callee_builder.AddInstruction(HloInstruction::CreateToken());
nested_callee_builder.AddInstruction(
HloInstruction::CreateOutfeed(shape, param, token, ""));
}
auto nested_called_computation =
module->AddEmbeddedComputation(nested_callee_builder.Build());
auto callee_builder = HloComputation::Builder(TestName() + "-callee");
{
auto param = callee_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
callee_builder.AddInstruction(HloInstruction::CreateCall(
ShapeUtil::MakeTokenShape(), {param}, nested_called_computation));
}
auto called_computation =
module->AddEmbeddedComputation(callee_builder.Build());
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto live_call = builder.AddInstruction(HloInstruction::CreateCall(
ShapeUtil::MakeTokenShape(), {param}, called_computation));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, computation->instruction_count());
EXPECT_EQ(1, param->user_count());
EXPECT_EQ(0, live_call->user_count());
EXPECT_TRUE(HasInstruction(*computation, live_call));
HloDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
EXPECT_EQ(1, param->user_count());
EXPECT_EQ(0, live_call->user_count());
EXPECT_TRUE(HasInstruction(*computation, live_call));
}
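// After the dead reduce is removed, the reduction subcomputation it called
// becomes unreachable and should be deleted too (2 -> 1 computations).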
TEST_F(HloDceTest, RemoveDeadSubcomputation) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
HloComputation::Builder subcomp_builder("reduction_subcomp");
{
auto* param0 =
subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param0"));
auto* param1 =
subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param1"));
subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
}
auto reduce_subcomp = module->AddEmbeddedComputation(subcomp_builder.Build());
builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeShape(F32, {1}),
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param0")),
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
{0}, reduce_subcomp));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 1);
}
TEST_F(HloDceTest, KeepUsedSubcomputation) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
HloComputation::Builder subcomp_builder("reduction_subcomp");
{
auto* param0 =
subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param0"));
auto* param1 =
subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "param1"));
subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
}
auto reduce_subcomp = module->AddEmbeddedComputation(subcomp_builder.Build());
builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeShape(F32, {}),
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param0")),
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
{0}, reduce_subcomp));
builder.AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeShape(F32, {}),
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {100}), "param1")),
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
{0}, reduce_subcomp));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
HloDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
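// Dead computations are removed transitively: both dead callers and the add
// computation they share disappear (4 -> 1 computations).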
TEST_F(HloDceTest, RemovedNestedDeadComputations) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(F32, {});
HloComputation::Builder called_subcomp_builder("called_dead_add");
{
auto* param0 =
called_subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
0, shape, "param0"));
auto* param1 =
called_subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
1, shape, "param1"));
called_subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
}
auto called_subcomp =
module->AddEmbeddedComputation(called_subcomp_builder.Build());
{
HloComputation::Builder dead_subcomp_builder("dead_caller0");
auto* param0 = dead_subcomp_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
auto* param1 = dead_subcomp_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
dead_subcomp_builder.AddInstruction(
HloInstruction::CreateCall(shape, {param0, param1}, called_subcomp));
module->AddEmbeddedComputation(dead_subcomp_builder.Build());
}
{
HloComputation::Builder dead_subcomp_builder("dead_caller1");
auto* param0 = dead_subcomp_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
auto* param1 = dead_subcomp_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
dead_subcomp_builder.AddInstruction(
HloInstruction::CreateCall(shape, {param0, param1}, called_subcomp));
module->AddEmbeddedComputation(dead_subcomp_builder.Build());
}
HloComputation::Builder builder(TestName());
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(module->MakeComputationPostOrder().size(), 4);
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
EXPECT_EQ(module->MakeComputationPostOrder().size(), 1);
}
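// The remaining tests cover DCE of unused multi-output fusion outputs: dead
// tuple elements are pruned, and a single surviving element collapses the
// fusion to a non-tuple shape.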
TEST_F(HloDceTest, MultiOutputFusionRemoveUnusedTupleElementsRemoveTuple) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
p2 = f32[32,32]{1,0} parameter(2)
add = f32[32,32]{1,0} add(p0, p1)
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p2, add)
}
ENTRY reduce {
param0 = f32[32,32]{1,0} parameter(0)
param1 = f32[32,32]{1,0} parameter(1)
param2 = f32[32,32]{1,0} parameter(2)
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1, param2), kind=kLoop, calls=fused_add
gte.0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
ROOT gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(0), m::Parameter(1))
.WithShape(F32, {32, 32})));
EXPECT_THAT(
root->fused_expression_root(),
GmockMatch(
m::Add(m::Parameter(0), m::Parameter(1)).WithShape(F32, {32, 32})));
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
TEST_F(
HloDceTest,
MultiOutputFusionRemoveUnusedTupleElementsRemoveTupleMultiUsersPerOutput) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
p2 = f32[32,32]{1,0} parameter(2)
add = f32[32,32]{1,0} add(p0, p1)
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p2, add, p2)
}
ENTRY reduce {
param0 = f32[32,32]{1,0} parameter(0)
param1 = f32[32,32]{1,0} parameter(1)
param2 = f32[32,32]{1,0} parameter(2)
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1, param2), kind=kLoop, calls=fused_add
gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
gte.1.again = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(gte.1, gte.1.again)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
HloInstruction* gte_0 = FindInstruction(module.get(), "gte.0");
EXPECT_EQ(gte_0, nullptr);
HloInstruction* gte_1 = FindInstruction(module.get(), "gte.1");
EXPECT_EQ(gte_1, nullptr);
HloInstruction* gte_1_again = FindInstruction(module.get(), "gte.1.again");
EXPECT_EQ(gte_1_again, nullptr);
HloInstruction* fusion = FindInstruction(module.get(), "fusion");
ASSERT_NE(fusion, nullptr);
EXPECT_FALSE(fusion->shape().IsTuple());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(root->operand_count(), 2);
EXPECT_EQ(root->operand(0), fusion);
EXPECT_EQ(root->operand(1), fusion);
}
TEST_F(
HloDceTest,
MultiOutputFusionRemoveUnusedTupleElementsRemoveTupleNonContiguousRemoval) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
p2 = f32[32,32]{1,0} parameter(2)
add = f32[32,32]{1,0} add(p0, p1)
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p2, add, p2, p2)
}
ENTRY reduce {
param0 = f32[32,32]{1,0} parameter(0)
param1 = f32[32,32]{1,0} parameter(1)
param2 = f32[32,32]{1,0} parameter(2)
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1, param2), kind=kLoop, calls=fused_add
gte.0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
gte.1.again = f32[32,32]{1,0} get-tuple-element(fusion), index=1
gte.3 = f32[32,32]{1,0} get-tuple-element(fusion), index=3
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(gte.1, gte.1.again, gte.3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
  HloInstruction* gte_0 = FindInstruction(module.get(), "gte.0");
  EXPECT_EQ(gte_0, nullptr);
  HloInstruction* gte_1 = FindInstruction(module.get(), "gte.1");
  ASSERT_NE(gte_1, nullptr);
  EXPECT_EQ(static_cast<HloGetTupleElementInstruction*>(gte_1)->tuple_index(),
            0);
  HloInstruction* gte_1_again = FindInstruction(module.get(), "gte.1.again");
  ASSERT_NE(gte_1_again, nullptr);
  EXPECT_EQ(
      static_cast<HloGetTupleElementInstruction*>(gte_1_again)->tuple_index(),
      0);
  HloInstruction* gte_3 = FindInstruction(module.get(), "gte.3");
  ASSERT_NE(gte_3, nullptr);
  EXPECT_EQ(static_cast<HloGetTupleElementInstruction*>(gte_3)->tuple_index(),
            1);
HloInstruction* fusion = FindInstruction(module.get(), "fusion");
ASSERT_NE(fusion, nullptr);
EXPECT_TRUE(fusion->shape().IsTuple());
EXPECT_EQ(fusion->shape().tuple_shapes_size(), 2);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(root->operand_count(), 3);
EXPECT_EQ(root->operand(0), gte_1);
EXPECT_EQ(root->operand(1), gte_1_again);
EXPECT_EQ(root->operand(2), gte_3);
}
TEST_F(HloDceTest, MultiOutputFusionRemoveUnusedTupleElementAdjustTuple) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
add = f32[32,32]{1,0} add(p0, p1)
neg = f32[32,32]{1,0} negate(add)
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(neg, p0, add)
}
ENTRY reduce {
param0 = f32[32,32]{1,0} parameter(0)
param1 = f32[32,32]{1,0} parameter(1)
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1), kind=kLoop, calls=fused_add
gte.0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
gte.2 = f32[32,32]{1,0} get-tuple-element(fusion), index=2
ROOT add = f32[32,32]{1,0} add(gte.0, gte.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
Shape shape = ShapeUtil::MakeShape(F32, {32, 32});
Shape expected_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* fusion;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(
m::GetTupleElement(
m::Fusion(&fusion).WithShapeEqualTo(&expected_shape), 0),
m::GetTupleElement(m::Fusion(), 1))));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(
m::Tuple(m::Negate(), m::Add()).WithShapeEqualTo(&expected_shape)));
EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
TEST_F(HloDceTest,
MultiOutputFusionRemoveUnusedTupleElementWithControlAdjustTupleAndDep) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
add = f32[32,32]{1,0} add(p0, p1)
ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, add)
}
ENTRY reduce {
param0 = f32[32,32]{1,0} parameter(0)
param1 = f32[32,32]{1,0} parameter(1)
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1), kind=kLoop, calls=fused_add
gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
add.2 = f32[32,32]{1,0} add(param0, param1), control-predecessors={gte.1}
ROOT add = f32[32,32]{1,0} add(add.2, gte.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloDCE dce;
auto changed = dce.Run(module.get());
ASSERT_TRUE(changed.ok());
EXPECT_TRUE(*changed);
HloInstruction* fusion;
HloInstruction* add2;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Add(&add2, m::Parameter(), m::Parameter()),
m::Fusion(&fusion))));
EXPECT_EQ(add2->control_predecessors().size(), 1);
EXPECT_EQ(add2->control_predecessors()[0], fusion);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dce.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dce_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3406a3e3-4682-478d-868d-9065687f1b4b | cpp | tensorflow/tensorflow | hlo_module_dce | third_party/xla/xla/service/hlo_module_dce.cc | third_party/xla/xla/service/hlo_module_dce_test.cc | #include "xla/service/hlo_module_dce.h"
#include <deque>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_liveness_analysis.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
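// Replaces while-body root operands at tuple indices that HloLivenessAnalysis
// proves dead with pass-through get-tuple-elements of the body parameter,
// then runs HloDCE on each modified while body.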
absl::StatusOr<bool> RunWhileDCE(
HloModule* module, HloLivenessAnalysis* liveness,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> while_body_comps_to_dce;
for (auto* computation : module->computations(execution_threads)) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const auto* xla_while = instruction;
auto* while_body_comp = xla_while->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (!xla_while->shape().IsTuple() ||
while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(1) << "WhileDCE SKIP while: " << xla_while->ToString();
continue;
}
const int64_t tuple_element_count =
ShapeUtil::TupleElementCount(xla_while->shape());
bool modified_while_body_comp = false;
for (int64_t i = 0; i < tuple_element_count; ++i) {
if (liveness->IsLive(xla_while, {i})) {
continue;
}
VLOG(1) << "WhileDCE Dead while tuple element."
<< " while: " << xla_while->name() << " tuple_index: " << i;
HloInstruction* pass_thru_gte = while_body_comp->AddInstruction(
HloInstruction::CreateGetTupleElement(
while_body_param->shape().tuple_shapes(i), while_body_param,
i));
TF_RETURN_IF_ERROR(
while_body_root->ReplaceOperandWith(i, pass_thru_gte));
changed = true;
modified_while_body_comp = true;
}
if (modified_while_body_comp) {
while_body_comps_to_dce.push_back(while_body_comp);
}
}
}
for (auto* while_body_comp : while_body_comps_to_dce) {
TF_ASSIGN_OR_RETURN(bool changed_for_computation,
                        HloDCE::RunOnComputation(
                            while_body_comp,
                            /*remove_cross_partition_collective_ops=*/false));
changed |= changed_for_computation;
}
return changed;
}
}
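// Pipeline: liveness-guided while-loop DCE, WhileLoopSimplifier to shrink the
// loop tuples, TupleSimplifier, and a final HloDCE sweep.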
absl::StatusOr<bool> HloModuleDCE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Before HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
std::unique_ptr<HloLivenessAnalysis> liveness;
TF_ASSIGN_OR_RETURN(liveness, HloLivenessAnalysis::Run(*module));
TF_ASSIGN_OR_RETURN(bool hlo_module_dce_changed,
RunWhileDCE(module, liveness.get(), execution_threads));
WhileLoopSimplifier while_loop_simplifier;
TF_ASSIGN_OR_RETURN(bool while_loop_simplifier_changed,
while_loop_simplifier.Run(module, execution_threads));
TupleSimplifier tuple_simplifier;
TF_ASSIGN_OR_RETURN(bool tuple_simplifier_changed,
tuple_simplifier.Run(module, execution_threads));
HloDCE hlo_dce;
TF_ASSIGN_OR_RETURN(bool hlo_dce_changed,
hlo_dce.Run(module, execution_threads));
VLOG(2) << "After HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
return hlo_module_dce_changed | hlo_dce_changed | tuple_simplifier_changed |
while_loop_simplifier_changed;
}
} | #include "xla/service/hlo_module_dce.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloModuleDceTest : public HloTestBase {
protected:
HloModuleDceTest() {}
bool HasInstruction(const HloComputation& computation,
const HloInstruction* instruction) {
return absl::c_linear_search(computation.instructions(), instruction);
}
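  // Returns true if the named while loop's body root merely forwards tuple
  // element `tuple_index` from the body parameter (a pass-through element).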
bool WhileBodyHasPassThroughTupleElement(const HloComputation* computation,
const std::string& while_name,
const int64_t tuple_index) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile &&
instruction->name() == while_name) {
auto* while_body_comp = instruction->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
auto* operand = while_body_root->operand(tuple_index);
if (operand->opcode() == HloOpcode::kGetTupleElement &&
operand->tuple_index() == tuple_index &&
operand->operand(0) == while_body_param) {
return true;
}
return false;
}
}
return false;
}
std::vector<const HloInstruction*> GetWhileLoops(
const HloComputation* computation) {
std::vector<const HloInstruction*> while_loops;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
while_loops.push_back(instruction);
}
}
return while_loops;
}
};
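// Both tuple elements feed the root while, so nothing is pass-through and the
// pass reports no change.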
TEST_F(HloModuleDceTest, WhileWithLiveOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, WhileWithUnusedSideEffectingTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], f32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = f32[] get-tuple-element(loop_var.1), index=1
constant.2 = f32[] constant(1.0)
rng = f32[] rng(constant.2, get-tuple-element.2), distribution=rng_uniform
add.1 = f32[] add(get-tuple-element.2, constant.2)
ROOT tuple = (s32[], f32[]) tuple(add, add.1)
}
SimpleLoop.condition {
loop_var.2 = (s32[], f32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.3 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.3), direction=LT
}
ENTRY SimpleLoop {
constant.4 = s32[] constant(0)
constant.5 = f32[] constant(0.0)
tuple.1 = (s32[], f32[]) tuple(constant.4, constant.5)
while = (s32[], f32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
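// Tuple element 1 is computed in the body but never observed outside the
// loop, so the while tuple should shrink to just the induction variable.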
TEST_F(HloModuleDceTest, OneWhileWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
TEST_F(HloModuleDceTest, OneWhileWithTupleElementUsedByCond) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
multiply = s32[] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[]) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, constant.4)
while = (s32[], s32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=0
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElementSwizzled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=0
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[3]{0}, s32[]) tuple(multiply, add)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[3]{0}, s32[]) tuple(constant.6, constant.5)
while.1 = (s32[3]{0}, s32[]) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=1
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
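// The while result is unused, but the outfeed in its body keeps the loop
// alive.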
TEST_F(HloModuleDceTest, WhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
WhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
WhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT rtuple = () tuple()
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, WhileWithOnlyLoopVariableBumping) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule InfiniteLoop
WhileBody {
body_param = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
get-tuple-element.2 = s32[] get-tuple-element(body_param), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[], s32[]) tuple(add, get-tuple-element.2)
}
WhileCondition {
cond_param = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
p0 = (s32[]) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(p0), index=0
constant.3 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, get-tuple-element.5)
while = (s32[], s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=1
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadWhileLoop) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TwoWhilesWithDeadWhileLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
ROOT tuple = (s32[], s32[3]{0}) tuple(add, get-tuple-element.2)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, get-tuple-element.5)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(5)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[3]{0} get-tuple-element(while.1), index=1
constant.7 = s32[] constant(0)
tuple.3 = (s32[], s32[3]{0}) tuple(constant.7, get-tuple-element.7)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_dce.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_dce_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be4ba84d-b0c8-4011-bcdf-ed33ef61fd5b | cpp | tensorflow/tensorflow | reduce_window_rewriter | third_party/xla/xla/service/reduce_window_rewriter.cc | third_party/xla/xla/service/reduce_window_rewriter_test.cc | #include "xla/service/reduce_window_rewriter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
static size_t FlattenShapeIndex(const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return 0;
}
CHECK_EQ(shape_index.size(), 1);
return shape_index.back();
}
static Shape ShapeAtIndex(const Shape& shape, const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return shape;
}
CHECK_EQ(shape_index.size(), 1);
return ShapeUtil::GetTupleElementShape(shape, shape_index.back());
}
static HloInstruction* GetAtIndex(HloInstruction* hlo,
const ShapeIndex& shape_index) {
if (shape_index.empty()) {
return hlo;
}
CHECK_EQ(shape_index.size(), 1);
return hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeAtIndex(hlo->shape(), shape_index), hlo, shape_index.back()));
}
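// Rewrites a rank-1 reduce-window by appending a trivial major dimension to
// every operand, running the equivalent R2 reduce-window, and reshaping each
// result back to R1.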
absl::Status ReduceWindowRewriter::ReplaceReduceWindowWithReshape(
HloReduceWindowInstruction* reduce_window) {
VLOG(2) << "Converting R1 reduce window: " << reduce_window->ToString();
std::vector<Shape> r2_output_shapes;
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
return;
}
Shape r2_output_shape = subshape;
ShapeUtil::AppendMajorDimension(1, &r2_output_shape);
UpdateLayout(&r2_output_shape);
r2_output_shapes.push_back(r2_output_shape);
VLOG(2) << "ReduceWindowRewriter: Converting R2 result to R1: "
<< ShapeUtil::HumanStringWithLayout(r2_output_shape);
});
Window r2_window = reduce_window->window();
WindowDimension* dim = r2_window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
std::vector<HloInstruction*> r2_operands;
for (HloInstruction* operand : reduce_window->inputs()) {
Shape r2_input_shape = operand->shape();
ShapeUtil::AppendMajorDimension(1, &r2_input_shape);
UpdateLayout(&r2_input_shape);
VLOG(2) << "ReduceWindowRewriter: Converting R1 operand to R2: "
<< ShapeUtil::HumanStringWithLayout(r2_input_shape);
HloInstruction* r2_operand = operand->parent()->AddInstruction(
HloInstruction::CreateReshape(r2_input_shape, operand));
VLOG(2) << "R2 new operand: " << r2_operand->ToString();
r2_operands.push_back(r2_operand);
}
HloInstruction* new_reduce_window = reduce_window->parent()->AddInstruction(
HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(r2_output_shapes)
: r2_output_shapes[0],
r2_operands, reduce_window->init_values(), r2_window,
reduce_window->to_apply()));
VLOG(2) << "R2 resulting reduce window: " << new_reduce_window->ToString();
std::vector<HloInstruction*> final_reshapes;
ShapeUtil::ForEachSubshape(
reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
return;
}
HloInstruction* final_reshape =
new_reduce_window->parent()->AddInstruction(
HloInstruction::CreateReshape(
subshape, GetAtIndex(new_reduce_window, shape_index)));
final_reshapes.push_back(final_reshape);
});
HloInstruction* result;
if (reduce_window->shape().IsTuple()) {
result = new_reduce_window->parent()->AddInstruction(
HloInstruction::CreateTuple(final_reshapes));
} else {
CHECK_EQ(final_reshapes.size(), 1);
result = final_reshapes[0];
}
TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(
new_reduce_window->parent()->RemoveInstruction(reduce_window));
return absl::OkStatus();
}
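// Recognizes a cumulative sum/product: exactly one non-trivial window
// dimension whose window spans the whole axis, padded on one side only
// (padding == length - 1 for an inclusive scan, == length for an exclusive
// one). Scans longer than base_length_ are decomposed into tiles: a
// reduce-window scan within each tile, a second scan over the per-tile
// totals, and a map that combines the two, instead of one full-length window.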
absl::StatusOr<bool> ReduceWindowRewriter::TryOptimizeCumSumOrProd(
HloReduceWindowInstruction* reduce_window) {
const Shape& operand_shape = reduce_window->inputs().front()->shape();
int64_t rank = operand_shape.rank();
const Window& window = reduce_window->window();
int64_t scan_dim_num = -1;
for (int i = 0; i < rank; ++i) {
const WindowDimension& window_dim = window.dimensions(i);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
if (scan_dim_num != -1) {
return false;
}
scan_dim_num = i;
}
if (scan_dim_num == -1) {
return false;
}
const int64_t scan_length = operand_shape.dimensions(scan_dim_num);
absl::Span<HloInstruction* const> init_values = reduce_window->init_values();
const WindowDimension& scan_window_dim = window.dimensions(scan_dim_num);
bool forward_scan = (scan_window_dim.padding_low() == scan_length - 1 ||
scan_window_dim.padding_low() == scan_length) &&
scan_window_dim.padding_high() == 0;
bool reverse_scan = (scan_window_dim.padding_high() == scan_length - 1 ||
scan_window_dim.padding_high() == scan_length) &&
scan_window_dim.padding_low() == 0;
if (scan_window_dim.stride() != 1 || scan_window_dim.size() != scan_length ||
(!forward_scan && !reverse_scan) || scan_window_dim.window_reversal() ||
scan_window_dim.base_dilation() != 1 ||
scan_window_dim.window_dilation() != 1) {
return false;
}
bool is_exclusive = forward_scan
? (scan_window_dim.padding_low() == scan_length)
: (scan_window_dim.padding_high() == scan_length);
if (scan_length <= base_length_) {
return false;
}
if (reduce_window->to_apply()->root_instruction()->shape().IsTuple() &&
reduce_window->to_apply()->root_instruction()->opcode() !=
HloOpcode::kTuple) {
return false;
}
VLOG(2) << "Rewriting Scan: " << reduce_window->ToString();
HloComputation* parent = reduce_window->parent();
std::vector<HloInstruction*> sources(reduce_window->inputs().begin(),
reduce_window->inputs().end());
std::vector<int64_t> permutation(rank);
absl::c_iota(permutation, 0);
permutation[scan_dim_num] = rank - 1;
permutation[rank - 1] = scan_dim_num;
if (scan_dim_num != rank - 1) {
for (size_t i = 0; i < sources.size(); ++i) {
sources[i] = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, sources[i]->shape()),
sources[i], permutation));
}
}
const int64_t padded_length = RoundUpTo(scan_length, base_length_);
if (scan_length != padded_length) {
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape padded_shape = source->shape();
padded_shape.set_dimensions(rank - 1, padded_length);
UpdateLayout(&padded_shape);
auto padding_config = MakeNoPaddingConfig(rank);
padding_config.mutable_dimensions(rank - 1)->set_edge_padding_high(
padded_length - scan_length);
sources[i] = parent->AddInstruction(HloInstruction::CreatePad(
padded_shape, source, init_values[i], padding_config));
}
}
const int64_t num_columns = padded_length / base_length_;
std::vector<HloInstruction*> tiled_sources;
std::vector<Shape> tiled_shapes;
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape tiled_shape = source->shape();
tiled_shape.set_dimensions(rank - 1, num_columns);
UpdateLayout(&tiled_shape);
ShapeUtil::AppendMajorDimension(base_length_, &tiled_shape);
tiled_shapes.push_back(tiled_shape);
tiled_sources.push_back(parent->AddInstruction(
HloInstruction::CreateReshape(tiled_shape, source)));
}
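  // First level: an inclusive scan within each tile, expressed as a
  // reduce-window of size base_length_ over the appended major dimension.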
Window outer_window =
window_util::MakeWindow(std::vector<int64_t>(rank + 1, 1));
outer_window.mutable_dimensions(rank)->set_size(base_length_);
if (forward_scan) {
outer_window.mutable_dimensions(rank)->set_padding_low(base_length_ - 1);
} else {
outer_window.mutable_dimensions(rank)->set_padding_high(base_length_ - 1);
}
auto outer_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(tiled_shapes)
: tiled_shapes[0],
tiled_sources, init_values, outer_window, reduce_window->to_apply()));
std::vector<Shape> column_shapes;
std::vector<HloInstruction*> last_cols;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
Shape column_shape = subshape;
column_shape.set_dimensions(rank, 1);
UpdateLayout(&column_shape);
std::vector<int64_t> col_slice_starts(rank + 1, 0);
std::vector<int64_t> col_slice_limits(
SpanToVector(subshape.dimensions()));
if (forward_scan) {
col_slice_starts[rank] = base_length_ - 1;
} else {
col_slice_limits[rank] = 1;
}
auto last_col = parent->AddInstruction(HloInstruction::CreateSlice(
column_shape, GetAtIndex(outer_reduce_window, shape_index),
col_slice_starts, col_slice_limits,
std::vector<int64_t>(rank + 1, 1)));
column_shape.DeleteDimension(rank);
last_col = parent->AddInstruction(
HloInstruction::CreateReshape(column_shape, last_col));
last_cols.push_back(last_col);
column_shape.set_dimensions(rank - 1, num_columns + 1);
UpdateLayout(&column_shape);
column_shapes.push_back(column_shape);
});
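  // Second level: scan the last element of each tile to obtain the running
  // total entering every tile (the extra column carries the init value).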
Window inner_window = window_util::MakeWindow(std::vector<int64_t>(rank, 1));
inner_window.mutable_dimensions(rank - 1)->set_size(num_columns);
if (forward_scan) {
inner_window.mutable_dimensions(rank - 1)->set_padding_low(num_columns);
} else {
inner_window.mutable_dimensions(rank - 1)->set_padding_high(num_columns);
}
auto inner_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(column_shapes)
: column_shapes[0],
last_cols, init_values, inner_window, reduce_window->to_apply()));
std::vector<int64_t> exclusive_slice_starts(rank, 0);
std::vector<int64_t> exclusive_slice_limits =
SpanToVector(column_shapes[0].dimensions());
if (forward_scan) {
exclusive_slice_limits[rank - 1] = num_columns;
} else {
exclusive_slice_starts[rank - 1] = 1;
exclusive_slice_limits[rank - 1] = num_columns + 1;
}
std::vector<HloInstruction*> inner_scan_components;
ShapeUtil::ForEachSubshape(
inner_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(inner_reduce_window->shape(),
shape_index)) {
return;
}
size_t idx = FlattenShapeIndex(shape_index);
auto last_col = last_cols[idx];
auto* inner_slice = parent->AddInstruction(HloInstruction::CreateSlice(
last_col->shape(), GetAtIndex(inner_reduce_window, shape_index),
exclusive_slice_starts, exclusive_slice_limits,
std::vector<int64_t>(rank, 1)));
std::vector<int64_t> rank_iota(rank);
absl::c_iota(rank_iota, 0);
auto* inner_scan_component =
parent->AddInstruction(HloInstruction::CreateBroadcast(
tiled_shapes[idx], inner_slice, rank_iota));
inner_scan_components.push_back(inner_scan_component);
});
std::vector<HloInstruction*> map_operands;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
map_operands.push_back(GetAtIndex(outer_reduce_window, shape_index));
});
map_operands.insert(map_operands.end(), inner_scan_components.begin(),
inner_scan_components.end());
std::vector<HloInstruction*> scans;
auto status = ShapeUtil::ForEachSubshapeWithStatus(
outer_reduce_window->shape(),
[&](const Shape& subshape,
const ShapeIndex& shape_index) -> absl::Status {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return absl::OkStatus();
}
size_t idx = FlattenShapeIndex(shape_index);
auto source = sources[idx];
HloComputation* map_computation;
auto reduce_function_root =
reduce_window->to_apply()->root_instruction();
if (reduce_function_root->shape().IsTuple()) {
TF_RET_CHECK(reduce_function_root->opcode() == HloOpcode::kTuple);
auto* map_computation_root = reduce_function_root->operand(idx);
absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>
replacements;
replacements[reduce_function_root] = nullptr;
map_computation = parent->parent()->AddEmbeddedComputation(
reduce_window->to_apply()->CloneWithReplacements(
&replacements,
{}, nullptr, "clone",
map_computation_root));
} else {
map_computation = reduce_window->to_apply();
}
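        // Map the tile scans against the tile prefixes, reshape back to the
        // (transposed, padded) source shape, and move the scan dimension
        // back into place if it was permuted to the end.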
auto scan = parent->AddInstruction(HloInstruction::CreateMap(
ShapeAtIndex(outer_reduce_window->shape(), shape_index),
map_operands, map_computation));
scan = parent->AddInstruction(
HloInstruction::CreateReshape(source->shape(), scan));
if (scan_dim_num != rank - 1) {
scan = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, source->shape()), scan,
permutation));
}
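        // Strip the padding that rounded the scan length up to a multiple
        // of the tile size.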
if (padded_length != scan_length) {
scan = parent->AddInstruction(HloInstruction::CreateSlice(
operand_shape, scan, std::vector<int64_t>(rank, 0),
operand_shape.dimensions(), std::vector<int64_t>(rank, 1)));
}
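        // An exclusive scan is the inclusive scan shifted by one, with the
        // init value padded in at the open end.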
if (is_exclusive) {
auto padding_config = MakeNoPaddingConfig(rank);
if (forward_scan) {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_low(1);
} else {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_high(1);
}
scan = parent->AddInstruction(HloInstruction::CreatePad(
ShapeAtIndex(reduce_window->shape(), shape_index), scan,
init_values[idx], padding_config));
}
scans.push_back(scan);
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
HloInstruction* scan;
if (reduce_window->shape().IsTuple()) {
scan = parent->AddInstruction(HloInstruction::CreateTuple(scans));
} else {
CHECK_EQ(scans.size(), 1);
scan = scans[0];
}
TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(scan));
TF_RETURN_IF_ERROR(parent->RemoveInstruction(reduce_window));
return true;
}
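// Walks every reduce-window in the module: eligible cumulative scans are
// rewritten into the tiled form above, and any remaining rank-1
// reduce-window is rewritten via reshape to rank 2.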
absl::StatusOr<bool> ReduceWindowRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto& computation : module->computations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
HloReduceWindowInstruction* reduce_window =
DynCast<HloReduceWindowInstruction>(instruction);
if (!reduce_window) {
continue;
}
TF_ASSIGN_OR_RETURN(bool made_change,
TryOptimizeCumSumOrProd(reduce_window));
if (made_change) {
changed = true;
continue;
}
if (reduce_window->inputs().front()->shape().rank() != 1) {
continue;
}
TF_RETURN_IF_ERROR(ReplaceReduceWindowWithReshape(reduce_window));
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_window_rewriter.h"
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class ReduceWindowRewriterTest : public HloTestBase {
public:
void CheckReduceWindowRewrite(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, ReduceWindowRewriter{128}, expected);
}
};
TEST_F(ReduceWindowRewriterTest, EliminateR1) {
const char* hlo = R"(
%binary_add {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %a, f32[] %b)
}
ENTRY %EliminateR1 (input: f32[10]) -> f32[10] {
%input = f32[10]{0} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[10]{0} reduce-window(f32[10]{0} %input, f32[] %constant), window={size=5 pad=2_2}, to_apply=%binary_add
}
)";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, EliminateR1Variadic) {
const char* hlo = R"(
HloModule reduce-window
add_float {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT root = (f32[], f32[]) tuple(sum.0, sum.1)
}
ENTRY entry (arg: f32[10]) -> (f32[10], f32[10]) {
arg = f32[10]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = (f32[10]{0}, f32[10]{0}) reduce-window(f32[10]{0} %arg, f32[10]{0} %arg, f32[] %constant, f32[] %constant), window={size=5 pad=2_2}, to_apply=%add_float
})";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScan) {
const char* hlo = R"(
HloModule reduce-window
add_float {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY entry (arg: f32[46592]) -> f32[46592] {
arg = f32[46592]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = f32[46592]{0} reduce-window(f32[46592]{0} %arg, f32[] %constant), window={size=46592 pad=46591_0}, to_apply=%add_float
})";
CheckReduceWindowRewrite(hlo, R"(
)");
}
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScanVariadic) {
const std::string hlo_string = R"(
HloModule reduce-window
MaxMin {
l.max = f32[] parameter(0)
l.min = f32[] parameter(1)
r.max = f32[] parameter(2)
r.min = f32[] parameter(3)
max = f32[] maximum(l.max, r.max)
min = f32[] minimum(l.min, r.min)
ROOT root = (f32[], f32[]) tuple(max, min)
}
ENTRY entry (arg_0: f32[46592], arg_1: f32[46592]) -> (f32[46592], f32[46592]) {
arg.0 = f32[46592]{0} parameter(0)
arg.1 = f32[46592]{0} parameter(1)
init_ninf = f32[] constant(-inf)
init_inf = f32[] constant(inf)
ROOT reduce-window = (f32[46592]{0}, f32[46592]{0}) reduce-window(f32[46592]{0} %arg.0, f32[46592]{0} %arg.1, f32[] %init_ninf, f32[] %init_inf), window={size=46592 pad=46591_0}, to_apply=%MaxMin
}
)";
CheckReduceWindowRewrite(hlo_string, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_window_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_window_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
524aa7af-ef63-4f60-b2bc-3d00af43a14a | cpp | tensorflow/tensorflow | bitcast_dtypes_expander | third_party/xla/xla/service/bitcast_dtypes_expander.cc | third_party/xla/xla/service/bitcast_dtypes_expander_test.cc | #include "xla/service/bitcast_dtypes_expander.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/broadcast.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<HloInstruction*> BitcastDtypesExpander::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* input = instruction->mutable_operand(0);
const Shape& from_shape = input->shape();
const Shape& to_shape = instruction->shape();
int input_bit_width = primitive_util::BitWidth(from_shape.element_type());
int output_bit_width = primitive_util::BitWidth(to_shape.element_type());
PrimitiveType input_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(input_bit_width);
PrimitiveType output_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(output_bit_width);
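  // Equal bit widths need no decomposition; the bitcast stays as is.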
if (input_bit_width == output_bit_width) {
return instruction;
}
std::string name =
absl::StrFormat("xla.bitcast_convert_%s_2_%s", from_shape.ToString(),
to_shape.ToString());
HloModule* module = instruction->GetModule();
HloComputation*& computation =
computation_cache_.emplace(name, nullptr).first->second;
if (!computation) {
XlaBuilder b(name);
XlaOp input = Parameter(&b, 0, instruction->operand(0)->shape(), "a");
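    // Wide-to-narrow (e.g. s32 -> s8[4]): replicate each element across a
    // new minor dimension, then shift and mask to extract the narrow chunks.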
if (input_bit_width > output_bit_width) {
std::vector<int64_t> broadcasted_input_shape(
from_shape.dimensions().begin(), from_shape.dimensions().end());
std::vector<int64_t> reshaped_input_shape(from_shape.dimensions().begin(),
from_shape.dimensions().end());
broadcasted_input_shape.push_back(input_bit_width / output_bit_width);
reshaped_input_shape.push_back(1);
int64_t output_bit_width_mask = (int64_t{1} << output_bit_width) - 1;
TF_ASSIGN_OR_RETURN(input,
BroadcastTo(Reshape(input, reshaped_input_shape),
broadcasted_input_shape));
input = BitcastConvertType(input, input_logical_type);
TF_ASSIGN_OR_RETURN(Shape input_shape, b.GetShape(input));
XlaOp iota = Iota(&b, input_shape, input_shape.dimensions_size() - 1);
XlaOp iota_m = Mul(ScalarLike(input, output_bit_width), iota);
input = And(ShiftRightLogical(input, iota_m),
ScalarLike(input, output_bit_width_mask));
input = ConvertElementType(input, output_logical_type);
} else if (input_bit_width < output_bit_width) {
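      // Narrow-to-wide (e.g. s8[4] -> s32): widen each chunk, shift it into
      // its bit position, and or-reduce the chunks into a single word.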
input = BitcastConvertType(input, input_logical_type);
input = ConvertElementType(input, output_logical_type);
XlaOp iota_m = Mul(
ConstantR0WithType(&b, output_logical_type, input_bit_width),
Iota(&b,
ShapeUtil::ChangeElementType(from_shape, output_logical_type),
from_shape.rank() - 1));
input = ShiftLeft(input, iota_m);
input = Reduce(input, Zero(&b, output_logical_type),
CreateScalarOrComputation(output_logical_type, &b),
{from_shape.rank() - 1});
}
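    // The final bitcast to the target element type becomes the root of the
    // built computation.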
BitcastConvertType(input, to_shape.element_type());
TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, b.Build());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
xla_computation.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
xla_computation.proto(), config));
HloCloneContext context(module);
computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
}
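  // Replace the unsupported bitcast-convert with a call to the (cached)
  // expansion computation.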
return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
instruction->shape(), instruction->operands(), computation));
}
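// Only bitcast-converts that change the element bit width need expanding.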
bool BitcastDtypesExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBitcastConvert &&
primitive_util::BitWidth(instruction->shape().element_type()) !=
primitive_util::BitWidth(
instruction->operand(0)->shape().element_type());
}
} | #include "xla/service/bitcast_dtypes_expander.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class BitcastDtypesExpanderTest : public HloTestBase {};
TEST_F(BitcastDtypesExpanderTest, S32toS8) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s32[10] parameter(0)
ROOT out = s8[10,4] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S64toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s64[10] parameter(0)
ROOT out = s32[10,2] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S8toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_larger
ENTRY main {
p = s8[10,4] parameter(0)
ROOT out = s32[10] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, RewriteInsideWhileTest) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const = s32[] constant(42)
converted_val2 = s8[4] bitcast-convert(val2)
converted_const = s8[4] bitcast-convert(const)
add = s8[4] add(converted_val2, converted_const)
out_add = s32[] bitcast-convert(add)
ROOT root = (f32[2], s32[]) tuple(val1, out_add)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bitcast_dtypes_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bitcast_dtypes_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d44286ee-dc18-4d49-81ae-9830d313d9ec | cpp | tensorflow/tensorflow | fusion_node_indexing_evaluation | third_party/xla/xla/service/fusion_node_indexing_evaluation.cc | third_party/xla/xla/service/fusion_node_indexing_evaluation_test.cc | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
FusionNodeIndexingEvaluation::FusionNodeIndexingEvaluation(
const HloInstruction* fusion, int64_t root_usage_count)
: fusion_(fusion) {
HloInstruction* root = fusion->fused_expression_root();
indexing_users_[root].insert(fusion);
index_usage_count_[fusion] = root_usage_count;
RecomputeCache();
}
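// Upper bound on how many times an instruction may be emitted inside a
// fusion before the duplication is considered too costly.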
const int64_t FusionNodeIndexingEvaluation::kAllowedCodeDuplication = 15;
namespace {
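// Counts effective users of `hlo`, looking through fusion boundaries: a
// fusion user contributes one count per user of the matching fused parameter.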
int64_t UserCount(const HloInstruction* hlo) {
int64_t cnt = 0;
for (HloInstruction* user : hlo->users()) {
if (user->opcode() == HloOpcode::kFusion) {
int64_t operand_index = user->operand_index(hlo);
cnt += user->fused_parameter(operand_index)->user_count();
} else {
++cnt;
}
}
return cnt;
}
}
bool FusionNodeIndexingEvaluation::CodeDuplicationTooHigh(
const HloInstruction* producer) const {
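  // Broadcasts are considered free to duplicate.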
if (producer->opcode() == HloOpcode::kBroadcast) {
return false;
}
int64_t emitted_instructions = EvaluateEmittedInstructions(producer);
return emitted_instructions > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(producer) &&
(emitted_instructions > 1 || UserCount(producer) > 1));
}
bool FusionNodeIndexingEvaluation::MaxCodeDuplicationTooHigh() const {
for (const auto& entry : index_usage_count_) {
if (entry.second > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(entry.first) &&
(entry.second > 1 || UserCount(entry.first) > 1))) {
return true;
}
}
return false;
}
int64_t FusionNodeIndexingEvaluation::EvaluateEmittedInstructions(
const HloInstruction* producer) const {
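  // The producer is emitted once per index usage of each user that indexes
  // into it.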
int64_t total = 0;
for (const auto* user : indexing_users_.at(producer)) {
total += index_usage_count_.at(user);
}
return total;
}
void FusionNodeIndexingEvaluation::UpdateEvaluationCache(
const HloInstruction* producer,
absl::flat_hash_set<const HloInstruction*> indexing_users_of_producer) {
CHECK(!indexing_users_.contains(producer));
indexing_users_[producer] = std::move(indexing_users_of_producer);
UpdateIndexUsageCount(producer);
UpdateIndexingUsersOfOperands(producer);
}
absl::flat_hash_set<const HloInstruction*>
FusionNodeIndexingEvaluation::RemoveFusionOperand(
HloInstruction* fusion_operand) {
auto indexing_users_of_operand =
std::move(indexing_users_.at(fusion_operand));
indexing_users_.erase(fusion_operand);
CHECK(!index_usage_count_.contains(fusion_operand));
return indexing_users_of_operand;
}
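// Rebuilds both caches by walking the fused computation in reverse post
// order, so users are processed before their operands.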
void FusionNodeIndexingEvaluation::RecomputeCache() {
auto postorder =
fusion_->fused_instructions_computation()->MakeInstructionPostOrder();
std::reverse(postorder.begin(), postorder.end());
for (const auto* instruction : postorder) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
UpdateIndexUsageCount(instruction);
UpdateIndexingUsersOfOperands(instruction);
}
}
void FusionNodeIndexingEvaluation::UpdateIndexUsageCount(
const HloInstruction* instruction) {
int64_t total = 0;
for (const auto* user : indexing_users_[instruction]) {
total += index_usage_count_.at(user);
}
CHECK(index_usage_count_.emplace(instruction, total).second);
}
void FusionNodeIndexingEvaluation::UpdateIndexingUsersOfOperands(
const HloInstruction* instruction) {
for (const auto* operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kParameter) {
operand = fusion_->operand(operand->parameter_number());
}
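    // Transposes and ops whose shape matches their operand's (ignoring
    // element type) are assumed to reuse the operand's index, so their
    // indexing users propagate through; any other op introduces a new index.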
if (instruction->opcode() == HloOpcode::kTranspose ||
Shape::Equal().IgnoreElementType()(operand->shape(),
instruction->shape())) {
indexing_users_[operand].insert(indexing_users_[instruction].begin(),
indexing_users_[instruction].end());
} else {
indexing_users_[operand].insert(instruction);
}
}
}
} | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
using FusionNodeIndexingEvaluationTest = HloTestBase;
class InstructionFusionForTesting : public InstructionFusion {
public:
explicit InstructionFusionForTesting()
: InstructionFusion(InstructionFusion::IsExpensive) {}
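  // Maintains one FusionNodeIndexingEvaluation per fusion and updates it
  // incrementally as producers are fused in, mirroring how fusion passes are
  // expected to use the class.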
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation =
fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation) override {
return InstructionFusion::Fuse(producer, consumer, computation);
}
int64_t EvaluateEmittedInstructions(const HloInstruction* producer,
const HloInstruction* consumer) {
if (consumer->opcode() != HloOpcode::kFusion) {
return 0;
}
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
return fusion_node_evaluations_.at(consumer).EvaluateEmittedInstructions(
producer);
}
const FusionNodeIndexingEvaluation* GetFusionNodeEvaluation(
const HloInstruction* consumer) {
auto it = fusion_node_evaluations_.find(consumer);
if (it == fusion_node_evaluations_.end()) {
return nullptr;
}
return &it->second;
}
private:
absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
fusion_node_evaluations_;
};
TEST_F(FusionNodeIndexingEvaluationTest, FuseTwoInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4,3]{1,0} parameter(0)
add = f32[4,3]{1,0} add(p0, p0)
ROOT sub = f32[4,3]{1,0} subtract(add, p0)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
HloInstruction* add = sub->mutable_operand(0);
InstructionFusionForTesting().Fuse(add, sub, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, FuseThreeInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
slice1 = f32[3]{0} slice(p0), slice={[0:3]}
slice2 = f32[3]{0} slice(p0), slice={[0:3]}
ROOT sub = f32[3]{0} subtract(slice1, slice2)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice1 = sub->mutable_operand(0);
HloInstruction* slice2 = sub->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice1, sub, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2, fusion), 1);
instruction_fusion.Fuse(slice2, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, ExponentialDuplicationPattern) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
slice1.0 = f32[3]{0} slice(add0), slice={[0:3]}
slice1.1 = f32[3]{0} slice(add0), slice={[1:4]}
add1 = f32[3]{0} add(slice1.0, slice1.1)
slice2.0 = f32[2]{0} slice(add1), slice={[0:2]}
slice2.1 = f32[2]{0} slice(add1), slice={[1:3]}
ROOT add2 = f32[2]{0} add(slice2.0, slice2.1)
})")
.value();
HloInstruction* add2 = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice2_0 = add2->mutable_operand(0);
HloInstruction* slice2_1 = add2->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice2_0, add2, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2_1, fusion),
1);
instruction_fusion.Fuse(slice2_1, fusion, module->entry_computation());
HloInstruction* add1 = fusion->mutable_operand(0);
EXPECT_EQ(add1->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add1, fusion), 2);
instruction_fusion.Fuse(add1, fusion, module->entry_computation());
HloInstruction* slice1_0 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_0->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_0, fusion),
2);
instruction_fusion.Fuse(slice1_0, fusion, module->entry_computation());
HloInstruction* slice1_1 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_1->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_1, fusion),
2);
instruction_fusion.Fuse(slice1_1, fusion, module->entry_computation());
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
instruction_fusion.Fuse(add0, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, RecomputeCache) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param_0.5: f32[4]) -> f32[2] {
%param_0.5 = f32[4]{0} parameter(0)
%slice1.2 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[0:3]}
%slice1.3 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[1:4]}
%add1.1 = f32[3]{0} add(f32[3]{0} %slice1.2, f32[3]{0} %slice1.3)
%slice2.2 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[0:2]}
%slice2.3 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[1:3]}
ROOT %add2.1 = f32[2]{0} add(f32[2]{0} %slice2.2, f32[2]{0} %slice2.3)
}
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
ROOT %fusion = f32[2]{0} fusion(add0), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
}
TEST_F(FusionNodeIndexingEvaluationTest, CodeDuplicationTooHigh) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param: f32[6]) -> f32[2] {
%param = f32[6]{0} parameter(0)
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
ENTRY entry_computation {
p0 = f32[] parameter(0)
add = f32[] add(p0, p0)
broadcast = f32[6]{0} broadcast(add), dimensions={}
ROOT %fusion = f32[2]{0} fusion(broadcast), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* broadcast = fusion->mutable_operand(0);
EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(broadcast, fusion),
16);
EXPECT_FALSE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(broadcast));
instruction_fusion.Fuse(broadcast, fusion, module->entry_computation());
HloInstruction* add = fusion->mutable_operand(0);
EXPECT_EQ(add->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add, fusion), 16);
EXPECT_TRUE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(add));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_node_indexing_evaluation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_node_indexing_evaluation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa6cc3de-ce31-4a6b-9ff6-3389fd34736b | cpp | tensorflow/tensorflow | hlo_memory_scheduler | third_party/xla/xla/service/hlo_memory_scheduler.cc | third_party/xla/xla/service/hlo_memory_scheduler_test.cc | #include "xla/service/hlo_memory_scheduler.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
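// Greedy list scheduler that minimizes live memory: at every step it emits
// the ready instruction whose scheduling frees the most bytes.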
class ListScheduler {
public:
static absl::StatusOr<HloInstructionSequence> Run(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function) {
ListScheduler scheduler(computation, points_to_analysis, size_function);
return scheduler.CreateSchedule();
}
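  // Parameters and constants are ignored: scheduling them cannot change
  // memory usage.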
static bool IgnoreInstruction(const HloInstruction& instruction) {
return instruction.opcode() == HloOpcode::kParameter ||
instruction.opcode() == HloOpcode::kConstant;
}
private:
using Priority = std::pair<int64_t, int64_t>;
ListScheduler(HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_function)
: computation_(computation),
points_to_analysis_(points_to_analysis),
size_function_(size_function) {
for (auto* instruction : computation->instructions()) {
absl::flat_hash_set<const LogicalBuffer*> instr_uses;
for (auto* operand : instruction->operands()) {
points_to_analysis.GetPointsToSet(operand).ForEachElement(
              [&](const ShapeIndex& /*index*/,
const PointsToSet::BufferList& buffers) {
instr_uses.insert(buffers.begin(), buffers.end());
});
}
buffer_uses_[instruction] = std::vector<const LogicalBuffer*>(
instr_uses.begin(), instr_uses.end());
}
unscheduled_use_count_.reserve(points_to_analysis.num_logical_buffers());
for (auto* instruction : computation->instructions()) {
for (auto* buffer :
points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
unscheduled_use_count_[buffer] = 0;
}
}
for (auto* instruction : computation->instructions()) {
for (const LogicalBuffer* buffer : buffer_uses_.at(instruction)) {
++unscheduled_use_count_[buffer];
}
}
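    // Buffers that escape through the root get one extra use, so their last
    // in-graph use is never counted as freeing them.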
for (const LogicalBuffer* live_out_buffer :
points_to_analysis.GetPointsToSet(computation->root_instruction())
.CreateFlattenedSet()) {
++unscheduled_use_count_[live_out_buffer];
}
}
static bool IgnoreBuffer(const LogicalBuffer& buffer) {
return IgnoreInstruction(*buffer.instruction());
}
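  // A schedulable instruction plus cached data for prioritization: the bytes
  // it defines and pointers to the live use counts of the buffers it reads.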
struct ReadyListEntry {
HloInstruction* instruction;
int64_t bytes_defined;
std::vector<const std::pair<const LogicalBuffer* const, int64_t>*>
used_buffer_unscheduled_use_counts;
};
ReadyListEntry MakeReadyListEntry(HloInstruction* instruction) {
ReadyListEntry entry;
entry.instruction = instruction;
entry.bytes_defined = 0;
for (auto* buffer :
points_to_analysis_.GetBuffersDefinedByInstruction(instruction)) {
if (!IgnoreBuffer(*buffer)) {
entry.bytes_defined += size_function_(*buffer);
}
}
for (auto* buffer : buffer_uses_.at(instruction)) {
if (IgnoreBuffer(*buffer)) {
continue;
}
auto unscheduled_use_count_it = unscheduled_use_count_.find(buffer);
CHECK(unscheduled_use_count_it != unscheduled_use_count_.end());
entry.used_buffer_unscheduled_use_counts.push_back(
&*unscheduled_use_count_it);
}
return entry;
}
int64_t BytesFreedIfScheduled(const ReadyListEntry& entry) {
auto instruction = entry.instruction;
auto opcode = instruction->opcode();
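    // Pin host transfers: outfeeds get maximal "bytes freed" so they are
    // scheduled as early as possible, infeeds minimal so they go as late as
    // possible.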
if (opcode == HloOpcode::kOutfeed &&
!instruction->outfeed_config().empty()) {
return INT_MAX;
}
if (opcode == HloOpcode::kInfeed && !instruction->infeed_config().empty()) {
return INT_MIN;
}
int64_t freed_bytes = 0;
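    // A buffer whose only remaining unscheduled use is this instruction dies
    // here, so its bytes are freed.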
for (const auto& kv : entry.used_buffer_unscheduled_use_counts) {
auto buffer = kv->first;
auto use_count = kv->second;
if (use_count == 1) {
freed_bytes += size_function_(*buffer);
}
}
return freed_bytes - entry.bytes_defined;
}
Priority GetPriority(const ReadyListEntry& entry) {
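    // Effective scalars occupy negligible memory; give them top priority.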
if (ShapeUtil::IsEffectiveScalar(entry.instruction->shape())) {
return {std::numeric_limits<int64_t>::max(),
std::numeric_limits<int64_t>::max()};
}
return {BytesFreedIfScheduled(entry), entry.instruction->user_count()};
}
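  // Core loop: keep a ready set ordered by priority, always emit the best
  // entry, then update predecessor counts and re-prioritize ready entries
  // whose operand buffers are about to die.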
HloInstructionSequence CreateSchedule() {
HloInstructionSequence schedule;
absl::flat_hash_map<const HloInstruction*, int64_t> unscheduled_pred_count;
for (auto* instruction : computation_->instructions()) {
for (HloInstruction* user : instruction->users()) {
unscheduled_pred_count[user]++;
}
for (HloInstruction* succ : instruction->control_successors()) {
unscheduled_pred_count[succ]++;
}
}
std::multimap<Priority, ReadyListEntry> ready_queue;
absl::flat_hash_map<const HloInstruction*,
std::multimap<Priority, ReadyListEntry>::iterator>
ready_instructions;
auto add_to_ready_queue = [&](HloInstruction* inst) {
auto entry = MakeReadyListEntry(inst);
auto it = ready_queue.emplace(GetPriority(entry), std::move(entry));
ready_instructions[inst] = it;
};
for (auto* instruction : computation_->instructions()) {
if (instruction->operands().empty() &&
instruction->control_predecessors().empty()) {
add_to_ready_queue(instruction);
}
}
while (!ready_queue.empty()) {
auto best_it = ready_queue.end();
--best_it;
HloInstruction* best = best_it->second.instruction;
VLOG(2) << "Schedule instruction: " << best->ToShortString()
<< " Bytes freed: " << best_it->first.first;
ready_queue.erase(best_it);
ready_instructions.erase(best);
schedule.push_back(best);
scheduled_instructions_.insert(best);
bool adjust_ready_queue = false;
for (const LogicalBuffer* buffer : buffer_uses_.at(best)) {
int64_t& count = unscheduled_use_count_[buffer];
CHECK_GT(count, 0);
--count;
if (count == 1) {
adjust_ready_queue = true;
}
}
auto update_pred_count = [&](HloInstruction* inst) {
int64_t pred_count = --unscheduled_pred_count.at(inst);
CHECK_GE(pred_count, 0);
if (pred_count == 0) {
add_to_ready_queue(inst);
}
};
for (HloInstruction* user : best->users()) {
update_pred_count(user);
}
for (HloInstruction* succ : best->control_successors()) {
update_pred_count(succ);
}
if (adjust_ready_queue) {
for (HloInstruction* operand : best->operands()) {
for (HloInstruction* operand_user : operand->users()) {
auto ready_instructions_it = ready_instructions.find(operand_user);
if (ready_instructions_it == ready_instructions.end()) {
continue;
}
auto ready_queue_it = ready_instructions_it->second;
auto& entry = ready_queue_it->second;
Priority new_priority = GetPriority(entry);
if (new_priority == ready_queue_it->first) {
continue;
}
ready_instructions_it->second =
ready_queue.emplace(new_priority, std::move(entry));
ready_queue.erase(ready_queue_it);
}
}
}
}
CHECK_EQ(schedule.size(), computation_->instruction_count());
CHECK_EQ(scheduled_instructions_.size(), computation_->instruction_count());
return schedule;
}
HloComputation* computation_;
const TuplePointsToAnalysis& points_to_analysis_;
const BufferValue::SizeFunction& size_function_;
absl::flat_hash_map<const HloInstruction*, std::vector<const LogicalBuffer*>>
buffer_uses_;
absl::flat_hash_map<const LogicalBuffer*, int64_t> unscheduled_use_count_;
absl::flat_hash_set<const HloInstruction*> scheduled_instructions_;
};
int64_t SumLogicalBufferSizes(
const TuplePointsToAnalysis::BufferDefinitionVector& buffers,
const BufferValue::SizeFunction& size_function) {
int64_t size = 0;
for (const LogicalBuffer* buffer : buffers) {
size += size_function(*buffer);
}
return size;
}
absl::StatusOr<HloInstructionSequence> ScheduleComputationHelper(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerAlgorithm& algorithm,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
VLOG(2) << "Computation: " << computation->name();
if (algorithm) {
return algorithm(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, peak_memory);
}
return DefaultMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, peak_memory);
}
}
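// DFS scheduler: depth-first traversal that visits operands with more
// transitive users and larger accumulated buffer sizes first, with ties
// broken by name for determinism.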
absl::StatusOr<HloInstructionSequence> DFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
int64_t cumulative_total_size = 0;
int64_t total_hlos = computation->instruction_count();
struct Stats {
int64_t extra_users = 0;
int64_t total_sizes = 0;
};
absl::flat_hash_map<const HloInstruction*, Stats> stats_map;
stats_map.reserve(computation->instruction_count());
for (const HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
auto& stats = stats_map[hlo];
if (ListScheduler::IgnoreInstruction(*hlo)) {
continue;
}
stats.extra_users = hlo->users().empty() ? 0 : hlo->users().size() - 1;
int64_t logical_buffer_size = SumLogicalBufferSizes(
points_to_analysis.GetBuffersDefinedByInstruction(hlo), size_function);
stats.total_sizes = logical_buffer_size;
cumulative_total_size += logical_buffer_size;
absl::flat_hash_set<const HloInstruction*> unique_operands(
hlo->operands().begin(), hlo->operands().end());
for (const HloInstruction* operand : unique_operands) {
auto& operand_stats = stats_map.at(operand);
stats.extra_users += operand_stats.extra_users;
stats.total_sizes += operand_stats.total_sizes;
}
stats.total_sizes = std::min(stats.total_sizes, cumulative_total_size);
stats.extra_users = std::min(stats.extra_users, total_hlos);
}
CHECK_EQ(stats_map.size(), computation->instruction_count());
HloInstructionSequence sequence;
FunctionVisitor visitor([&sequence](HloInstruction* hlo) {
sequence.push_back(hlo);
return absl::OkStatus();
});
visitor.ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->AcceptWithOperandOrder(
&visitor, [&stats_map](const HloInstruction* a, const HloInstruction* b) {
auto& stats_a = stats_map.at(a);
auto& stats_b = stats_map.at(b);
if (stats_a.extra_users != stats_b.extra_users) {
return stats_a.extra_users > stats_b.extra_users;
}
if (stats_a.total_sizes != stats_b.total_sizes) {
return stats_a.total_sizes > stats_b.total_sizes;
}
return a->name() < b->name();
}));
if (postprocessor) {
sequence = postprocessor(sequence);
}
CHECK_EQ(sequence.size(), computation->instruction_count());
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
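// BFS scheduler: plain breadth-first (Kahn) order with no memory modeling,
// which tends to maximize the number of concurrently ready instructions.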
absl::StatusOr<HloInstructionSequence> BFSMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
absl::flat_hash_map<const HloInstruction*, int64_t> inst_index;
std::vector<int64_t> inst_deps(computation->instruction_count(), 0);
std::queue<HloInstruction*> ready_queue;
auto update_queue = [&](HloInstruction* inst) {
int64_t index = inst_index.at(inst);
CHECK_GE(--inst_deps[index], 0);
if (inst_deps[index] == 0) {
ready_queue.push(inst);
}
};
for (HloInstruction* inst : computation->instructions()) {
size_t index = inst_index.size();
inst_index[inst] = index;
inst_deps[index] =
inst->unique_operands().size() + inst->control_predecessors().size();
if (inst_deps[index] == 0) {
ready_queue.push(inst);
}
}
HloInstructionSequence sequence;
while (!ready_queue.empty()) {
HloInstruction* inst = ready_queue.front();
ready_queue.pop();
for (HloInstruction* user : inst->users()) update_queue(user);
for (HloInstruction* succ : inst->control_successors()) update_queue(succ);
sequence.push_back(inst);
}
CHECK_EQ(sequence.size(), computation->instruction_count());
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
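// Adapts a per-computation scheduler into a module scheduler. Fusion
// computations are skipped: they do not get sequences of their own.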
ModuleSchedulerAlgorithm ComputationSchedulerToModuleScheduler(
const MemorySchedulerAlgorithm& computation_scheduler,
const MemorySchedulerPostprocessor& postprocessor) {
return [computation_scheduler, postprocessor](
const HloModule* module,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_func,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) -> absl::StatusOr<HloSchedule> {
HloSchedule schedule(module);
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (!computation->IsFusionComputation()) {
TF_ASSIGN_OR_RETURN(HloInstructionSequence computation_sequence,
ScheduleComputationHelper(
computation, points_to_analysis, alias_analysis,
size_func, computation_scheduler, postprocessor,
nullptr));
schedule.set_sequence(computation, std::move(computation_sequence));
}
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(*peak_memory, HeapSimulator::MinimumMemoryForModule(
schedule, size_func));
}
return std::move(schedule);
};
}
absl::StatusOr<HloInstructionSequence> ListMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
TF_ASSIGN_OR_RETURN(
HloInstructionSequence sequence,
ListScheduler::Run(computation, points_to_analysis, size_function));
if (postprocessor) {
sequence = postprocessor(sequence);
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
absl::StatusOr<HloInstructionSequence> PostOrderMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
HloInstructionSequence sequence(computation->MakeInstructionPostOrder());
if (postprocessor) {
sequence = postprocessor(sequence);
}
if (peak_memory) {
TF_ASSIGN_OR_RETURN(
*peak_memory,
HeapSimulator::MinimumMemoryForComputation(
*computation, sequence, alias_analysis, size_function));
}
return sequence;
}
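// Runs the list, DFS, and post-order schedulers and returns whichever yields
// the smallest simulated peak heap for this computation.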
absl::StatusOr<HloInstructionSequence> DefaultMemoryScheduler(
HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const MemorySchedulerPostprocessor& postprocessor, int64_t* peak_memory) {
int64_t list_memory;
TF_ASSIGN_OR_RETURN(
HloInstructionSequence list_sequence,
ListMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &list_memory));
VLOG(2) << "Min-memory list sequence: " << HumanReadableNumBytes(list_memory);
int64_t dfs_memory;
TF_ASSIGN_OR_RETURN(
HloInstructionSequence dfs_sequence,
DFSMemoryScheduler(computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &dfs_memory));
VLOG(2) << "Min-memory dfs sequence: " << HumanReadableNumBytes(dfs_memory);
int64_t post_order_memory;
TF_ASSIGN_OR_RETURN(HloInstructionSequence post_order_sequence,
PostOrderMemoryScheduler(
computation, points_to_analysis, alias_analysis,
size_function, postprocessor, &post_order_memory));
VLOG(2) << "Min-memory post order sequence: "
<< HumanReadableNumBytes(post_order_memory);
auto min_memory = std::min({dfs_memory, post_order_memory, list_memory});
if (peak_memory) {
*peak_memory = min_memory;
}
if (min_memory == list_memory) {
VLOG(2) << "Chose min-memory list sequence: "
<< HumanReadableNumBytes(list_memory);
return list_sequence;
} else if (min_memory == dfs_memory) {
VLOG(2) << "Chose min-memory dfs sequence: "
<< HumanReadableNumBytes(dfs_memory);
return dfs_sequence;
} else {
VLOG(2) << "Chose min-memory post_order sequence: "
<< HumanReadableNumBytes(post_order_memory);
return post_order_sequence;
}
}
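// Module-wide analogue of DefaultMemoryScheduler: runs all three schedulers
// over the whole module and keeps the one with minimal simulated memory.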
absl::StatusOr<HloSchedule> DefaultModuleScheduler(
const HloModule* module, const TuplePointsToAnalysis& points_to_analysis,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_function,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) {
int64_t list_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule list_sequence,
ComputationSchedulerToModuleScheduler(ListMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &list_memory));
VLOG(2) << "Min-memory list sequence: " << HumanReadableNumBytes(list_memory);
int64_t dfs_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule dfs_sequence,
ComputationSchedulerToModuleScheduler(DFSMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &dfs_memory));
VLOG(2) << "Min-memory dfs sequence: " << HumanReadableNumBytes(dfs_memory);
int64_t post_order_memory;
TF_ASSIGN_OR_RETURN(
HloSchedule post_order_sequence,
ComputationSchedulerToModuleScheduler(PostOrderMemoryScheduler, {})(
module, points_to_analysis, alias_analysis, size_function,
execution_threads, &post_order_memory));
VLOG(2) << "Min-memory post order sequence: "
<< HumanReadableNumBytes(post_order_memory);
auto min_memory = std::min({dfs_memory, post_order_memory, list_memory});
if (peak_memory) {
*peak_memory = min_memory;
}
if (min_memory == list_memory) {
VLOG(2) << "Chose min-memory list sequence: "
<< HumanReadableNumBytes(list_memory);
return list_sequence;
} else if (min_memory == dfs_memory) {
VLOG(2) << "Chose min-memory dfs sequence: "
<< HumanReadableNumBytes(dfs_memory);
return dfs_sequence;
} else {
VLOG(2) << "Chose min-memory post_order sequence: "
<< HumanReadableNumBytes(post_order_memory);
return post_order_sequence;
}
}
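// Entry point: runs points-to and alias analysis, applies the requested (or
// default) algorithm, and verifies the resulting schedule.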
absl::StatusOr<HloSchedule> ScheduleModule(
const HloModule* module, const BufferValue::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm,
const absl::flat_hash_set<absl::string_view>& execution_threads,
int64_t* peak_memory) {
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaMemoryScheduler:#module=%s,program_id=%d#",
module->name(), module->unique_id());
});
TF_ASSIGN_OR_RETURN(std::unique_ptr<TuplePointsToAnalysis> points_to_analysis,
TuplePointsToAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(HloSchedule schedule,
(algorithm ? algorithm : DefaultModuleScheduler)(
module, *points_to_analysis, *alias_analysis,
size_function, execution_threads, peak_memory));
TF_RETURN_IF_ERROR(schedule.Verify());
return std::move(schedule);
}
HloMemoryScheduler::HloMemoryScheduler(
const BufferValue::SizeFunction& size_function,
const ModuleSchedulerAlgorithm& algorithm)
: size_function_(size_function), algorithm_(algorithm) {}
absl::StatusOr<bool> HloMemoryScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleModule(module, size_function_, algorithm_, execution_threads));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
return true;
}
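// Produces a schedule in plain DFS post order, with no memory modeling.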
absl::StatusOr<bool> HloTrivialScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HloSchedule schedule(module);
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
if (!computation->IsFusionComputation()) {
HloInstructionSequence& computation_sequence =
schedule.GetOrCreateSequence(computation);
FunctionVisitor visitor(
[&computation_sequence](HloInstruction* instruction) {
computation_sequence.push_back(instruction);
return absl::OkStatus();
});
visitor.ReserveVisitStates(computation->instruction_count());
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
}
}
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
return true;
}
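// Drops any existing schedule from the module, reporting whether one existed.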
absl::StatusOr<bool> HloDescheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = module->has_schedule();
module->clear_schedule();
return changed;
}
} | #include "xla/service/hlo_memory_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloSchedulingTest : public HloTestBase {};
int64_t PeakMemoryUseOfEntryComputation(
HloModule* module, LogicalBuffer::SizeFunction size_function) {
CHECK(module->has_entry_computation());
CHECK(module->has_schedule());
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module).value();
const HloSchedule& schedule = module->schedule();
HloComputation* computation = module->entry_computation();
const HloInstructionSequence& sequence = schedule.sequence(computation);
return HeapSimulator::Run(
std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
*computation, sequence, *alias_analysis, size_function)
.value()
.heap_size;
}
TEST_F(HloSchedulingTest, LastUseScheduledFirst) {
const Shape vec = ShapeUtil::MakeShape(xla::F32, {42});
auto builder = HloComputation::Builder(TestName());
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, vec, "param"));
auto ab = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kAbs, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kExp, param));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kAdd, ab, exp));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vec, HloOpcode::kNegate, exp));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(vec, HloOpcode::kSubtract, add, negate));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloMemoryScheduler scheduler([](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
});
ASSERT_FALSE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool changed, scheduler.Run(module.get()));
EXPECT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
const std::vector<HloInstruction*>& sequence =
module->schedule().sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
EXPECT_EQ(param, sequence.front());
EXPECT_EQ(sub, sequence.back());
SequentialHloOrdering ordering(module->schedule());
EXPECT_TRUE(ordering.ExecutesBefore(add, negate));
HloDescheduler descheduler;
EXPECT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool descheduler_changed,
descheduler.Run(module.get()));
EXPECT_TRUE(descheduler_changed);
EXPECT_FALSE(module->has_schedule());
}
TEST_F(HloSchedulingTest, ListSchedulerHandlesAliasing) {
const char* module_str = R"(
HloModule test_aliasing_module
ENTRY root {
param = s32[1000] parameter(0)
p0 = s32[1000] copy(param)
p1 = s32[1000] copy(param)
t = (s32[1000], s32[1000]) tuple(p0, p1)
a = s32[1000] get-tuple-element(t), index=0
b = s32[1000] get-tuple-element(t), index=1
c = s32[1000] add(a, b)
d = s32[1000] add(c, b)
e = s32[1000] add(c, c)
f = s32[1000] add(e, e)
ROOT result = (s32[1000], s32[1000], s32[1000]) tuple(d, e, f)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
int64_t peak_memory;
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(ListMemoryScheduler),
{}, &peak_memory));
TF_ASSERT_OK(module->set_schedule(schedule));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_EQ(instructions_by_name.at("param"), sequence.front());
EXPECT_EQ(instructions_by_name.at("result"), sequence.back());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(instructions_by_name.at("d"),
instructions_by_name.at("e")));
EXPECT_EQ(PeakMemoryUseOfEntryComputation(module.get(), size_fn),
peak_memory);
}
TEST_F(HloSchedulingTest, HostSendDoneSchedule) {
const char* const module_str = R"(
HloModule module
ENTRY entry {
%p = f32[1000, 1000] parameter(0)
%token.0 = token[] after-all()
%send = (f32[1000, 1000], token[]) send(%p, %token.0),
channel_id=1, is_host_transfer=true
%n1 = f32[1000, 1000] negate(%p)
%n2 = f32[1000, 1000] negate(%n1)
%n3 = f32[1000, 1000] negate(%n2)
%send-done = token[] send-done(%send), channel_id=1, is_host_transfer=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
TF_ASSERT_OK_AND_ASSIGN(HloSchedule schedule,
ScheduleModule(module.get(), size_fn,
ComputationSchedulerToModuleScheduler(
ListMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
EXPECT_LT(absl::c_find(sequence, instructions_by_name.at("send-done")),
absl::c_find(sequence, instructions_by_name.at("n1")));
}
TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
auto builder = HloComputation::Builder(TestName());
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {6});
auto lit = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1, 1})));
auto abs_const = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, lit));
auto abs_abs1 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
absl::Span<HloInstruction* const>({abs_abs1})));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto abs_abs2 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
builder.AddInstruction(HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd,
tuple_elm, abs_abs2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 1);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(abs_abs2, tuple));
}
TEST_F(HloSchedulingTest, MultiOutputFusionAccountedCorrectly) {
const Shape r1f32 = ShapeUtil::MakeShape(xla::F32, {5});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1, 1})));
auto c2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4, 5})));
auto c3 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({0, 2, 4, 6, 8})));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, c1, c2));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kMultiply, add, c3));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({add, mul}));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kExp, c3));
builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, tuple_elm, exp));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto fusion = computation->CreateFusionInstruction(
{tuple, mul, add}, HloInstruction::FusionKind::kLoop);
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 2);
},
ComputationSchedulerToModuleScheduler(ListMemoryScheduler)));
EXPECT_EQ(module->entry_computation()->instruction_count(),
schedule.sequence(module->entry_computation()).size());
SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(exp, fusion));
}
TEST_F(HloSchedulingTest, TrivialScheduler) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
param.b = (s32[], s32[]) parameter(0)
gte.0 = s32[] get-tuple-element(param.b), index=0
gte.1 = s32[] get-tuple-element(param.b), index=1
add = s32[] add(gte.0, gte.1)
ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
param.c = (s32[], s32[]) parameter(0)
ROOT constant = pred[] constant(true)
}
ENTRY main {
init = (s32[], s32[]) parameter(0)
ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_FALSE(module->has_schedule());
TF_ASSERT_OK(HloTrivialScheduler().Run(module.get()).status());
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
std::unique_ptr<HloModule> clone = module->Clone();
ASSERT_TRUE(clone->has_schedule());
TF_ASSERT_OK(clone->schedule().Verify());
}
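// BFSMemoryScheduler visits instructions breadth-first, so the broadcasts,
// adds, reduces, and final adds should come out as level-by-level groups; the
// indices collected below must therefore already be sorted.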
TEST_F(HloSchedulingTest, BFSScheduler) {
const char* const hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY e {
p0 = f32[1,2,1,512,256] parameter(0)
c0 = f32[] constant(0)
c1 = f32[] constant(1)
bcast1 = f32[1,2,1,512,256] broadcast(c1), dimensions={}
add1 = f32[1,2,1,512,256] add(p0, bcast1)
c2 = f32[] constant(2)
bcast2 = f32[1,2,1,512,256] broadcast(c2), dimensions={}
add2 = f32[1,2,1,512,256] add(p0, bcast2)
c3 = f32[] constant(3)
bcast3 = f32[1,2,1,512,256] broadcast(c3), dimensions={}
add3 = f32[1,2,1,512,256] add(p0, bcast3)
c4 = f32[] constant(4)
bcast4 = f32[1,2,1,512,256] broadcast(c4), dimensions={}
add4 = f32[1,2,1,512,256] add(p0, bcast4)
c5 = f32[] constant(5)
bcast5 = f32[1,2,1,512,256] broadcast(c5), dimensions={}
add5 = f32[1,2,1,512,256] add(p0, bcast5)
r1 = f32[1,2] reduce(add1, c0), dimensions={2,3,4}, to_apply=add
r2 = f32[1,2] reduce(add2, c0), dimensions={2,3,4}, to_apply=add
r3 = f32[1,2] reduce(add3, c0), dimensions={2,3,4}, to_apply=add
r4 = f32[1,2] reduce(add4, c0), dimensions={2,3,4}, to_apply=add
r5 = f32[1,2] reduce(add5, c0), dimensions={2,3,4}, to_apply=add
out0 = f32[1,2] add(r1, r2)
out1 = f32[1,2] add(r3, r4)
out2 = f32[1,2] add(out0, out1)
ROOT out3 = f32[1,2] add(out2, r5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(
module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
},
ComputationSchedulerToModuleScheduler(BFSMemoryScheduler)));
const std::vector<HloInstruction*>& sequence =
schedule.sequence(module->entry_computation()).instructions();
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name;
for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
auto index = [&](std::string_view name) -> size_t {
const HloInstruction* instruction = instructions_by_name.at(name);
return std::distance(sequence.begin(), absl::c_find(sequence, instruction));
};
std::vector<size_t> indices = {
index("bcast1"), index("bcast2"), index("bcast3"), index("bcast4"),
index("bcast5"), index("add1"), index("add2"), index("add3"),
index("add4"), index("add5"), index("r1"), index("r2"),
index("r3"), index("r4"), index("r5"), index("out0"),
index("out1"), index("out2"), index("out3")};
EXPECT_TRUE(absl::c_is_sorted(indices));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_memory_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_memory_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05faf2a9-e04b-4634-a8f0-167672c2e325 | cpp | tensorflow/tensorflow | logistic_expander | third_party/xla/xla/service/logistic_expander.cc | third_party/xla/xla/service/logistic_expander_test.cc | #include "xla/service/logistic_expander.h"
#include <optional>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
bool LogisticExpander::InstructionMatchesPattern(HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kLogistic;
}
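// Expands logistic(x) into explicit HLO ops, i.e. logistic(x) = 1/(1+exp(-x)).
// A sketch of the rewritten graph (names illustrative, not literal output):
//   one   = broadcast(constant(1.0))
//   denom = add(one, exp(negate(x)))
//   out   = divide(one, denom)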
absl::StatusOr<HloInstruction*> LogisticExpander::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* operand = instruction->mutable_operand(0);
const Shape operand_shape = operand->shape();
HloInstruction* one_constant = MakeScalarLike(operand, 1.0f);
HloInstruction* exp_instr =
MakeUnaryHlo(HloOpcode::kExp,
MakeUnaryHlo(HloOpcode::kNegate, operand).value())
.value();
HloInstruction* denominator =
MakeBinaryHlo(HloOpcode::kAdd, one_constant, exp_instr).value();
return MakeBinaryHlo(HloOpcode::kDivide, one_constant, denominator).value();
}
} | #include "xla/service/logistic_expander.h"
#include <memory>
#include <string_view>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
class LogisticExpanderTest : public HloTestBase {};
TEST_F(LogisticExpanderTest, ExpandWith) {
const char* kModuleStr = R"(
HloModule m
test {
p = f32[2,3] parameter(0)
ROOT r = f32[2,3] logistic(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
auto computation = m->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kLogistic);
LogisticExpander logistic_expander;
ASSERT_TRUE(logistic_expander.Run(m.get()).value());
root = computation->root_instruction();
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Divide(
m::Broadcast(m::ConstantScalar(1.0)),
m::AddAnyOrder(m::Broadcast(m::ConstantScalar(1.0)),
m::Exp(m::Negate(m::Parameter(0)))))));
}
TEST_F(LogisticExpanderTest, DynamicDimensions) {
constexpr std::string_view hlo = R"(
HloModule DynamicDimensions
ENTRY main {
p = f32[<=10] parameter(0)
ROOT root = f32[<=10] logistic(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
LogisticExpander logistic_expander;
ASSERT_TRUE(logistic_expander.Run(module.get()).value());
DynamicPadder dynamic_padder;
EXPECT_TRUE(dynamic_padder.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/logistic_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/logistic_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
29bd60c2-1063-443b-b3e5-ead2ffba7234 | cpp | tensorflow/tensorflow | optimize_input_output_buffer_alias | third_party/xla/xla/service/optimize_input_output_buffer_alias.cc | third_party/xla/xla/service/optimize_input_output_buffer_alias_test.cc | #include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
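// Builds input/output aliasing: collects donor buffers from the parameters
// and donee buffers from the output, grouped by memory space, then pairs
// donors with donees of identical byte size (largest first) and records each
// matched pair as an alias.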
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Build(
absl::Span<const Shape> input_shapes, const Shape& output_shape,
HloInputOutputAliasConfig* alias_config,
HloBufferDonorConfig* buffer_donor_config) {
bool changed = false;
if (output_shape.is_dynamic()) {
return false;
}
struct DonorEntry {
int64_t param_number;
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DonorEntry>> donors;
for (int64_t param_number = 0; param_number < input_shapes.size();
++param_number) {
const Shape& input_shape = input_shapes[param_number];
TF_RET_CHECK(LayoutUtil::HasLayout(input_shape));
VLOG(1) << "input_shape: " << input_shape.ToString();
ShapeUtil::ForEachSubshape(input_shape, [&](const Shape& subshape,
const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape) || subshape.is_dynamic()) {
return;
}
if (alias_config->ParameterHasAlias(param_number, index)) {
return;
}
if (registered_buffer_donor_only_ &&
!buffer_donor_config->ParameterIsBufferDonor(param_number, index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donors[memory_space].emplace_back(
DonorEntry{param_number, index, shape_size_fn_(subshape)});
});
}
struct DoneeEntry {
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DoneeEntry>> donees;
TF_RET_CHECK(LayoutUtil::HasLayout(output_shape));
VLOG(1) << "output_shape: " << output_shape.ToString();
ShapeUtil::ForEachSubshape(
output_shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape)) {
return;
}
if (alias_config->OutputHasAlias(index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donees[memory_space].emplace_back(
DoneeEntry{index, shape_size_fn_(subshape)});
});
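  // Match donors to donees per memory space with a two-pointer sweep over the
  // size-sorted (descending) lists: equal sizes alias and both advance;
  // otherwise only the side holding the larger entry advances.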
for (auto& [memory_space, donor_vector] : donors) {
auto donee_it = donees.find(memory_space);
if (donee_it == donees.end()) {
continue;
}
auto& donee_vector = donee_it->second;
absl::c_stable_sort(donor_vector,
[](const DonorEntry& a, const DonorEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
absl::c_stable_sort(donee_vector,
[](const DoneeEntry& a, const DoneeEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
int64_t donor_vector_index = 0;
int64_t donee_vector_index = 0;
while (donor_vector_index < donor_vector.size() &&
donee_vector_index < donee_vector.size()) {
const auto& donor = donor_vector[donor_vector_index];
const auto& donee = donee_vector[donee_vector_index];
if (donor.shape_size > donee.shape_size) {
donor_vector_index += 1;
} else if (donor.shape_size < donee.shape_size) {
donee_vector_index += 1;
} else {
TF_RETURN_IF_ERROR(alias_config->SetUpAlias(
donee.index, donor.param_number, donor.index));
TF_RETURN_IF_ERROR(buffer_donor_config->RemoveBufferDonor(
donor.param_number, donor.index));
donor_vector_index += 1;
donee_vector_index += 1;
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto& entry_computation_layout = module->entry_computation_layout();
std::vector<Shape> input_shapes;
input_shapes.reserve(module->entry_computation()->num_parameters());
for (int64_t i = 0; i < module->entry_computation()->num_parameters(); ++i) {
input_shapes.push_back(entry_computation_layout.parameter_shape(i));
}
const Shape& output_shape = entry_computation_layout.result_shape();
HloInputOutputAliasConfig* alias_config =
&module->input_output_alias_config();
HloBufferDonorConfig* buffer_donor_config = &module->buffer_donor_config();
TF_ASSIGN_OR_RETURN(bool changed, Build(input_shapes, output_shape,
alias_config, buffer_donor_config));
TF_RETURN_IF_ERROR(alias_config->Verify(*module, shape_size_fn_));
return changed;
}
} | #include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
class OptimizeInputOutputBufferAliasTest : public HloTestBase {
protected:
OptimizeInputOutputBufferAliasTest() {
r1f32_ = ShapeUtil::MakeShape(F32, {4});
r2f32_ = ShapeUtil::MakeShape(F32, {4, 5});
r3f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6});
r4f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6, 7});
    d1f32_ = ShapeUtil::MakeShape(F32, {256}, /*dynamic_dimensions=*/{true});
    d2f32_ = ShapeUtil::MakeShape(F32, {128, 128},
                                  /*dynamic_dimensions=*/{false, true});
d3f32_ = ShapeUtil::MakeShape(F32, {512});
}
void CreatePassAndBufferDonorConfig(
bool registered_donor_buffer_only = false) {
optimize_pass_ = std::make_unique<OptimizeInputOutputBufferAlias>(
registered_donor_buffer_only);
buffer_donor_config_ = HloBufferDonorConfig();
}
int64_t AliasCount() {
int64_t count = 0;
alias_config_.ForEachAlias(
[&](const ShapeIndex&, const HloInputOutputAliasConfig::Alias&) {
count++;
});
return count;
}
bool BuildAliasConfig(const std::vector<Shape>& input_shapes,
const Shape& output_shape) {
alias_config_ = HloInputOutputAliasConfig(output_shape);
auto changed = optimize_pass_->Build(input_shapes, output_shape,
&alias_config_, &buffer_donor_config_);
TF_CHECK_OK(changed.status());
return changed.value();
}
std::unique_ptr<OptimizeInputOutputBufferAlias> optimize_pass_;
HloInputOutputAliasConfig alias_config_;
HloBufferDonorConfig buffer_donor_config_;
Shape r1f32_;
Shape r2f32_;
Shape r3f32_;
Shape r4f32_;
Shape d1f32_;
Shape d2f32_;
Shape d3f32_;
};
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentBufferSizes) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, OrderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{3});
}
TEST_F(OptimizeInputOutputBufferAliasTest, PartialReuseNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r1f32_, r2f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 2);
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{0}));
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{1}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{2}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({r1f32_}), r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape(
{r1f32_, ShapeUtil::MakeTupleShape({r3f32_, r2f32_}), r2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 3);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0, 0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex({1, 1}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex({1, 0}));
EXPECT_FALSE(alias_config_.ParameterHasAlias(0, {0, 3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, MultipleParameters) {
CreatePassAndBufferDonorConfig(false);
  std::vector<Shape> input = {r1f32_, r2f32_, r3f32_, r4f32_};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(1, {}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(2, {}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(3, {}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, BufferDonorOnly) {
CreatePassAndBufferDonorConfig(true);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r2f32_, r1f32_});
TF_CHECK_OK(buffer_donor_config_.AddBufferDonor(0, {0}));
EXPECT_TRUE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 1);
EXPECT_FALSE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{1});
EXPECT_FALSE(alias_config_.GetAliasedOutput(0, {1}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeWithTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({d1f32_, d2f32_})};
Shape output = ShapeUtil::MakeTupleShape({d1f32_, d2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeNoTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferOutput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d3f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferInput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d3f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentMemorySpaces) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
for (int i = 0; i < output.tuple_shapes_size(); ++i) {
output.mutable_tuple_shapes(i)->mutable_layout()->set_memory_space(
Layout::kHostMemorySpace);
}
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/optimize_input_output_buffer_alias.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/optimize_input_output_buffer_alias_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b10bbd84-8d23-498a-992f-f9d31a286f23 | cpp | tensorflow/tensorflow | zero_sized_hlo_elimination | third_party/xla/xla/service/zero_sized_hlo_elimination.cc | third_party/xla/xla/service/zero_sized_hlo_elimination_test.cc | #include "xla/service/zero_sized_hlo_elimination.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
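// Replaces safely-removable, statically-shaped zero-element array
// instructions (excluding constants, side-effecting ops, and non-arrays)
// with an equivalent zero-element constant.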
absl::StatusOr<bool> ZeroSizedHloElimination::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : comp->MakeInstructionPostOrder()) {
if (instruction->HasSideEffect() || !instruction->shape().IsArray() ||
instruction->opcode() == HloOpcode::kConstant) {
continue;
}
if (comp->IsSafelyRemovable(instruction) &&
ShapeUtil::IsZeroElementArray(instruction->shape()) &&
instruction->shape().is_static()) {
Shape shape = instruction->shape();
if (!LayoutUtil::HasLayout(shape)) {
LayoutUtil::SetToDefaultLayout(&shape);
}
TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
instruction,
HloInstruction::CreateConstant(Literal::CreateFromShape(shape))));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/zero_sized_hlo_elimination.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ZeroSizedHloEliminationTest : public HloTestBase {
protected:
ZeroSizedHloEliminationTest()
: HloTestBase(),
builder_("zero_sized_computation"),
zero_sized_param_(
builder_.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {3, 0}), "zero sized param"))) {}
absl::StatusOr<bool> RunZeroSizedElimination() {
auto module = CreateNewVerifiedModule("zero_sized_elimination_test_module");
module->AddEntryComputation(builder_.Build());
return ZeroSizedHloElimination{}.Run(module.get());
}
HloComputation::Builder builder_;
HloInstruction* zero_sized_param_;
};
TEST_F(ZeroSizedHloEliminationTest, EliminatedZeroSizedOp) {
builder_.AddInstruction(HloInstruction::CreateUnary(
zero_sized_param_->shape(), HloOpcode::kTanh, zero_sized_param_));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_TRUE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateParameter) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateSideEffects) {
auto token = builder_.AddInstruction(HloInstruction::CreateToken());
auto send = builder_.AddInstruction(
HloInstruction::CreateSend(zero_sized_param_, token, 0));
builder_.AddInstruction(HloInstruction::CreateSendDone(send));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateConstant) {
builder_.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({})));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, ZeroSizedInstructionWithoutLayoutFolded) {
Shape op_shape = ShapeUtil::MakeShape(F32, {4, 0});
op_shape.clear_layout();
HloInstruction* param1 = builder_.AddInstruction(
HloInstruction::CreateParameter(1, op_shape, "zero sized param 1"));
HloInstruction* param2 = builder_.AddInstruction(
HloInstruction::CreateParameter(2, op_shape, "zero sized param 2"));
builder_.AddInstruction(
HloInstruction::CreateBinary(op_shape, HloOpcode::kAdd, param1, param2));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/zero_sized_hlo_elimination.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/zero_sized_hlo_elimination_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07907015-2bf1-4447-b999-7e04d57e2766 | cpp | tensorflow/tensorflow | bfloat16_propagation | third_party/xla/xla/service/bfloat16_propagation.cc | third_party/xla/xla/service/bfloat16_propagation_test.cc | #include "xla/service/bfloat16_propagation.h"
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
BFloat16Propagation::BFloat16Propagation(const FloatSupport* bfloat16_support)
: bfloat16_support_(bfloat16_support) {
DCHECK_EQ(bfloat16_support->LowPrecisionType(), BF16);
}
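// Propagates precision decisions made for a fusion instruction into its fused
// computation: the fused root inherits the BF16 subshapes chosen for the
// fusion's output, then the fused instructions are processed in reverse
// post-order.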
void BFloat16Propagation::DetermineFusionComputationPrecision(
HloInstruction* fusion) {
CHECK_EQ(fusion->opcode(), HloOpcode::kFusion);
if (!bfloat16_support_->SupportsMixedPrecisions(*fusion)) {
return;
}
auto root = fusion->fused_instructions_computation()->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(fusion, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Fused root " << root->ToString() << " at shape index "
<< index << " changed to BF16 precision for fusion "
<< fusion->ToString();
}
});
auto insts =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
    DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/false);
}
computations_visited_in_backward_pass_.insert(
fusion->fused_instructions_computation());
RevertIfFusionInternalBF16Changes(fusion);
}
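// Undoes BF16 changes that are purely internal to a fusion: changes not
// forced by the fusion's operands and not visible through buffers aliased
// with its root would only add converts inside the fusion without changing
// its interface, so they are reverted.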
void BFloat16Propagation::RevertIfFusionInternalBF16Changes(
HloInstruction* fusion) {
auto has_changes = [this](HloInstruction* inst) {
auto it = changes_to_bf16_.find(inst);
return it != changes_to_bf16_.end() && !it->second.empty();
};
auto root = fusion->fused_instructions_computation()->root_instruction();
absl::flat_hash_set<const HloValue*> changed_root_buffers;
auto root_changes_it = changes_to_bf16_.find(root);
if (root_changes_it != changes_to_bf16_.end()) {
for (const auto& entry : root_changes_it->second) {
for (const HloValue* value :
dataflow_->GetValueSet(root, entry.second).values()) {
changed_root_buffers.insert(value);
}
}
}
auto aliases_changed_root_buffer = [this, &changed_root_buffers](
const HloInstruction* inst) {
bool aliasing = false;
ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
const ShapeIndex& index) {
if (aliasing) {
return;
}
if (subshape.element_type() != F32) {
return;
}
aliasing = absl::c_any_of(dataflow_->GetValueSet(inst, index).values(),
IsValueIn(changed_root_buffers));
});
return aliasing;
};
for (auto inst :
fusion->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kParameter) {
continue;
}
if (aliases_changed_root_buffer(inst)) {
continue;
}
if (inst->opcode() == HloOpcode::kFusion) {
bool parameter_reverted = false;
for (int64_t i = 0; i < inst->operand_count(); ++i) {
if (has_changes(inst->mutable_operand(i))) {
continue;
}
auto* fused_parameter = inst->fused_parameter(i);
if (has_changes(fused_parameter)) {
changes_to_bf16_.erase(fused_parameter);
parameter_reverted = true;
}
}
if (parameter_reverted) {
RevertIfFusionInternalBF16Changes(inst);
}
}
if (!has_changes(inst)) {
continue;
}
bool revert_changes = true;
for (auto operand : inst->operands()) {
if (has_changes(operand)) {
revert_changes = false;
break;
}
}
if (revert_changes) {
changes_to_bf16_.erase(inst);
}
}
}
void BFloat16Propagation::DetermineWhileComputationsPrecision(
HloInstruction* while_hlo) {
CHECK_EQ(while_hlo->opcode(), HloOpcode::kWhile);
HloComputation* body = while_hlo->while_body();
auto body_root = body->root_instruction();
HloComputation* condition = while_hlo->while_condition();
ShapeUtil::ForEachSubshape(
body_root->shape(), [this, while_hlo, body_root](
const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(while_hlo, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(body_root, index, BF16);
VLOG(2) << "While body root " << body_root->ToString()
<< " at shape index " << index
<< " changed to BF16 precision for while "
<< while_hlo->ToString();
}
});
auto body_insts = body->MakeInstructionPostOrder();
for (auto inst_it = body_insts.rbegin(); inst_it != body_insts.rend();
++inst_it) {
    DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/false);
}
computations_visited_in_backward_pass_.insert(body);
auto condition_insts = condition->MakeInstructionPostOrder();
for (auto inst_it = condition_insts.rbegin();
inst_it != condition_insts.rend(); ++inst_it) {
    DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/false);
}
computations_visited_in_backward_pass_.insert(condition);
}
void BFloat16Propagation::DetermineConditionalComputationsPrecision(
HloInstruction* cond) {
CHECK_EQ(cond->opcode(), HloOpcode::kConditional);
for (int64_t i = 0; i < cond->branch_count(); ++i) {
auto branch = cond->branch_computation(i);
auto root = branch->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(cond, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Conditional branch " << i << " root "
<< root->ToString() << " at shape index " << index
<< " changed to BF16 precision for conditional "
<< cond->ToString();
}
});
auto insts = branch->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
      DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/false);
}
computations_visited_in_backward_pass_.insert(branch);
}
}
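// Returns true only if every visited use of the value at `index` either
// already consumes BF16, treats the operand as effectively low precision, or
// forwards it into a position that has itself been switched to BF16.
// Side-effecting users and values pinned to F32 veto the change.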
bool BFloat16Propagation::AllUsersConsumeBF16(const HloInstruction& hlo,
const ShapeIndex& index) const {
const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);
if (subshape.element_type() != BF16 && subshape.element_type() != F32) {
return false;
}
auto& value_set = dataflow_->GetValueSet(&hlo, index);
for (const HloValue* value : value_set.values()) {
if (ContainsKey(values_that_must_be_kept_as_f32_, value)) {
return false;
}
if (value->shape().element_type() == BF16) {
continue;
}
for (const HloUse& use : value->GetUses()) {
if (!ContainsKey(instructions_visited_in_backward_pass_,
use.instruction)) {
continue;
}
if (use.instruction->HasSideEffectNoRecurse()) {
return false;
}
if (use.instruction->opcode() == HloOpcode::kFusion) {
auto* fused_parameter =
use.instruction->fused_parameter(use.operand_number);
if (OutputTypeAfterChange(fused_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kWhile) {
auto* cond_parameter =
use.instruction->while_condition()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
auto* body_parameter =
use.instruction->while_body()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(body_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kConditional) {
auto* cond_parameter =
use.instruction->branch_computation(use.operand_number - 1)
->parameter_instruction(0);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsLowPrecision(
*use.instruction, use.operand_number)) {
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(
*use.instruction, use.operand_number)) {
if (use.instruction->opcode() == HloOpcode::kTuple ||
(use.instruction->opcode() == HloOpcode::kAllReduce &&
use.instruction->shape().IsTuple())) {
ShapeIndex use_output_index{use.operand_number};
for (int64_t i : use.operand_index) {
use_output_index.push_back(i);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex use_output_index;
for (int64_t i = 1; i < use.operand_index.size(); ++i) {
use_output_index.push_back(use.operand_index[i]);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else {
if (OutputTypeAfterChange(use.instruction, use.operand_index) ==
BF16) {
continue;
}
}
}
return false;
}
}
return true;
}
bool BFloat16Propagation::ShouldKeepPrecisionUnchanged(
const HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kFusion &&
inst->fusion_kind() == HloInstruction::FusionKind::kCustom) {
return ShouldKeepPrecisionUnchanged(
inst->fused_instructions_computation()->root_instruction());
}
return (inst->opcode() == HloOpcode::kCustomCall &&
!inst->IsCustomCall("AllocateBuffer")) ||
inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kBitcastConvert ||
inst->HasSideEffectNoRecurse();
}
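// Backward-pass entry point for one instruction: decides whether its F32
// output subshapes can be flipped to BF16 (all users must consume BF16), and
// recurses into fusion/while/conditional computations unless they have
// multiple callers, in which case their processing is postponed.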
void BFloat16Propagation::DetermineInstructionPrecision(HloInstruction* hlo,
bool skip_parameters) {
bool postpone_processing_called_computations = false;
absl::Cleanup cleaner = [this, hlo,
&postpone_processing_called_computations] {
if (!postpone_processing_called_computations) {
if (hlo->opcode() == HloOpcode::kFusion) {
DetermineFusionComputationPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kWhile) {
DetermineWhileComputationsPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kConditional) {
DetermineConditionalComputationsPrecision(hlo);
}
}
instructions_visited_in_backward_pass_.insert(hlo);
};
if (hlo->opcode() == HloOpcode::kWhile &&
(caller_counts_[hlo->while_condition()] > 1 ||
caller_counts_[hlo->while_body()] > 1)) {
postpone_processing_called_computations = true;
return;
}
if (hlo->opcode() == HloOpcode::kConditional &&
absl::c_any_of(hlo->branch_computations(), [&](const HloComputation* c) {
return caller_counts_[c] > 1;
})) {
postpone_processing_called_computations = true;
return;
}
CHECK(hlo->parent() != nullptr);
if (hlo == hlo->parent()->root_instruction()) {
if (!hlo->parent()->IsFusionComputation()) {
      ShapeUtil::ForEachSubshape(hlo->shape(), [&](const Shape& /*subshape*/,
const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) != F32) {
return;
}
for (const auto* value : dataflow_->GetValueSet(hlo, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
});
}
return;
}
if (ShouldKeepPrecisionUnchanged(hlo) ||
(hlo->opcode() == HloOpcode::kParameter && skip_parameters)) {
return;
}
if (!ContainsKey(consider_using_bfloat16_, hlo)) {
return;
}
if (!bfloat16_support_->SupportsLowPrecisionOutput(*hlo)) {
return;
}
ShapeUtil::ForEachSubshape(
hlo->shape(),
      [hlo, this](const Shape& /*subshape*/, const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) == F32 &&
AllUsersConsumeBF16(*hlo, index)) {
AddToOrRemoveFromBF16ChangeSet(hlo, index, BF16);
VLOG(2) << "HloInstruction output at shape index " << index
<< " changed to BF16 precision: " << hlo->ToString();
}
});
}
bool BFloat16Propagation::InstructionIsCandidateForBF16Output(
HloInstruction* hlo) {
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo) &&
hlo->opcode() != HloOpcode::kTuple &&
hlo->opcode() != HloOpcode::kGetTupleElement &&
hlo->opcode() != HloOpcode::kDomain &&
hlo->shape().element_type() != BF16) {
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
if (!bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(*hlo,
i) ||
!ContainsKey(consider_using_bfloat16_, hlo->operand(i))) {
return false;
}
}
}
return true;
}
void BFloat16Propagation::AdjustCalledComputationParameters(
HloInstruction* hlo) {
auto adjust_computation = [this, hlo](
HloComputation* computation,
absl::Span<HloInstruction* const> operands) {
CHECK_EQ(operands.size(), computation->num_parameters());
for (int64_t i = 0; i < operands.size(); ++i) {
auto parameter = computation->parameter_instruction(i);
ShapeUtil::ForEachSubshape(
parameter->shape(),
          [this, i, hlo, &operands, parameter](const Shape& /*subshape*/,
const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(parameter->shape(), index)) {
return;
}
PrimitiveType operand_type =
OutputTypeAfterChange(operands[i], index);
if (OutputTypeAfterChange(parameter, index) == operand_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(parameter, index, operand_type);
VLOG(2) << "Called computation parameter " << parameter->ToString()
<< " at shape index " << index << " adjusted to "
<< (operand_type == BF16 ? "BF16" : "F32")
<< " to match operand in HLO " << hlo->ToString();
});
}
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(),
hlo->operands());
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_condition(), hlo->operands());
adjust_computation(hlo->while_body(), hlo->operands());
break;
case HloOpcode::kConditional:
for (int64_t i = 0; i < hlo->branch_count(); ++i) {
adjust_computation(hlo->branch_computation(i),
{hlo->mutable_operand(i + 1)});
}
break;
default:
break;
}
}
void BFloat16Propagation::AdjustCalledComputationRoot(HloInstruction* hlo) {
auto adjust_computation = [this, hlo](HloComputation* computation,
HloInstruction* output) {
HloInstruction* root = computation->root_instruction();
ShapeUtil::ForEachSubshape(root->shape(), [this, hlo, root, output](
                                                  const Shape& /*subshape*/,
const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(hlo->shape(), index)) {
return;
}
const PrimitiveType output_type = OutputTypeAfterChange(output, index);
if (OutputTypeAfterChange(root, index) == output_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(root, index, output_type);
if (output_type == F32) {
for (const auto* value : dataflow_->GetValueSet(root, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
}
VLOG(2) << "Called computation root " << root->ToString()
<< " at shape index " << index << " adjusted to "
<< (output_type == BF16 ? "BF16" : "F32")
<< " to match output shape of " << hlo->ToString();
});
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(), hlo);
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_body(), hlo);
break;
case HloOpcode::kConditional:
for (auto* branch : hlo->branch_computations()) {
adjust_computation(branch, hlo);
}
break;
default:
break;
}
}
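// Fixed-point cleanup within one computation: aliased dataflow values and
// in-place input/output pairs must agree on a single precision, so any F32
// participant forces the whole group back to F32. Returns true if a
// parameter's precision changed, which requires the caller to be reprocessed.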
bool BFloat16Propagation::ResolveInconsistencyOfAliasingBuffersHelper(
HloComputation* computation,
absl::flat_hash_set<const HloComputation*>* visited_computations) {
bool parameter_changed = false;
auto insts = computation->MakeInstructionPostOrder();
while (true) {
bool any_change = false;
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
auto hlo = *inst_it;
      auto adjust_hlo_output = [&](const Shape& /*subshape*/,
const ShapeIndex& index) {
const PrimitiveType output_type = OutputTypeAfterChange(hlo, index);
VLOG(2) << "output_type is " << ((output_type == BF16) ? "BF16" : "F32")
<< " for :" << hlo->ToString() << "\n";
if (output_type != F32 && output_type != BF16) {
return;
}
PrimitiveType type = BF16;
for (const auto* value : dataflow_->GetValueSet(hlo, index).values()) {
auto value_type = ValueTypeAfterChange(value);
if (value_type == BF16) {
continue;
}
VLOG(2) << "Adjust to F32 due to aliased dataflow value: "
<< value->ToString() << "\n";
CHECK_EQ(value_type, F32);
type = F32;
break;
}
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(hlo)) {
if (operand_and_output_index.second == index) {
const HloOperandIndex& operand_index =
operand_and_output_index.first;
for (const auto* value :
dataflow_
->GetValueSet(hlo->operand(operand_index.operand_number),
operand_index.operand_index)
.values()) {
auto value_type = ValueTypeAfterChange(value);
if (value_type == BF16) {
continue;
}
VLOG(2) << "Adjust to F32 due to InputOutPair: "
<< value->ToString() << "\n";
CHECK_EQ(value_type, F32);
type = F32;
break;
}
}
}
if (type == BF16 && !AllUsersConsumeBF16(*hlo, index)) {
VLOG(2) << "Adjust to F32 due to All user consumeBF16 fail\n";
type = F32;
}
if (type == F32) {
for (const auto* value :
dataflow_->GetValueSet(hlo, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
}
if (type != output_type) {
any_change = true;
AddToOrRemoveFromBF16ChangeSet(hlo, index, type);
VLOG(2) << "HloInstruction output at shape index " << index
<< " adjusted to " << (type == BF16 ? "BF16" : "F32") << ": "
<< hlo->ToString();
if (hlo->opcode() == HloOpcode::kParameter) {
parameter_changed = true;
}
}
};
ShapeUtil::ForEachSubshape(hlo->shape(), adjust_hlo_output);
AdjustCalledComputationRoot(hlo);
if (hlo->opcode() == HloOpcode::kWhile) {
absl::flat_hash_set<const HloComputation*> visited_in_while;
while (ResolveInconsistencyOfAliasingBuffersHelper(
hlo->while_condition(), &visited_in_while) ||
ResolveInconsistencyOfAliasingBuffersHelper(hlo->while_body(),
&visited_in_while)) {
visited_in_while.clear();
ShapeUtil::ForEachSubshape(hlo->shape(), adjust_hlo_output);
AdjustCalledComputationRoot(hlo);
}
visited_computations->insert(visited_in_while.begin(),
visited_in_while.end());
} else if (hlo->opcode() == HloOpcode::kFusion) {
ResolveInconsistencyOfAliasingBuffersHelper(
hlo->fused_instructions_computation(), visited_computations);
} else if (hlo->opcode() == HloOpcode::kConditional) {
for (auto* branch : hlo->branch_computations()) {
ResolveInconsistencyOfAliasingBuffersHelper(branch,
visited_computations);
}
}
}
if (!any_change) {
break;
}
}
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
AdjustCalledComputationParameters(*inst_it);
}
return parameter_changed;
}
void BFloat16Propagation::ResolveInconsistencyOfAliasingBuffers(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto& computations_topological_order =
module->MakeComputationPostOrder(execution_threads);
absl::flat_hash_set<const HloComputation*> resolved;
for (auto comp_it = computations_topological_order.rbegin();
comp_it != computations_topological_order.rend(); ++comp_it) {
if (ContainsKey(resolved, *comp_it)) {
continue;
}
ResolveInconsistencyOfAliasingBuffersHelper(*comp_it, &resolved);
}
}
absl::Status BFloat16Propagation::ResolveInconsistentFusions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->MakeComputationPostOrder(execution_threads)) {
auto insts = computation->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
auto hlo = *inst_it;
if (hlo->opcode() != HloOpcode::kFusion) {
continue;
}
auto fusion_computation = hlo->fused_instructions_computation();
auto fusion_root = fusion_computation->root_instruction();
if (ShapeUtil::Compatible(fusion_root->shape(), hlo->shape())) {
continue;
}
ShapeTree<HloInstruction*> converted_outputs(hlo->shape());
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
fusion_computation->DeepCopyInstructionWithCustomCopier(
fusion_root,
[hlo](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const Shape& hlo_subshape =
ShapeUtil::GetSubshape(hlo->shape(), leaf_index);
if (ShapeUtil::Compatible(leaf->shape(), hlo_subshape)) {
return leaf;
}
return comp->AddInstruction(
HloInstruction::CreateConvert(hlo_subshape, leaf));
}));
fusion_computation->set_root_instruction(copy);
}
}
return absl::OkStatus();
}
absl::Status BFloat16Propagation::ResolveConvertedConstants(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->MakeComputationPostOrder(execution_threads)) {
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kConstant) {
continue;
}
if (!Shape::Equal().MinorToMajorOnlyInLayout()(hlo->literal().shape(),
hlo->shape())) {
TF_ASSIGN_OR_RETURN(auto converted_literal,
hlo->literal().ConvertToShape(hlo->shape()));
auto new_constant = computation->AddInstruction(
HloInstruction::CreateConstant(std::move(converted_literal)));
UpdateLayout(new_constant->mutable_shape());
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_constant));
}
}
}
return absl::OkStatus();
}
absl::Status BFloat16Propagation::SkipNoopConversions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->computations(execution_threads)) {
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kConvert) {
continue;
}
auto source = hlo->mutable_operand(0);
if (!ShapeUtil::Equal(source->shape(), hlo->shape())) {
continue;
}
const bool is_root = hlo == computation->root_instruction();
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(source));
if (is_root) {
computation->set_root_instruction(source);
}
}
}
return absl::OkStatus();
}
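// Pass driver: deep-copies while-loop inits through converts so loop state
// can change precision independently, runs the backward precision pass over
// all computations, resolves aliasing inconsistencies, materializes the
// chosen BF16 shapes (inserting converts where precision must stay
// unchanged), and finally cleans up no-op converts, tuples, and dead code.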
absl::StatusOr<bool> BFloat16Propagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
consider_using_bfloat16_.clear();
instructions_visited_in_backward_pass_.clear();
computations_visited_in_backward_pass_.clear();
values_that_must_be_kept_as_f32_.clear();
caller_counts_.clear();
changes_to_bf16_.clear();
changed_ = false;
auto computations_topological_order =
module->MakeComputationPostOrder(execution_threads);
for (auto computation : computations_topological_order) {
for (auto inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kWhile) {
continue;
}
auto operand = inst->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
computation->DeepCopyInstructionWithCustomCopier(
operand, [](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
if (leaf->shape().element_type() != F32) {
return leaf;
}
return comp->AddInstruction(
HloInstruction::CreateConvert(leaf->shape(), leaf));
}));
TF_RETURN_IF_ERROR(operand->ReplaceUseWith(inst, copy));
}
}
TF_ASSIGN_OR_RETURN(dataflow_, HloDataflowAnalysis::Run(*module));
for (auto computation : computations_topological_order) {
for (auto inst : computation->MakeInstructionPostOrder()) {
if (InstructionIsCandidateForBF16Output(inst)) {
consider_using_bfloat16_.insert(inst);
}
}
}
for (auto comp_it = computations_topological_order.rbegin();
comp_it != computations_topological_order.rend(); ++comp_it) {
if (ContainsKey(computations_visited_in_backward_pass_, *comp_it)) {
continue;
}
auto insts = (*comp_it)->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
      DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/true);
}
computations_visited_in_backward_pass_.insert(*comp_it);
}
ResolveInconsistencyOfAliasingBuffers(module, execution_threads);
for (auto& change : changes_to_bf16_) {
auto inst = change.first;
if (ShouldKeepPrecisionUnchanged(inst)) {
auto users = inst->users();
bool is_root = inst == inst->parent()->root_instruction();
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
inst->parent()->DeepCopyInstructionWithCustomCopier(
inst, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
if (!ContainsKey(change.second,
ShapeUtil::GetMutableSubshape(
inst->mutable_shape(), leaf_index))) {
return leaf;
}
auto converted_shape =
ShapeUtil::ChangeElementType(leaf->shape(), BF16);
UpdateLayout(&converted_shape);
return comp->AddInstruction(
HloInstruction::CreateConvert(converted_shape, leaf));
}));
for (auto user : users) {
TF_RETURN_IF_ERROR(inst->ReplaceUseWithDifferentShape(user, copy));
}
if (is_root) {
        inst->parent()->set_root_instruction(copy,
                                             /*accept_different_shape=*/true);
}
continue;
}
for (const auto& entry : change.second) {
auto subshape = entry.first;
CHECK_EQ(subshape->element_type(), F32);
subshape->set_element_type(BF16);
UpdateLayout(subshape);
changed_ = true;
}
}
auto clean_up = [this, module, &execution_threads]() {
TF_RETURN_IF_ERROR(SkipNoopConversions(module, execution_threads));
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(
tuple_simplifier.Run(module, execution_threads).status());
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
return absl::OkStatus();
};
if (!changed_) {
TF_RETURN_IF_ERROR(clean_up());
return false;
}
TF_RETURN_IF_ERROR(ResolveInconsistentFusions(module, execution_threads));
TF_RETURN_IF_ERROR(ResolveConvertedConstants(module, execution_threads));
TF_RETURN_IF_ERROR(clean_up());
return true;
}
PrimitiveType BFloat16Propagation::OutputTypeAfterChange(
HloInstruction* hlo, const ShapeIndex& index) const {
Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);
const PrimitiveType type_on_hlo = subshape->element_type();
if (type_on_hlo != F32) {
return type_on_hlo;
}
auto it = changes_to_bf16_.find(hlo);
if (it == changes_to_bf16_.end()) {
return type_on_hlo;
}
return ContainsKey(it->second, subshape) ? BF16 : F32;
}
PrimitiveType BFloat16Propagation::ValueTypeAfterChange(
const HloValue* value) const {
auto hlo = value->defining_instruction();
const auto& position = value->defining_position();
return OutputTypeAfterChange(hlo, position.index);
}
void BFloat16Propagation::AddToOrRemoveFromBF16ChangeSet(
HloInstruction* hlo, const ShapeIndex& index, PrimitiveType target_type) {
if (target_type == BF16) {
auto& entry = changes_to_bf16_[hlo];
entry.emplace(ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index),
index);
} else {
CHECK_EQ(target_type, F32);
auto it = changes_to_bf16_.find(hlo);
if (it == changes_to_bf16_.end()) {
return;
}
it->second.erase(
ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index));
if (it->second.empty()) {
changes_to_bf16_.erase(it);
}
}
}
} | #include "xla/service/bfloat16_propagation.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return true;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return true;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return true;
}
bool EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const override {
return hlo.opcode() == HloOpcode::kDot;
}
};
class BFloat16PropagationTest : public HloTestBase {
protected:
BFloat16PropagationTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool PropagatePrecision(HloModule* module) {
TestBFloat16Support bfloat16_support;
BFloat16Propagation propagation(&bfloat16_support);
absl::StatusOr<bool> result = propagation.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
bool OutputsBF16(const HloInstruction* inst) {
if (inst->shape().element_type() == BF16) {
return true;
}
return inst->user_count() == 1 &&
inst->users()[0]->opcode() == HloOpcode::kConvert &&
inst->users()[0]->shape().element_type() == BF16;
}
std::unique_ptr<HloInstruction> CreateDot(const Shape& shape,
HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
DefaultPrecisionConfig(2));
}
};
TEST_F(BFloat16PropagationTest, PropagateThroughSelectButNotAdd) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, b));
HloInstruction* pred = builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {2, 4}), a, b, ComparisonDirection::kEq));
HloInstruction* sel = builder.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, pred, c, add1));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), sel, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, a));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(sel));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add0));
EXPECT_FALSE(OutputsBF16(a));
EXPECT_FALSE(OutputsBF16(b));
EXPECT_FALSE(OutputsBF16(c));
}
TEST_F(BFloat16PropagationTest, PropagateThroughMaxPoolReduceWindow) {
auto module = CreateNewVerifiedModule();
auto sub_builder = HloComputation::Builder("max");
HloInstruction* p0 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "a"));
HloInstruction* p1 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "b"));
sub_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, p0, p1));
auto max_computation = module->AddEmbeddedComputation(sub_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
HloInstruction* rw =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, add,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
window, max_computation));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), c, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, rw));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(rw));
}
TEST_F(BFloat16PropagationTest, DoNotChangeAllReduce) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
auto rb = HloComputation::Builder(TestName());
rb.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd,
rb.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0")),
rb.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"))));
auto reduction = module->AddEmbeddedComputation(rb.Build());
HloInstruction* all_reduce =
builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({shape, shape}), {a, b}, reduction,
          CollectiveDeviceList(), /*constrain_layout=*/false,
          /*channel_id=*/1, /*use_global_device_ids=*/false));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
}
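// Tests that constant operands of a dot are converted in place: they remain
// kConstant instructions but their literals become BF16.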
TEST_F(BFloat16PropagationTest, ConvertConstantLiteral) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
Array2D<float> array_a(4, 4);
array_a.FillUnique(1.0f);
Array2D<float> array_b(4, 4);
array_b.FillUnique(10.0f);
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_a)));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_b)));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, a, b));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(dot->operand(0)));
EXPECT_TRUE(OutputsBF16(dot->operand(1)));
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConstant);
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_a)),
dot->operand(0)->literal()));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_b)),
dot->operand(1)->literal()));
}
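// Tests that BF16 propagates through nested tuples and the matching
// get-tuple-elements; add2 must stay F32 because it is also part of the
// computation's output tuple.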
TEST_F(BFloat16PropagationTest, PropagateThroughTuples) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, b, b));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1, add2}));
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({tuple0, xpose}));
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(xpose->shape(), tuple1, 1));
HloInstruction* rhs =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
add0->shape(),
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple0->shape(), tuple1, 0)),
0));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
HloInstruction* output_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({dot, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), output_tuple);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(add0));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add2));
}
TEST_F(BFloat16PropagationTest, SameValueReferencedTwice) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* lhs = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(add1->shape(), tuple, 1));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_TRUE(OutputsBF16(lhs));
}
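// Tests that values aliased with the entry computation's root keep their F32
// precision, so no change is made.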
TEST_F(BFloat16PropagationTest, DoNotChangeComputationRoot) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, add, add));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add, dot}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_FALSE(OutputsBF16(add));
}
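// Tests that BF16 propagates into and across fusion computations, including
// through a tuple-shaped fusion result.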
TEST_F(BFloat16PropagationTest, PropagateThroughFusion) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f0 = HloComputation::Builder("fusion0");
HloInstruction* a_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* tuple_f0 =
builder_f0.AddInstruction(HloInstruction::CreateTuple({a_f0, b_f0}));
auto comp_f0 = module->AddEmbeddedComputation(builder_f0.Build());
auto fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f0->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f0));
auto builder_f1 = HloComputation::Builder("fusion1");
HloInstruction* p_f1 = builder_f1.AddInstruction(
HloInstruction::CreateParameter(0, tuple_f0->shape(), "param"));
HloInstruction* a_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 0));
HloInstruction* b_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 1));
HloInstruction* dot = builder_f1.AddInstruction(CreateDot(shape, a_f1, b_f1));
auto comp_f1 = module->AddEmbeddedComputation(builder_f1.Build());
auto fusion1 = builder.AddInstruction(HloInstruction::CreateFusion(
dot->shape(), HloInstruction::FusionKind::kCustom, {fusion0}, comp_f1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion1);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(a_f0));
EXPECT_TRUE(OutputsBF16(b_f0));
EXPECT_TRUE(OutputsBF16(a_f1));
EXPECT_TRUE(OutputsBF16(b_f1));
}
TEST_F(BFloat16PropagationTest, FusionWithBitcastConvertRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape u32_shape = ShapeUtil::MakeShape(U32, {4, 4});
Shape f32_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "param"));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f = builder_f.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "a"));
HloInstruction* bc_f = builder_f.AddInstruction(
HloInstruction::CreateBitcastConvert(f32_shape, a_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
f32_shape, HloInstruction::FusionKind::kLoop, {param}, comp_f));
auto dot = builder.AddInstruction(CreateDot(f32_shape, fusion, fusion));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_EQ(bc_f->shape(), f32_shape);
EXPECT_TRUE(OutputsBF16(bc_f));
}
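// Tests that precision changes which would only affect values internal to a
// fusion are discarded, leaving the module unchanged.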
TEST_F(BFloat16PropagationTest, DiscardFusionInternalBF16Changes) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* dot_f =
builder_f.AddInstruction(CreateDot(shape, add_f, add_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
dot_f->shape(), HloInstruction::FusionKind::kCustom, {add, add}, comp_f));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion);
}
TEST_F(BFloat16PropagationTest, ConvertTupleFusionElementIfUsedByAdd) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion0");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* tuple_f =
builder_f.AddInstruction(HloInstruction::CreateTuple({a_f, add_f}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(gte0));
EXPECT_TRUE(OutputsBF16(gte1));
EXPECT_FALSE(OutputsBF16(a_f));
EXPECT_FALSE(OutputsBF16(b_f));
EXPECT_TRUE(OutputsBF16(add_f));
auto new_fusion_root = comp_f->root_instruction();
EXPECT_EQ(new_fusion_root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(new_fusion_root->operand(1), add_f);
EXPECT_EQ(new_fusion_root->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_TRUE(OutputsBF16(new_fusion_root->operand(0)));
}
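// Tests that BF16 propagates through a while loop whose state is a single
// array rather than a tuple.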
TEST_F(BFloat16PropagationTest, PropagateThroughSimpleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_param, cond_param));
auto cond_root = builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_dot =
builder_body.AddInstruction(CreateDot(shape, body_param, body_param));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(
ShapeUtil::Equal(cond_root->shape(), ShapeUtil::MakeShape(PRED, {})));
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(body_dot));
EXPECT_TRUE(OutputsBF16(body_param));
EXPECT_TRUE(OutputsBF16(cond_param));
EXPECT_FALSE(OutputsBF16(dot));
}
TEST_F(BFloat16PropagationTest,
ConditionPreventsPropagationForFusionInsideWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {0, 0}, {1, 1},
{1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {1, 1}, {2, 2},
{1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_param, {0, 1}));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
builder_f.AddInstruction(HloInstruction::CreateTranspose(shape, a_f, {0, 1}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto body_fusion = builder_body.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {body_transpose}, comp_f));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_FALSE(OutputsBF16(add));
EXPECT_FALSE(OutputsBF16(body_fusion));
EXPECT_FALSE(OutputsBF16(body_param));
EXPECT_FALSE(OutputsBF16(body_transpose));
EXPECT_FALSE(OutputsBF16(a_f));
}
TEST_F(BFloat16PropagationTest, PropagateThroughTupleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "cond_param"));
auto cond_lhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 0));
auto cond_rhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 1));
auto cond_add_rhs = builder_cond.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, cond_rhs, cond_rhs));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_lhs, cond_add_rhs));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "body_param"));
auto body_lhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
auto body_rhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 1));
auto body_dot1 =
builder_body.AddInstruction(CreateDot(shape, body_lhs, body_rhs));
auto body_dot2 =
builder_body.AddInstruction(CreateDot(shape, body_rhs, body_lhs));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_dot2, {0, 1}));
builder_body.AddInstruction(
HloInstruction::CreateTuple({body_dot1, body_transpose}));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(tuple->shape(), cond, body, tuple));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_hlo, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_hlo, 1));
auto dot = builder.AddInstruction(CreateDot(shape, lhs, rhs));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(lhs));
EXPECT_FALSE(OutputsBF16(rhs));
EXPECT_TRUE(OutputsBF16(body_dot1));
EXPECT_TRUE(OutputsBF16(body_lhs));
EXPECT_FALSE(OutputsBF16(body_rhs));
EXPECT_FALSE(OutputsBF16(body_dot2));
EXPECT_FALSE(OutputsBF16(body_transpose));
EXPECT_TRUE(OutputsBF16(cond_lhs));
EXPECT_FALSE(OutputsBF16(cond_rhs));
EXPECT_TRUE(OutputsBF16(add0));
EXPECT_FALSE(OutputsBF16(add1));
}
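// Tests that a while body shared by two loops is left unchanged, although
// values local to each loop's condition computation may still be converted.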
TEST_F(BFloat16PropagationTest, DoNotPropagateWhilesCallingSameComputation) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({add2, add3}));
auto builder_cond0 = HloComputation::Builder("cond0");
auto cond0_param = builder_cond0.AddInstruction(
HloInstruction::CreateParameter(0, tuple0->shape(), "cond0_param"));
auto cond0_lhs = builder_cond0.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond0_param, 0));
auto cond0_rhs = builder_cond0.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond0_param, 1));
auto cond0_add_rhs =
builder_cond0.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, cond0_rhs, cond0_rhs));
auto cond0_dot =
builder_cond0.AddInstruction(CreateDot(shape, cond0_lhs, cond0_add_rhs));
builder_cond0.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond0.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond0.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond0_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond0.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond0.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond0_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond0 = module->AddEmbeddedComputation(builder_cond0.Build());
auto builder_cond1 = HloComputation::Builder("cond1");
auto cond1_param = builder_cond1.AddInstruction(
HloInstruction::CreateParameter(0, tuple1->shape(), "cond1_param"));
auto cond1_lhs = builder_cond1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond1_param, 0));
auto cond1_rhs = builder_cond1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond1_param, 1));
auto cond1_add_lhs =
builder_cond1.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, cond1_lhs, cond1_lhs));
auto cond1_dot =
builder_cond1.AddInstruction(CreateDot(shape, cond1_add_lhs, cond1_rhs));
builder_cond1.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond1.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond1.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond1_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond1.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond1.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond1_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond1 = module->AddEmbeddedComputation(builder_cond1.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, tuple0->shape(), "body_param"));
auto body_lhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
auto body_rhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 1));
auto body_dot =
builder_body.AddInstruction(CreateDot(shape, body_lhs, body_rhs));
builder_body.AddInstruction(
HloInstruction::CreateTuple({body_dot, body_rhs}));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple0->shape(), cond0, body, tuple0));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple1->shape(), cond1, body, tuple1));
auto lhs = builder.AddInstruction(
CreateDot(shape,
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while0, 0)),
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while0, 1))));
auto rhs = builder.AddInstruction(
CreateDot(shape,
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while1, 0)),
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while1, 1))));
auto dot = builder.AddInstruction(CreateDot(shape, lhs, rhs));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_FALSE(OutputsBF16(body_dot));
EXPECT_FALSE(OutputsBF16(body_rhs));
EXPECT_FALSE(OutputsBF16(body_lhs));
EXPECT_FALSE(OutputsBF16(cond0_lhs));
EXPECT_FALSE(OutputsBF16(cond0_rhs));
EXPECT_FALSE(OutputsBF16(cond1_lhs));
EXPECT_FALSE(OutputsBF16(cond1_rhs));
EXPECT_TRUE(OutputsBF16(cond0_add_rhs));
EXPECT_TRUE(OutputsBF16(cond1_add_lhs));
EXPECT_EQ(computation->root_instruction(), dot);
}
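// Tests that convert instructions which become no-ops after propagation are
// removed, with their users rewired to the original producers.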
TEST_F(BFloat16PropagationTest, NoopConversionRemoved) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "param"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, param, param));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, param, param));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 1));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte0));
HloInstruction* convert1 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte1));
HloInstruction* add2 = builder.AddInstruction(HloInstruction::CreateBinary(
bf16_shape, HloOpcode::kAdd, convert0, convert1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), add2);
EXPECT_EQ(add2->operand(0), add0);
EXPECT_EQ(add2->operand(1), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
}
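// Tests that BF16 propagates through a tuple-shaped kDomain instruction while
// the parameters feeding it keep F32.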
TEST_F(BFloat16PropagationTest, TupleDomain) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* a_trans =
builder.AddInstruction(HloInstruction::CreateTranspose(shape, a, {0, 1}));
HloInstruction* b_trans =
builder.AddInstruction(HloInstruction::CreateTranspose(shape, b, {0, 1}));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({a_trans, b_trans}));
HloInstruction* domain = builder.AddInstruction(
HloInstruction::CreateDomain(tuple->shape(), tuple, nullptr, nullptr));
HloInstruction* a_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 0));
HloInstruction* b_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, a_gte, b_gte));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 0).element_type(),
BF16);
EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 1).element_type(),
BF16);
EXPECT_TRUE(OutputsBF16(a_trans));
EXPECT_TRUE(OutputsBF16(b_trans));
EXPECT_TRUE(OutputsBF16(a_gte));
EXPECT_TRUE(OutputsBF16(b_gte));
EXPECT_FALSE(OutputsBF16(a));
EXPECT_FALSE(OutputsBF16(b));
}
TEST_F(BFloat16PropagationTest, TupleDomainNoPropagation) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
HloInstruction* domain = builder.AddInstruction(
HloInstruction::CreateDomain(param->shape(), param, nullptr, nullptr));
HloInstruction* a_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 0));
HloInstruction* b_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 1));
HloInstruction* a_trans = builder.AddInstruction(
HloInstruction::CreateTranspose(shape, a_gte, {0, 1}));
HloInstruction* b_trans = builder.AddInstruction(
HloInstruction::CreateTranspose(shape, b_gte, {0, 1}));
HloInstruction* dot =
builder.AddInstruction(CreateDot(shape, a_trans, b_trans));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(a_trans));
EXPECT_TRUE(OutputsBF16(b_trans));
EXPECT_FALSE(OutputsBF16(a_gte));
EXPECT_FALSE(OutputsBF16(b_gte));
EXPECT_FALSE(OutputsBF16(domain));
EXPECT_FALSE(OutputsBF16(param));
}
TEST_F(BFloat16PropagationTest, ConditionalSeparateBranchOperands) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
ROOT max = f32[4096,4096] maximum(true_param, true_param)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
ROOT add = f32[4096,4096] add(false_param, false_param)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
param1 = f32[4096,4096] parameter(1)
copy0 = f32[4096,4096] copy(param0)
copy1 = f32[4096,4096] copy(param1)
param2 = pred[] parameter(2)
conditional = f32[4096,4096] conditional(param2, copy0, copy1),
true_computation=true_branch, false_computation=false_branch
ROOT dot = f32[4096,4096] dot(conditional, conditional),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
auto cond = FindInstruction(module.get(), "conditional");
auto copy0 = FindInstruction(module.get(), "copy0");
auto copy1 = FindInstruction(module.get(), "copy1");
EXPECT_TRUE(OutputsBF16(cond));
EXPECT_TRUE(OutputsBF16(copy0));
EXPECT_FALSE(OutputsBF16(copy1));
}
TEST_F(BFloat16PropagationTest, ConditionalSharedBranchOperands) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
ROOT max = f32[4096,4096] maximum(true_param, true_param)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
ROOT add = f32[4096,4096] add(false_param, false_param)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = f32[4096,4096] conditional(param1, copy0, copy0),
true_computation=true_branch, false_computation=false_branch
ROOT dot = f32[4096,4096] dot(conditional, conditional),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
auto cond = FindInstruction(module.get(), "conditional");
auto copy0 = FindInstruction(module.get(), "copy0");
EXPECT_TRUE(OutputsBF16(cond));
EXPECT_FALSE(OutputsBF16(copy0));
}
TEST_F(BFloat16PropagationTest, ConditionalAliasingOutputs) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
max = f32[4096,4096] maximum(true_param, true_param)
ROOT true_tuple = (f32[4096,4096], f32[4096,4096]) tuple(max, max)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
min = f32[4096,4096] minimum(false_param, false_param)
max2 = f32[4096,4096] maximum(false_param, false_param)
ROOT false_tuple = (f32[4096,4096], f32[4096,4096]) tuple(min, max2)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = (f32[4096,4096], f32[4096,4096]) conditional(param1, copy0, copy0),
true_computation=true_branch, false_computation=false_branch
gte0 = f32[4096,4096] get-tuple-element(conditional), index=0
gte1 = f32[4096,4096] get-tuple-element(conditional), index=1
dot = f32[4096,4096] dot(gte0, gte1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[4096,4096], f32[4096,4096]) tuple(dot, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_FALSE(PropagatePrecision(module.get()));
}
TEST_F(BFloat16PropagationTest, DynamicUpdateSlice) {
const std::string module_str = R"(
HloModule Module
ENTRY main {
param = f32[128,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
dynamic-update-slice = f32[128,128] dynamic-update-slice(param, broadcast.6, constant.3, constant.3)
ROOT dot = f32[128,128] dot(dynamic-update-slice, dynamic-update-slice), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_FALSE(PropagatePrecision(module.get()));
HloInstruction* dus = module->entry_computation()->GetInstructionWithName(
"dynamic-update-slice");
EXPECT_FALSE(OutputsBF16(dus));
}
TEST_F(BFloat16PropagationTest, ConditionalGTEWithFusion) {
const std::string module_str = R"(
HloModule module
%add.0 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.1 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.2 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.3 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
true_branch {
true_param = f32[4096,4096] parameter(0)
constant.1 = f32[4096,4096] constant(0)
add0 = f32[4096,4096] fusion(true_param,true_param), kind=kLoop, calls=add.0
constant.2 = f32[4096,4096] constant(0)
ROOT tuple.2 = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) tuple(true_param,add0,constant.2)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
add3 = f32[4096,4096] fusion(false_param,false_param), kind=kLoop, calls=add.1
constant.1 = f32[4096,4096] constant(0)
ROOT tuple.2 = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) tuple(add3, add3,constant.1)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) conditional(param1, param0, copy0),
true_computation=true_branch, false_computation=false_branch
gte = f32[4096,4096] get-tuple-element(conditional), index=0
gte1 = f32[4096,4096] get-tuple-element(conditional), index=1
gte2 = f32[4096,4096] get-tuple-element(conditional), index=2
add2 = f32[4096,4096] fusion(gte, gte1), kind=kLoop, calls=add.2
ROOT add3 = f32[4096,4096] fusion(add2, gte2), kind=kLoop, calls=add.3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
VLOG(2) << module->ToString() << "\n";
  EXPECT_TRUE(HloVerifier(/*layout_sensitive=*/false,
                          /*allow_mixed_precision=*/true)
                  .Run(module.get())
                  .status()
                  .ok());
auto gte = FindInstruction(module.get(), "gte");
auto gte1 = FindInstruction(module.get(), "gte1");
auto gte2 = FindInstruction(module.get(), "gte2");
EXPECT_FALSE(OutputsBF16(gte));
EXPECT_FALSE(OutputsBF16(gte1));
EXPECT_TRUE(OutputsBF16(gte2));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f2aec7d9-511e-42b4-a9ad-4688d7f8d448 | cpp | tensorflow/tensorflow | while_loop_analysis | third_party/xla/xla/service/while_loop_analysis.cc | third_party/xla/xla/service/while_loop_analysis_test.cc | #include "xla/service/while_loop_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
using std::nullopt;
using std::optional;
namespace m = match;
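// Finds and returns the unique non-constant operand of 'instr'. CHECK-fails
// if 'instr' has no non-constant operand or more than one distinct one.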
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
}
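// If all of 'instr's operands are either constants or get-tuple-elements of
// 'gte_operand' with the same tuple index N, returns N; otherwise returns
// nullopt. Copies (and a custom-call wrapped around 'gte_operand') are looked
// through when matching the get-tuple-element operands.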
static optional<int64_t> GetGTEOperandIndex(const HloInstruction* instr,
const HloInstruction* gte_operand) {
VLOG(2) << "GetGTEOperandIndex(" << instr->ToString()
<< ", GTE Operand: " << gte_operand->ToString() << ")";
optional<int64_t> tuple_idx;
for (const HloInstruction* operand : instr->operands()) {
if (Match(operand, m::Constant())) {
continue;
}
auto possibly_gte_operand = operand;
if (operand->opcode() == HloOpcode::kCopy) {
possibly_gte_operand = operand->operand(0);
}
if (possibly_gte_operand->opcode() != HloOpcode::kGetTupleElement) {
return nullopt;
}
if (!Match(possibly_gte_operand,
m::GetTupleElement(m::Op().Is(gte_operand))) &&
!Match(possibly_gte_operand,
m::GetTupleElement(m::CustomCall(m::Op().Is(gte_operand))))) {
return nullopt;
}
int64_t operand_tuple_idx = possibly_gte_operand->tuple_index();
if (!tuple_idx.has_value()) {
tuple_idx = operand_tuple_idx;
} else {
if (operand_tuple_idx != tuple_idx) {
return nullopt;
}
}
}
return tuple_idx;
}
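// Identifies auxiliary induction variables of the while loop: tuple elements
// that are extracted exactly once from the loop parameter, written back
// (non-constant) at the same tuple index by the body root, and connected to
// that write through a chain of get-tuple-element / add / multiply / divide /
// subtract instructions with a single non-constant input each.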
std::vector<const HloInstruction*> GetAuxiliaryLoopInductionVars(
const HloInstruction* while_op) {
std::vector<const HloInstruction*> aux_ind_gte;
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* while_body_param = while_body->parameter_instruction(0);
VLOG(2) << "Aux Induction Variables for loop:" << while_op->ToShortString();
VLOG(2) << "the parameter instr:" << while_body_param->ToShortString();
VLOG(2) << "the parameter user count:" << while_body_param->users().size();
if (while_body_param == nullptr) return aux_ind_gte;
std::map<int64_t, const HloInstruction*> extractions;
for (const HloInstruction* indx_instr : while_body_param->users()) {
if (indx_instr->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
auto it = extractions.find(indx_instr->tuple_index());
if (it != extractions.end()) {
it->second = nullptr;
VLOG(2) << "two extractions at same index:" << indx_instr->ToString();
} else {
extractions.insert(std::make_pair(indx_instr->tuple_index(), indx_instr));
VLOG(2) << "inserting extraction :" << indx_instr->ToString();
}
}
VLOG(2) << "total extractions size:" << extractions.size() << std::endl;
if (extractions.empty()) {
return aux_ind_gte;
}
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body root is not a tuple:" << while_body_root->ToString();
return aux_ind_gte;
}
int64_t index = -1;
std::map<int64_t, const HloInstruction*> insertions;
for (const HloInstruction* operand : while_body_root->operands()) {
index++;
if (!operand->IsConstant()) {
auto it = insertions.find(index);
if (it != insertions.end()) {
it->second = nullptr;
VLOG(2) << "two insertions at same index:" << operand->ToString();
} else {
insertions.insert(std::make_pair(index, operand));
VLOG(2) << "inserting insertions:" << operand->ToString();
}
}
}
if (insertions.empty()) {
return aux_ind_gte;
}
std::map<int64_t, std::pair<const HloInstruction*, const HloInstruction*>>
candidate_pairs;
for (; index >= 0; --index) {
const HloInstruction *ext, *inst;
ext = (extractions.find(index) != extractions.end())
? extractions.find(index)->second
: nullptr;
inst = (insertions.find(index) != insertions.end())
? insertions.find(index)->second
: nullptr;
if (ext != nullptr && inst != nullptr) {
if (ext != inst) {
candidate_pairs.insert(
std::make_pair(index, std::make_pair(ext, inst)));
}
}
}
VLOG(2) << "total candidate pairs:" << candidate_pairs.size() << std::endl;
const auto add_dependencies = [](const HloInstruction* hlo,
std::vector<HloInstruction*>* inputs) {
HloInstruction* non_const_operand = nullptr;
int num_non_constants = 0;
for (HloInstruction* operand : hlo->operands()) {
if (!operand->IsConstant()) {
num_non_constants++;
non_const_operand = operand;
}
}
if (num_non_constants == 1 &&
(hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kAdd ||
hlo->opcode() == HloOpcode::kMultiply ||
hlo->opcode() == HloOpcode::kDivide ||
hlo->opcode() == HloOpcode::kSubtract)) {
inputs->push_back(non_const_operand);
}
};
std::unique_ptr<HloReachabilityMap> hrm =
HloReachabilityMap::BuildWithRestrictions(
while_body,
absl::FunctionRef<void(const HloInstruction* hlo,
std::vector<HloInstruction*>* inputs)>(
add_dependencies));
for (auto candidates : candidate_pairs) {
VLOG(2) << "are reachable?:" << (candidates.second.first)->ToString()
<< "*************" << (candidates.second.second)->ToString()
<< std::endl;
if (hrm->IsReachable(candidates.second.first, candidates.second.second)) {
aux_ind_gte.push_back(candidates.second.first);
VLOG(2) << "YES";
} else {
VLOG(2) << "NO";
}
}
VLOG(2) << "num auxiliary candidates :" << aux_ind_gte.size();
return aux_ind_gte;
}
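// Tries to find the tuple index of the loop's induction variable: the same
// index must be read by the loop condition's root and be updated by the while
// body root from the body parameter, and the while init must be a tuple.
// Returns nullopt if no such index can be identified.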
optional<int64_t> GetLoopInductionVarTupleIdx(const HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
VLOG(2) << "Finding induction variable for loop "
<< while_op->ToShortString();
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_param = while_cond->parameter_instruction(0);
optional<int64_t> indvar_tuple_idx =
GetGTEOperandIndex(while_cond_root, while_cond_param);
if (!indvar_tuple_idx) {
VLOG(2) << "Induction variable not found in loop condition: "
<< while_cond->root_instruction()->ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple &&
while_body_root->opcode() != HloOpcode::kCustomCall) {
VLOG(2) << "While body's root is not a tuple or custom-call instruction: "
<< while_body_root->ToString();
return nullopt;
}
const HloInstruction* while_body_inc;
if (while_body_root->opcode() == HloOpcode::kTuple) {
while_body_inc = while_body_root->operand(*indvar_tuple_idx);
} else {
if (while_body_root->operand_count() == 1 &&
while_body_root->operand(0)->opcode() == HloOpcode::kTuple) {
auto* while_body_root_input_tuple = while_body_root->operand(0);
if (*indvar_tuple_idx >= while_body_root_input_tuple->operand_count()) {
VLOG(2) << "Cannot find the induction variable in the output root "
"custom-call "
<< while_body_root->ToString();
return std::nullopt;
}
while_body_inc = while_body_root_input_tuple->operand(*indvar_tuple_idx);
} else {
if (*indvar_tuple_idx >= while_body_root->operand_count()) {
VLOG(2) << "Cannot find the induction variable in the output root "
"custom-call "
<< while_body_root->ToString();
return std::nullopt;
}
while_body_inc = while_body_root->operand(*indvar_tuple_idx);
}
}
auto* while_body_param = while_body->parameter_instruction(0);
optional<int64_t> while_body_indvar_tuple_idx =
GetGTEOperandIndex(while_body_inc, while_body_param);
if (!while_body_indvar_tuple_idx) {
VLOG(2)
<< "Induction variable not found in while body increment instruction: "
<< while_body_inc->ToString();
return nullopt;
}
if (while_body_indvar_tuple_idx != indvar_tuple_idx) {
VLOG(2) << "Tuple index of induction variable does not match between loop "
"condition ("
<< *indvar_tuple_idx << ") and while body ("
<< *while_body_indvar_tuple_idx << ")";
return nullopt;
}
auto* while_init = while_op->operand(0);
if (while_init->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While init expected to be a tuple: " << while_init->ToString();
return nullopt;
}
VLOG(2) << "Induction variable's tuple index: " << *indvar_tuple_idx;
return indvar_tuple_idx;
}
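// Computes a + b with wrap-around semantics, returning nullopt on signed
// overflow (a and b have the same sign but the result's sign differs).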
optional<int64_t> CheckedAdd(int64_t a, int64_t b) {
uint64_t aa = absl::bit_cast<uint64_t>(a);
uint64_t bb = absl::bit_cast<uint64_t>(b);
int64_t result = absl::bit_cast<int64_t>(aa + bb);
if (a >= 0 == b >= 0 && result >= 0 != a >= 0) {
return nullopt;
}
return result;
}
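// Computes a - b, returning nullopt on signed overflow (a and b have
// different signs and the result's sign matches b's).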
optional<int64_t> CheckedSubtract(int64_t a, int64_t b) {
uint64_t aa = absl::bit_cast<uint64_t>(a);
uint64_t bb = absl::bit_cast<uint64_t>(b);
int64_t result = absl::bit_cast<int64_t>(aa - bb);
if (a >= 0 != b >= 0 && result >= 0 == b >= 0) {
return nullopt;
}
return result;
}
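// Pattern-matches loops of the form
//   i = init; while (i < N) or (i <= N) { ...; i += k; }
// where init, N, and k are constant scalar integers with k > 0, and returns
// the exact trip count; returns nullopt if the loop does not match.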
optional<int64_t> MatchTrivialLoopTripCount(const HloInstruction* while_op,
int64_t indvar_tuple_idx,
const Literal& indvar_init) {
optional<int64_t> indvar_init_val =
LiteralUtil::LiteralAsScalarInt64(indvar_init);
if (!indvar_init_val) {
VLOG(2) << "Pattern-match failed: induction variable init is not a "
"constant scalar representable as an int64_t: "
<< indvar_init.ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
HloInstruction* while_body_indvar_update;
if (while_body_root->opcode() == HloOpcode::kCustomCall) {
if (while_body_root->operand_count() == 1 &&
while_body_root->operand(0)->opcode() == HloOpcode::kTuple) {
auto* while_body_root_input_tuple = while_body_root->mutable_operand(0);
while_body_indvar_update =
while_body_root_input_tuple->mutable_operand(indvar_tuple_idx);
} else {
while_body_indvar_update =
while_body_root->mutable_operand(indvar_tuple_idx);
}
} else {
while_body_indvar_update =
while_body_root->mutable_operand(indvar_tuple_idx);
}
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
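  // Check that the induction variable is updated as `i += k` for a constant
  // effective-scalar integer step k.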
HloInstruction* trip_count_increase_step_instr = nullptr;
int64_t trip_count_step = 0;
if (!Match(while_body_indvar_update,
m::AddAnyOrder(m::Op().Is(while_body_indvar),
m::Op(&trip_count_increase_step_instr)))) {
if (trip_count_increase_step_instr == nullptr) {
VLOG(2) << "Pattern-match failed: induction variable is not getting "
"updated by an add operation: "
<< while_body_indvar_update->ToString();
return nullopt;
}
if (!trip_count_increase_step_instr->IsConstant() ||
!ShapeUtil::IsEffectiveScalar(
trip_count_increase_step_instr->shape())) {
VLOG(2) << "Pattern-match failed: induction variable is not getting "
"incremented by constant: "
<< while_body_indvar_update->ToString();
return nullopt;
}
if (!LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal())
.has_value()) {
VLOG(2)
<< "Pattern-match failed: trip count step is not an integral type: "
<< trip_count_increase_step_instr->shape().ToString();
return nullopt;
}
VLOG(2) << "Pattern-match for trip count step failed: "
<< trip_count_increase_step_instr->ToString();
}
trip_count_step = LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal())
.value();
if (trip_count_step <= 0) {
VLOG(2) << "Pattern-match failed: trip count step is not a natural number: "
<< trip_count_step;
return nullopt;
}
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_indvar = NonConstantOperand(while_cond_root);
HloInstruction* while_cond_bound = nullptr;
if (!Match(while_cond_root,
m::Op().WithBinaryOperandsAnyOrder(
m::Op().Is(while_cond_indvar),
m::ConstantEffectiveScalar(&while_cond_bound)))) {
VLOG(2) << "Pattern-match failed: while condition is not of the form "
"op(i, N) or op(N, i).";
return nullopt;
}
optional<int64_t> while_cond_bound_val =
LiteralUtil::LiteralAsScalarInt64(while_cond_bound->literal());
if (!while_cond_bound_val) {
VLOG(2) << "Pattern-match failed: while condition induction variable is "
"not a constant scalar representable as an int64_t.";
return nullopt;
}
if (Match(while_cond_root,
m::Op()
.WithComparisonDirection(ComparisonDirection::kLt)
.WithOperand(0, m::Op().Is(while_cond_indvar)))) {
VLOG(2) << "Pattern-match succeeded: loop condition is i < N: "
<< while_cond_root->ToString();
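    // For i < N, the trip count is ceil((N - init) / k); every arithmetic
    // step below is guarded against int64_t overflow.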
optional<int64_t> trips =
CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
if (trips) {
const int64_t remainder = std::remainder(*trips, trip_count_step);
const int64_t div = std::floor(*trips / trip_count_step);
if (remainder == 0) {
return std::max(int64_t{0}, div);
}
trips = CheckedAdd(div, 1);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
return nullopt;
}
if (*trips < *while_cond_bound_val) {
return std::max(int64_t{0}, *trips);
}
return std::max(int64_t{0}, div);
}
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
return nullopt;
}
if (Match(while_cond_root,
m::Op()
.WithComparisonDirection(ComparisonDirection::kLe)
.WithOperand(0, m::Op().Is(while_cond_indvar)))) {
VLOG(2) << "Pattern-match succeeded: loop condition is i <= N: "
<< while_cond_root->ToString();
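    // For i <= N, the trip count is floor((N - init) / k) + 1.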
optional<int64_t> trips =
CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
return nullopt;
}
trips = CheckedAdd(std::floor(*trips / trip_count_step), 1);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
return nullopt;
}
return std::max<int64_t>(0, *trips);
}
VLOG(2) << "Pattern-match failed: while condition follows unknown pattern: "
<< while_cond_root->ToString();
return nullopt;
}
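// Computes the loop's trip count, first via the closed-form pattern match
// above and otherwise by brute-force evaluation of the loop condition for up
// to max_brute_force_iters iterations.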
optional<int64_t> ComputeWhileLoopTripCount(const HloInstruction* while_op,
int64_t max_brute_force_iters) {
VLOG(2) << "Getting trip count for loop " << while_op->ToString();
optional<int64_t> indvar_tuple_idx = GetLoopInductionVarTupleIdx(while_op);
if (!indvar_tuple_idx) {
return nullopt;
}
  HloEvaluator evaluator(/*max_loop_iterations=*/0);
auto* while_init = while_op->operand(0);
auto* indvar_init = while_init->operand(*indvar_tuple_idx);
absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
if (!indvar_init_result.ok()) {
VLOG(2) << "Couldn't evaluate induction variable init, "
<< indvar_init_result.status() << ", " << indvar_init->ToString();
return nullopt;
}
Literal indvar_iter_val = std::move(indvar_init_result).value();
if (auto trip_count = MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx,
indvar_iter_val)) {
return trip_count;
}
auto* while_body = while_op->while_body();
auto* while_body_indvar_update =
while_body->root_instruction()->operand(*indvar_tuple_idx);
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_indvar = NonConstantOperand(while_cond_root);
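  // Simulate the loop: evaluate the condition, then the induction-variable
  // update, one iteration at a time until the condition becomes false or the
  // iteration budget is exhausted.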
for (int64_t trip_count = 0; trip_count != max_brute_force_iters + 1;
++trip_count) {
absl::StatusOr<Literal> result = evaluator.EvaluateWithSubstitutions(
while_cond_root, {{while_cond_indvar, &indvar_iter_val}});
if (!result.ok()) {
VLOG(2) << "Couldn't evaluate while cond: " << result.status();
return nullopt;
}
if (result.value().data<bool>() == absl::Span<const bool>{false}) {
VLOG(2) << "Loop has static trip count of " << trip_count;
return trip_count;
}
absl::StatusOr<Literal> indvar_next_result =
evaluator.EvaluateWithSubstitutions(
while_body_indvar_update, {{while_body_indvar, &indvar_iter_val}});
if (!indvar_next_result.ok()) {
VLOG(2) << "Couldn't evaluate induction variable update: "
<< indvar_next_result.status();
return nullopt;
}
indvar_iter_val = std::move(indvar_next_result).value();
}
VLOG(2) << "Loop has unknown trip count.";
return nullopt;
}
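// Returns 'inst's sole user if that user is a get-tuple-element; otherwise
// returns nullptr.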
static HloInstruction* GetOnlyGTE(HloInstruction* inst) {
if (inst->user_count() != 1) {
return nullptr;
}
HloInstruction* user = inst->users().back();
if (user->opcode() != HloOpcode::kGetTupleElement) {
return nullptr;
}
return user;
}
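// Returns an upper bound on the loop's trip count. Beyond exact trip counts,
// it can prove a bound of 1 when the while body always writes a constant to
// the tuple element read by the condition and the condition evaluates to
// false on that constant.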
optional<int64_t> ComputeWhileLoopTripCountUpperBound(
const HloInstruction* while_op) {
auto exact_trip_count = ComputeWhileLoopTripCount(while_op);
if (exact_trip_count) {
VLOG(2) << "Loop has exact trip count.";
return exact_trip_count;
}
auto* while_cond = while_op->while_condition();
auto* while_cond_param = while_cond->parameter_instruction(0);
auto* cond_gte = GetOnlyGTE(while_cond_param);
if (!cond_gte) {
VLOG(2) << "Induction variable not found in loop condition: "
<< while_cond->root_instruction()->ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(3) << "While body's root is not a tuple instruction: "
<< while_body_root->ToString();
return nullopt;
}
int64_t indvar_index = cond_gte->tuple_index();
auto* while_body_indvar = while_body_root->operand(indvar_index);
if (while_body_indvar->opcode() != HloOpcode::kConstant) {
VLOG(3) << "While body does not set the IV to a constant: "
<< while_body_indvar->ToString();
return nullopt;
}
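  // Build a clone of the loop condition that takes just the induction
  // variable (wrapped in a one-element tuple) as its parameter, so it can be
  // evaluated on the constant the body writes back.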
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
auto new_param = HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({cond_gte->shape()}), "temp");
replacements[cond_gte] =
HloInstruction::CreateGetTupleElement(new_param.get(), 0);
replacements[while_cond_param] = std::move(new_param);
auto new_module = std::make_unique<HloModule>("temp_mod", HloModuleConfig{});
auto* new_computation = new_module->AddEmbeddedComputation(
while_cond->CloneWithReplacements(&replacements));
  HloEvaluator evaluator(/*max_loop_iterations=*/0);
Literal fake_input = Literal::CreateFromShape(
new_computation->parameter_instruction(0)->shape());
  TF_CHECK_OK(fake_input.CopyFrom(while_body_indvar->literal(),
                                  /*dest_shape_index=*/{0},
                                  /*src_shape_index=*/{}));
absl::StatusOr<Literal> eval_result =
evaluator.Evaluate(*new_computation, {std::move(fake_input)});
if (!eval_result.ok()) {
VLOG(2) << "Couldn't evaluate while loop condition.";
return nullopt;
}
Literal cond_result_pred = std::move(eval_result.value());
CHECK(Shape::Equal().IgnoreLayout()(cond_result_pred.shape(),
ShapeUtil::MakeShape(PRED, {})));
bool cond_returns_true = cond_result_pred.GetFirstElement<bool>();
if (!cond_returns_true) {
VLOG(2) << "Upper bound on the trip count is 1";
return 1;
}
VLOG(2) << "Loop has no known upper bound on the trip count.";
return nullopt;
}
} | #include "xla/service/while_loop_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class WhileLoopAnalysisTest : public HloTestBase {
protected:
[[nodiscard]] absl::StatusOr<int64_t> MakeWhileLoopAndGetTripCount(
int init, int limit, int step, ComparisonDirection dir);
};
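// Builds a counted loop from an HLO template by substituting {{INIT}},
// {{LIMIT}}, {{STEP}} and {{COMP_DIR}}, then queries
// MatchTrivialLoopTripCount with the loop's induction-variable index and
// initial value.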
absl::StatusOr<int64_t> WhileLoopAnalysisTest::MakeWhileLoopAndGetTripCount(
int init, int limit, int step, ComparisonDirection dir) {
std::string hlo_string_template = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
index = s32[] get-tuple-element(p_body), index=1
one = s32[] constant({{STEP}})
inc = s32[] add(index, one)
ROOT root = (f32[2], s32[]) tuple(val, inc)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant({{LIMIT}})
ROOT result = pred[] compare(gte, const), direction={{COMP_DIR}}
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] constant({{INIT}})
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
}
)";
std::string hlo_string =
absl::StrReplaceAll(hlo_string_template,
{{"{{INIT}}", absl::StrCat(init)},
{"{{LIMIT}}", absl::StrCat(limit)},
{"{{STEP}}", absl::StrCat(step)},
{"{{COMP_DIR}}", ComparisonDirectionToString(dir)}});
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
while_op, 1,
Cast<HloConstantInstruction>(
module->GetComputationWithName("entry")->GetInstructionWithName(
"param.1"))
->literal());
CHECK(trip_count.has_value());
return *trip_count;
}
TEST_F(WhileLoopAnalysisTest, SingleIterationUpperBound) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(-1)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 1);
}
TEST_F(WhileLoopAnalysisTest, SimpleLoopWithCustomCallNonTuple) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(idx, output), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.5, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* while_op = m->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 5);
}
TEST_F(WhileLoopAnalysisTest, SimpleLoopWithCustomCall) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(tuple), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* while_op = m->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 5);
}
TEST_F(WhileLoopAnalysisTest, NoUpperBound) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(42)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
EXPECT_EQ(ComputeWhileLoopTripCountUpperBound(while_op), std::nullopt);
}
int CalculateTripCount(int init, int limit, int step, ComparisonDirection dir) {
int trip_count = 0;
if (dir == ComparisonDirection::kLt) {
for (int i = init; i < limit; i += step) {
trip_count++;
}
} else if (dir == ComparisonDirection::kLe) {
for (int i = init; i <= limit; i += step) {
trip_count++;
}
} else {
LOG(FATAL) << "Unknown comparison direction: "
<< ComparisonDirectionToString(dir);
}
return trip_count;
}
TEST_F(WhileLoopAnalysisTest, ExactBoundTrivialTripCount) {
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 1, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 2, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 5, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 40, 5, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 1, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 2, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 5, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 40, 5, ComparisonDirection::kLe));
}
TEST_F(WhileLoopAnalysisTest, NoAIVNoConstChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
val3 = s32[] get-tuple-element(p_body), index=2
add = s32[] add(val2, val3)
sub = s32[] subtract(add, val3)
ROOT root = (f32[2], s32[], s32[]) tuple(val1, add, sub)
}
condition {
p_cond = (f32[2], s32[], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
param.2 = s32[] parameter(2)
while_init = (f32[2], s32[], s32[]) tuple(param.0, param.1, param.2)
ROOT while = (f32[2], s32[], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 0);
}
TEST_F(WhileLoopAnalysisTest, AIVMultiChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const.1 = s32[] constant(42)
const.2 = s32[] constant(42)
const.3 = s32[] constant(42)
add = s32[] add(val2, const.1)
sub = s32[] subtract(add, const.2)
mul = s32[] multiply(sub, const.3)
ROOT root = (f32[2], s32[]) tuple(val1, mul)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 1);
EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
TEST_F(WhileLoopAnalysisTest, NoAIV) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
add = s32[] add(val2, val2)
const.1 = s32[] constant(42)
mul = s32[] multiply(add, const.1)
div = s32[] divide(mul, add)
ROOT root = (f32[2], s32[]) tuple(val1, div)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 0);
}
TEST_F(WhileLoopAnalysisTest, AIVNoChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const = s32[] constant(42)
add = s32[] add(val2, const)
ROOT root = (f32[2], s32[]) tuple(val1, add)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 1);
EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b08dd595-724f-4f37-ad2d-7ae6ec6c1b89 | cpp | tensorflow/tensorflow | name_uniquer | third_party/xla/xla/service/name_uniquer.cc | third_party/xla/xla/service/name_uniquer_test.cc | #include "xla/service/name_uniquer.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
bool IsAllowed(char character) {
auto c = static_cast<unsigned char>(character);
return (absl::ascii_isalnum(c) != 0) || c == '_' || c == '.' || c == '-';
}
}
NameUniquer::NameUniquer(const std::string& separator) {
CHECK(absl::c_all_of(separator, IsAllowed))
<< "separator should comprises allowed characters only";
separator_ = separator;
}
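// Sanitization rules, as implemented below: the first character must be a
// letter or '_' (anything else becomes '_'), all other disallowed characters
// become '_', names that collide with a primitive type name (except "tuple")
// get a trailing '_', and a reserved leading "__" (other than the "__xla_"
// prefix) is neutralized by rewriting the first character to 'a'.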
std::string NameUniquer::GetSanitizedName(absl::string_view name) {
if (name.empty()) {
return "";
}
std::string result(name);
char c = static_cast<unsigned char>(result[0]);
if (!absl::ascii_isalpha(c) && c != '_') {
result[0] = '_';
}
for (int i = 1, iter_limit = result.length(); i < iter_limit; i++) {
if (!IsAllowed(result[i])) {
result[i] = '_';
}
}
if (primitive_util::IsPrimitiveTypeName(result) && result != "tuple") {
result += "_";
}
if (absl::StartsWith(result, "__") && !absl::StartsWith(result, "__xla_")) {
result[0] = 'a';
}
return result;
}
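// Uniquification: if the sanitized prefix already ends in
// "<separator><number>", that number is registered as the requested id for
// the root name; otherwise id 0 is requested. Mirroring the tests below with
// a '.' separator: GetUniqueName("foo") -> "foo", GetUniqueName("foo.54") ->
// "foo.54", and a second GetUniqueName("foo") -> "foo.1".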
std::string NameUniquer::GetUniqueName(absl::string_view prefix) {
std::string root =
GetSanitizedName(prefix.empty() ? "name" : std::string(prefix));
bool has_numeric_suffix = false;
int64_t numeric_suffix = 0;
size_t separator_index = root.rfind(separator_);
if (separator_index != std::string::npos && (separator_index > 0) &&
(separator_index < root.size() - separator_.size())) {
std::string after_suffix = root.substr(separator_index + separator_.size());
if (absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
has_numeric_suffix = true;
root = root.substr(0, separator_index);
} else {
numeric_suffix = 0;
}
}
SequentialIdGenerator& id_generator = generated_names_[root];
numeric_suffix = id_generator.RegisterId(numeric_suffix);
if (numeric_suffix == 0) {
return has_numeric_suffix ? absl::StrCat(root, separator_, 0) : root;
}
absl::StrAppend(&root, separator_, numeric_suffix);
return root;
}
} | #include "xla/service/name_uniquer.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using NameUniquerTest = ::testing::Test;
TEST_F(NameUniquerTest, SimpleUniquer) {
NameUniquer uniquer;
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo__2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo__3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar__1", uniquer.GetUniqueName("bar"));
EXPECT_EQ("qux", uniquer.GetUniqueName("qux"));
}
TEST_F(NameUniquerTest, DifferentSeparator) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar", uniquer.GetUniqueName("bar"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar"));
}
TEST_F(NameUniquerTest, NumericSuffixes) {
NameUniquer uniquer(".");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.55.1", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("foo.55.0", uniquer.GetUniqueName("foo.55.1"));
EXPECT_EQ("bar.1000", uniquer.GetUniqueName("bar.1000"));
EXPECT_EQ("bar.2000", uniquer.GetUniqueName("bar.2000"));
EXPECT_EQ("bar.-2000", uniquer.GetUniqueName("bar.-2000"));
EXPECT_EQ("bar.1", uniquer.GetUniqueName("bar.1"));
}
TEST_F(NameUniquerTest, PrefixHasSuffix) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11.0", uniquer.GetUniqueName("foo.11.0"));
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
}
TEST_F(NameUniquerTest, Sanitize) {
NameUniquer uniquer("_");
EXPECT_EQ("foo", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo_1", uniquer.GetUniqueName("foo"));
EXPECT_EQ("foo.54", uniquer.GetUniqueName("foo.54"));
EXPECT_EQ("foo_54", uniquer.GetUniqueName("foo_54"));
EXPECT_EQ("foo_54.1", uniquer.GetUniqueName("foo_54.1"));
EXPECT_EQ("foo_2", uniquer.GetUniqueName("foo"));
EXPECT_EQ("bar_1000", uniquer.GetUniqueName("bar<1000"));
EXPECT_EQ("bar_2000", uniquer.GetUniqueName("bar<2000"));
EXPECT_EQ("bar_1", uniquer.GetUniqueName("bar_1"));
EXPECT_EQ("_10", uniquer.GetUniqueName(
".10"));
EXPECT_EQ("_10_1", uniquer.GetUniqueName(".10"));
EXPECT_EQ("_10_2", uniquer.GetUniqueName("_10"));
EXPECT_EQ("foobar_", uniquer.GetUniqueName("foobar_"));
EXPECT_EQ("foobar__1", uniquer.GetUniqueName("foobar_"));
}
TEST_F(NameUniquerTest, KeepNamesInRandomOrder) {
NameUniquer uniquer(".");
EXPECT_EQ("foo.11", uniquer.GetUniqueName("foo.11"));
EXPECT_EQ("foo.10", uniquer.GetUniqueName("foo.10"));
EXPECT_EQ("foo.1", uniquer.GetUniqueName("foo.1"));
EXPECT_EQ("foo.12", uniquer.GetUniqueName("foo.12"));
EXPECT_EQ("foo.3", uniquer.GetUniqueName("foo.3"));
}
TEST_F(NameUniquerTest, AvoidKeywords) {
NameUniquer uniquer(".");
EXPECT_EQ("f32_", uniquer.GetUniqueName("f32"));
EXPECT_EQ("s64_", uniquer.GetUniqueName("s64"));
EXPECT_EQ("pred_", uniquer.GetUniqueName("pred"));
EXPECT_NE(uniquer.GetUniqueName("__xla_").find("__xla_"), std::string::npos);
EXPECT_EQ(uniquer.GetUniqueName("__abx").find("__"), std::string::npos);
EXPECT_EQ("tuple", uniquer.GetUniqueName("tuple"));
EXPECT_EQ("F32", uniquer.GetUniqueName("F32"));
EXPECT_EQ("S32", uniquer.GetUniqueName("S32"));
EXPECT_EQ("Pred", uniquer.GetUniqueName("Pred"));
}
TEST_F(NameUniquerTest, DetectSeparator) {
NameUniquer uniquer;
EXPECT_EQ(uniquer.GetUniqueName("a__1"), "a__1");
EXPECT_EQ(uniquer.GetUniqueName("a"), "a");
EXPECT_EQ(uniquer.GetUniqueName("a"), "a__2");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/name_uniquer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/name_uniquer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ee9a979-55b8-4a9d-b483-99b126a7cb9a | cpp | tensorflow/tensorflow | convolution_pred_expander | third_party/xla/xla/service/convolution_pred_expander.cc | third_party/xla/xla/service/convolution_pred_expander_test.cc | #include "xla/service/convolution_pred_expander.h"
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace m = match;
bool ConvolutionPredExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return Match(instruction, m::Convolution(m::Op().WithElementType(PRED),
m::Op().WithElementType(PRED))
.WithElementType(PRED));
}
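// Expansion strategy: convert both PRED operands to F16, re-emit the
// convolution with an F16 result shape, and convert the result back to PRED.
// F16 is presumably used as the cheapest element type that convolutions
// support natively on the targeted backends.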
absl::StatusOr<HloInstruction*> ConvolutionPredExpander::ExpandInstruction(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
absl::InlinedVector<HloInstruction*, 2> new_operands;
absl::c_transform(instruction->operands(), std::back_inserter(new_operands),
[&](HloInstruction* operand) {
CHECK_EQ(operand->shape().element_type(), PRED);
return MakeConvertToHlo(operand, F16);
});
Shape new_shape = ShapeUtil::ChangeElementType(instruction->shape(), F16);
HloInstruction* new_instruction = computation->AddInstruction(
instruction->CloneWithNewOperands(new_shape, new_operands));
return MakeConvertToHlo(new_instruction, PRED);
}
} | #include "xla/service/convolution_pred_expander.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
using ConvolutionPredExpanderTest = HloTestBase;
TEST_F(ConvolutionPredExpanderTest, Match) {
std::string hlo_string = R"(HloModule convolution_pred
ENTRY convolution_computation {
input = pred[10,10]{1,0} parameter(0)
kernel = pred[10,10]{1,0} parameter(1)
ROOT conv = pred[10,10]{1,0} convolution(input, kernel), dim_labels=bf_io->bf
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
ConvolutionPredExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::Convolution(m::Op().WithElementType(F16),
m::Op().WithElementType(F16))
.WithElementType(F16))
.WithElementType(PRED)));
}
TEST_F(ConvolutionPredExpanderTest, NoMatch) {
std::string hlo_string = R"(HloModule convolution_s8
ENTRY convolution_computation {
input = s8[10,10]{1,0} parameter(0)
kernel = s8[10,10]{1,0} parameter(1)
ROOT conv = s8[10,10]{1,0} convolution(input, kernel), dim_labels=bf_io->bf
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
ConvolutionPredExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_pred_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_pred_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc0b3645-c11e-4cfb-b58e-5f062405c65f | cpp | tensorflow/tensorflow | hlo_replication_analysis | third_party/xla/xla/service/hlo_replication_analysis.cc | third_party/xla/xla/service/hlo_replication_analysis_test.cc | #include "xla/service/hlo_replication_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
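// Determines the replication of a single instruction (at one shape index)
// from the replication already computed for its operands. Collectives,
// replica-id/partition-id, constants, a few X64 custom calls, and a fixed
// set of elementwise/data-movement opcodes are handled specially; any
// side-effecting or unrecognized instruction is conservatively treated as
// unique on all devices.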
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::DetermineHloInstructionIsReplicated(
const HloInstruction* hlo, const ShapeIndex& index,
bool cross_partition_spmd,
const absl::flat_hash_map<const HloInstruction*, ShapeTree<HloReplication>>&
hlo_replication,
bool support_partial_replication) {
const auto merge_operand_replication = [&hlo_replication](
const HloInstruction* inst) {
HloReplication replication = HloReplication::ReplicatedOnAllDevices();
for (auto operand : inst->operands()) {
auto operand_it = hlo_replication.find(operand);
if (operand_it == hlo_replication.end()) {
replication = replication.Merge(HloReplication::UniqueOnAllDevices());
} else {
replication = replication.Merge(operand_it->second.element({}));
}
}
return replication;
};
if (hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kAllGather) {
HloReplication replication = merge_operand_replication(hlo);
if (replication.IsReplicatedOnAllDevices()) {
return replication;
}
if (!hlo->channel_id().has_value()) {
if (cross_partition_spmd) {
return replication;
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
}
if (support_partial_replication) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const ReplicaGroup& replica_group : hlo->replica_groups()) {
device_sets.push_back(replica_group.replica_ids());
}
return HloReplication::PartiallyReplicated(device_sets);
} else {
return HloReplication::UniqueOnAllDevices();
}
} else {
bool global_id;
if (hlo->opcode() == HloOpcode::kAllReduce) {
global_id = Cast<HloAllReduceInstruction>(hlo)->use_global_device_ids();
} else {
global_id = Cast<HloAllGatherInstruction>(hlo)->use_global_device_ids();
}
if (global_id) {
bool replicated_across_partitions = true;
bool replicated_across_replicas = true;
const int64_t num_partitions =
hlo->GetModule()->config().num_partitions();
absl::flat_hash_set<int64_t> visited_partitions;
absl::flat_hash_set<int64_t> visited_replicas;
for (const auto& group : hlo->replica_groups()) {
visited_partitions.clear();
visited_replicas.clear();
visited_replicas.reserve(group.replica_ids().size());
visited_partitions.reserve(group.replica_ids().size());
for (int64_t id : group.replica_ids()) {
int64_t rid = id / num_partitions;
int64_t pid = id % num_partitions;
visited_partitions.insert(pid);
visited_replicas.insert(rid);
}
replicated_across_partitions &=
visited_partitions.size() == num_partitions;
replicated_across_replicas &=
visited_replicas.size() ==
hlo->GetModule()->config().replica_count();
}
if ((cross_partition_spmd && replicated_across_partitions) ||
(!cross_partition_spmd && replicated_across_replicas)) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
if (cross_partition_spmd) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
}
if (hlo->HasSideEffectNoRecurse()) {
return HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kReplicaId) {
return cross_partition_spmd ? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kPartitionId) {
return cross_partition_spmd ? HloReplication::UniqueOnAllDevices()
: HloReplication::ReplicatedOnAllDevices();
}
auto it = hlo_replication.find(hlo);
if (hlo->opcode() == HloOpcode::kParameter) {
CHECK(it != hlo_replication.end());
return it->second.element(index);
}
if (it != hlo_replication.end() &&
it->second.element(index).IsUniqueOnAllDevices()) {
return it->second.element(index);
}
if (hlo->opcode() == HloOpcode::kConstant) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kCustomCall &&
(hlo->custom_call_target() == "X64SplitLow" ||
hlo->custom_call_target() == "X64SplitHigh" ||
hlo->custom_call_target() == "X64Combine")) {
return merge_operand_replication(hlo);
}
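  // With partial replication enabled, recognize a dynamic-slice of a
  // constant s32 vector indexed by partition-id (or replica-id): all devices
  // that read the same constant element form one replication group.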
if (support_partial_replication) {
if (hlo->opcode() == HloOpcode::kDynamicSlice) {
const HloInstruction* ds_buffer = hlo->operand(0);
if (hlo->dynamic_slice_sizes().size() == 1 &&
hlo->dynamic_slice_sizes()[0] == 1 &&
ds_buffer->opcode() == HloOpcode::kConstant &&
ds_buffer->shape().rank() == 1 &&
ds_buffer->shape().element_type() == PrimitiveType::S32 &&
((cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kPartitionId) ||
(!cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kReplicaId))) {
const HloModule* hlo_module = hlo->GetModule();
int64_t num_devices = cross_partition_spmd
? hlo_module->config().num_partitions()
: hlo_module->config().replica_count();
absl::flat_hash_map<int64_t, std::vector<int64_t>> value_to_device_set;
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
std::optional<int64_t> value =
ds_buffer->literal().GetIntegralAsS64({device_id});
value_to_device_set[*value].push_back(device_id);
}
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.second));
}
return HloReplication::PartiallyReplicated(device_sets);
}
}
}
if (hlo->IsElementwise() ||
hlo->opcode() == HloOpcode::kConcatenate ||
hlo->opcode() == HloOpcode::kConvolution ||
hlo->opcode() == HloOpcode::kDot ||
hlo->opcode() == HloOpcode::kReduce ||
hlo->opcode() == HloOpcode::kBroadcast ||
hlo->opcode() == HloOpcode::kTranspose ||
hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast ||
hlo->opcode() == HloOpcode::kReverse ||
hlo->opcode() == HloOpcode::kGather ||
hlo->opcode() == HloOpcode::kScatter ||
hlo->opcode() == HloOpcode::kIota ||
hlo->opcode() == HloOpcode::kPad ||
hlo->opcode() == HloOpcode::kSlice ||
hlo->opcode() == HloOpcode::kDynamicSlice ||
hlo->opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo->opcode() == HloOpcode::kReduceWindow ||
hlo->opcode() == HloOpcode::kCopy) {
return merge_operand_replication(hlo);
}
return HloReplication::UniqueOnAllDevices();
}
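// Propagates replication through one computation until nothing changes,
// recursing into while, call/fusion, and conditional bodies. While bodies
// are iterated to a fixed point; if a conditional predicate (or a while
// condition not known to iterate the same number of times on every device)
// is not replicated, everything computed inside is marked unique.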
bool HloReplicationAnalysis::ComputeHloReplicationOnComputation(
const HloComputation* computation, bool mark_everything_not_replicated) {
bool changed = false;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto assign_or_combine_shapetree =
[&](ShapeTree<HloReplication>&& to_combine,
const HloInstruction* dest) {
auto it = hlo_replication_.find(dest);
if (it == hlo_replication_.end()) {
hlo_replication_[dest] = std::move(to_combine);
return true;
}
bool updated = false;
it->second.ForEachMutableElement(
[&](const ShapeIndex& index, HloReplication* element) {
HloReplication new_replication =
element->Merge(to_combine.element(index));
if (!element->Equal(new_replication)) {
*element = std::move(new_replication);
updated = true;
}
});
return updated;
};
auto propagate_shapetree = [&](const HloInstruction* source,
const HloInstruction* dest) {
auto source_it = hlo_replication_.find(source);
if (source_it == hlo_replication_.end()) {
return false;
}
return assign_or_combine_shapetree(
ShapeTree<HloReplication>(source_it->second), dest);
};
if (inst->opcode() == HloOpcode::kWhile) {
while (true) {
bool updated = propagate_shapetree(
inst->operand(0),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->while_body()->root_instruction(),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->operand(0), inst->while_body()->parameter_instruction(0));
updated |=
propagate_shapetree(inst->while_body()->root_instruction(),
inst->while_body()->parameter_instruction(0));
updated |= ComputeHloReplicationOnComputation(
inst->while_condition(), mark_everything_not_replicated);
if (!ContainsKey(loops_known_with_same_iterations_, inst) &&
!hlo_replication_[inst->while_condition()->root_instruction()]
.element({})
.IsReplicatedOnAllDevices()) {
updated |= ComputeHloReplicationOnComputation(
            inst->while_body(), /*mark_everything_not_replicated=*/true);
} else {
updated |= ComputeHloReplicationOnComputation(
inst->while_body(), mark_everything_not_replicated);
}
if (!updated) {
break;
}
changed = true;
}
changed |= propagate_shapetree(inst->operand(0), inst);
changed |=
propagate_shapetree(inst->while_body()->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kFusion) {
auto called = inst->called_computations().front();
for (int64_t i = 0; i < inst->operand_count(); ++i) {
changed |= propagate_shapetree(inst->operand(i),
called->parameter_instruction(i));
}
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kConditional) {
for (int64_t i = 0; i < inst->called_computations().size(); ++i) {
changed |= propagate_shapetree(
inst->operand(i + 1),
inst->called_computations()[i]->parameter_instruction(0));
}
if (!hlo_replication_[inst->operand(0)]
.element({})
.IsReplicatedOnAllDevices()) {
for (auto called : inst->called_computations()) {
changed |= ComputeHloReplicationOnComputation(
called,
              /*mark_everything_not_replicated=*/true);
}
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
for (auto called : inst->called_computations()) {
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
}
}
} else if (inst->opcode() == HloOpcode::kTuple) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
for (int64_t i = 0; i < inst->operand_count(); ++i) {
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(i)], {}, {i});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kOptimizationBarrier) {
ShapeTree<HloReplication> shape_tree = hlo_replication_[inst->operand(0)];
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kGetTupleElement) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(0)],
{inst->tuple_index()}, {});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kInfeed && cross_partition_spmd_) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::UniqueOnAllDevices());
if (inst->has_sharding()) {
auto sharding = inst->sharding().GetAsShapeTree(inst->shape());
shape_tree.ForEachMutableElement(
[&sharding](const ShapeIndex& index, HloReplication* data) {
*data = sharding.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else {
if (mark_everything_not_replicated) {
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
ShapeUtil::ForEachSubshape(
inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
*shape_tree.mutable_element(index) =
DetermineHloInstructionIsReplicated(
inst, index, cross_partition_spmd_, hlo_replication_,
support_partial_replication_);
});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
}
}
}
return changed;
}
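// Seeds the analysis at the entry parameters: leaves start as unique; in
// SPMD mode a sharded parameter's leaf becomes replicated iff its sharding
// is replicated, and the parameter_replicated_at_leaf_buffers annotation can
// force a leaf back to unique; in replica mode the annotation alone marks
// leaves replicated. The entry computation is then processed to a fixed
// point.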
absl::Status HloReplicationAnalysis::ComputeHloReplication() {
auto entry = module_->entry_computation();
for (int i = 0; i < entry->num_parameters(); ++i) {
auto param = entry->parameter_instruction(i);
ShapeTree<HloReplication> shape_tree(param->shape(),
HloReplication::UniqueOnAllDevices());
const auto& replication = param->parameter_replicated_at_leaf_buffers();
int leaf_index = 0;
absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(
param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(param->shape(), index)) {
return absl::OkStatus();
}
if (cross_partition_spmd_ && param->has_sharding()) {
TF_ASSIGN_OR_RETURN(auto sharding_tree,
param->sharding().AsShapeTree(param->shape()));
*shape_tree.mutable_element(index) =
sharding_tree.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (replication) {
if (!cross_partition_spmd_ && (*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::ReplicatedOnAllDevices();
}
if (cross_partition_spmd_ && !(*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::UniqueOnAllDevices();
}
++leaf_index;
}
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
hlo_replication_[param] = std::move(shape_tree);
}
ComputeHloReplicationOnComputation(entry,
                                     /*mark_everything_not_replicated=*/false);
return absl::OkStatus();
}
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
return it->second.element(index).IsReplicatedOnAllDevices();
}
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index,
absl::Span<const ReplicaGroup> replica_groups) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
VLOG(5) << "HloInstructionIsReplicatedAt is called on " << inst->name()
<< ", index: " << index.ToString()
<< ", replication: " << it->second.element(index).ToString();
if (replica_groups.empty()) {
return it->second.element(index).IsReplicatedOnAllDevices();
}
if (it->second.element(index).IsReplicatedOnAllDevices()) {
return true;
}
if (it->second.element(index).IsUniqueOnAllDevices()) {
return false;
}
for (const ReplicaGroup& replica_group : replica_groups) {
if (!it->second.element(index).IsReplicatedWithinSubgroup(
replica_group.replica_ids())) {
return false;
}
}
return true;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
return Run(module, cross_partition_spmd, &empty);
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module, bool cross_partition_spmd,
const absl::flat_hash_set<const HloInstruction*>*
loops_known_with_same_iterations) {
auto analysis = absl::WrapUnique(new HloReplicationAnalysis(
module, cross_partition_spmd, loops_known_with_same_iterations,
      /*support_partial_replication=*/false));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::RunWithPartialReplication(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
auto analysis = absl::WrapUnique(
new HloReplicationAnalysis(module, cross_partition_spmd, &empty,
                                 /*support_partial_replication=*/true));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
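// HloReplication is a three-state lattice: replicated on all devices, unique
// per device, or partially replicated. The partial state stores, for each
// device id, the smallest device id in its equivalence set, so two devices
// hold the same value iff they map to the same root.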
HloReplicationAnalysis::HloReplication::HloReplication()
: state_(State::kReplicatedOnAllDevices) {}
HloReplicationAnalysis::HloReplication::HloReplication(
HloReplicationAnalysis::HloReplication::State state,
absl::Span<const int64_t> device_set_root)
: state_(state),
device_set_root_(device_set_root.begin(), device_set_root.end()) {
CHECK(state == State::kPartiallyReplicated || device_set_root_.empty());
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::ReplicatedOnAllDevices() {
return HloReplication(State::kReplicatedOnAllDevices, {});
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::UniqueOnAllDevices() {
return HloReplication(State::kUniqueOnAllDevices, {});
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::PartiallyReplicated(
absl::Span<const absl::Span<const int64_t>> device_sets) {
int64_t max_device_id = 0;
for (const absl::Span<const int64_t>& device_set : device_sets) {
for (int64_t device_id : device_set) {
max_device_id = std::max(max_device_id, device_id);
}
}
std::vector<int64_t> device_set_root;
device_set_root.resize(max_device_id + 1);
for (const absl::Span<const int64_t>& device_set : device_sets) {
int64_t min_device_id = *absl::c_min_element(device_set);
for (int64_t device_id : device_set) {
device_set_root[device_id] = min_device_id;
}
}
return HloReplication(State::kPartiallyReplicated, device_set_root);
}
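// Merge computes the meet of two states: devices stay grouped only if both
// inputs group them. Worked example: merging device sets {{0,1},{2,3}} with
// {{0,2},{1,3}} gives every device a distinct (root, root) pair, so the
// result degrades to UniqueOnAllDevices.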
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::Merge(
const HloReplication& other) const {
switch (state_) {
case State::kReplicatedOnAllDevices:
return other;
case State::kUniqueOnAllDevices:
return *this;
case State::kPartiallyReplicated: {
switch (other.state_) {
case State::kReplicatedOnAllDevices:
return *this;
case State::kUniqueOnAllDevices:
return other;
case State::kPartiallyReplicated: {
absl::flat_hash_map<int64_t, std::vector<int64_t>>
value_to_device_set;
size_t num_devices = device_set_root_.size();
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
int64_t new_value = device_set_root_[device_id] * num_devices +
other.device_set_root_[device_id];
value_to_device_set[new_value].push_back(device_id);
}
CHECK_LE(value_to_device_set.size(), num_devices);
if (value_to_device_set.size() == 1) {
return ReplicatedOnAllDevices();
} else if (value_to_device_set.size() < num_devices) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.second));
}
return PartiallyReplicated(device_sets);
} else {
return UniqueOnAllDevices();
}
}
}
}
}
}
bool HloReplicationAnalysis::HloReplication::Equal(
const HloReplication& other) const {
if (state_ != other.state_) {
return false;
}
return absl::c_equal(device_set_root_, other.device_set_root_);
}
bool HloReplicationAnalysis::HloReplication::IsReplicatedOnAllDevices() const {
return state_ == State::kReplicatedOnAllDevices;
}
bool HloReplicationAnalysis::HloReplication::IsUniqueOnAllDevices() const {
return state_ == State::kUniqueOnAllDevices;
}
bool HloReplicationAnalysis::HloReplication::IsReplicatedWithinSubgroup(
absl::Span<const int64_t> device_ids) const {
if (device_ids.empty()) return true;
return absl::c_all_of(device_ids, [this, &device_ids](int device_id) {
return device_set_root_[device_id] == device_set_root_[device_ids.front()];
});
}
std::string HloReplicationAnalysis::HloReplication::ToString() const {
switch (state_) {
case State::kReplicatedOnAllDevices:
return "ReplicatedOnAllDevices";
case State::kUniqueOnAllDevices:
return "UniqueOnAllDevices";
case State::kPartiallyReplicated:
return absl::StrCat("PartiallyReplicated{",
absl::StrJoin(device_set_root_, ","), "}");
}
}
} | #include "xla/service/hlo_replication_analysis.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloReplicationAnalysisTest : public HloTestBase {};
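// Each test parses an HLO module, seeds the analysis by marking which leaf
// buffers of the entry parameter are replicated via
// set_parameter_replicated_at_leaf_buffers, runs HloReplicationAnalysis, and
// checks the computed replication of individual instructions by name.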
TEST_F(HloReplicationAnalysisTest, NoControlFlow) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}) parameter(0)
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3, all-reduce)
all-reduce-partitions = u32[] all-reduce(replica-id), channel_id=1,
to_apply=sum.u32, replica_groups={{0},{1},{2},{3}}
all-reduce-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
ROOT add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-subgroup"), {}));
}
TEST_F(HloReplicationAnalysisTest, NoControlFlowSPMD) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}, f32[4096,4096]{1,0})
parameter(0), sharding={{maximal device=0}, {replicated}, {replicated}}
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
get-tuple-element.4 = f32[4096,4096]{1,0} get-tuple-element(param), index=2
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
partition-id = u32[] partition-id()
infeed = ((f32[4096,4096]{1,0}, f32[8,8]{1,0}), token[]) infeed(after-all.1),
sharding={{maximal device=0}, {replicated}, {maximal device=0}}
infeed-data = (f32[4096,4096]{1,0}, f32[8,8]{1,0}) get-tuple-element(infeed),
index=0
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed-data),
index=0
get-tuple-element.6 = f32[8,8]{1,0} get-tuple-element(infeed-data), index=1
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot.2 = f32[4096,4096]{1,0} dot(get-tuple-element.4, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
all-reduce.2 = f32[4096,4096]{1,0} all-reduce(dot.2), replica_groups={},
to_apply=sum
all-reduce-subgroup = f32[4096,4096]{1,0} all-reduce(dot),
replica_groups={{0,1},{2,3}}, to_apply=sum
all-reduce-partitions = f32[4096,4096]{1,0} all-reduce(get-tuple-element.2),
channel_id=1, to_apply=sum
all-reduce-partitions.2 = f32[4096,4096]{1,0} all-reduce(get-tuple-element.4),
channel_id=1, to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions)
subtract.2 = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions.2)
all-reduce-same-operand = u32[] all-reduce(replica-id), to_apply=sum.u32
all-reduce-same-operand-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
all-reduce-different-operand = u32[] all-reduce(partition-id),
to_apply=sum.u32
add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
ROOT add.2 = f32[4096,4096]{1,0} add(get-tuple-element.4, subtract.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true, false});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloReplicationAnalysis> analysis,
      HloReplicationAnalysis::Run(module.get(), /*cross_partition_spmd=*/true));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.4"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.6"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "partition-id"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand-subgroup"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-different-operand"), {}));
}
TEST_F(HloReplicationAnalysisTest, NestedCall) {
const std::string module_str = R"(
HloModule NestedCall
fusion_computation {
fusion_p0 = f32[] parameter(0)
fusion_p1 = f32[] parameter(1)
add = f32[] add(fusion_p0, fusion_p0)
multiply = f32[] multiply(add, fusion_p1)
ROOT tuple = (f32[], f32[]) tuple(add, multiply)
}
call_body {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT fusion = (f32[], f32[]) fusion(a, b), kind=kLoop, calls=fusion_computation
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ROOT call = (f32[], f32[]) call(get-tuple-element, get-tuple-element.1), to_apply=call_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, false});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {1}));
}
TEST_F(HloReplicationAnalysisTest, SimpleWhileLoop) {
const std::string module_str = R"(
HloModule SimpleWhileLoop
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY SimpleWhileLoop {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
TEST_F(HloReplicationAnalysisTest,
WhileLoopParameterAliasingNonReplicatedOutput) {
const std::string module_str = R"(
HloModule WhileLoopParameterAliasingNonReplicatedOutput
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
after-all.1 = token[] after-all()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.5, multiply)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(subtract, add)
}
ENTRY WhileLoopParameterAliasingNonReplicatedOutput {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
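// The body below advances the induction variable by replica-id, so the
// condition (and trip count) can diverge across replicas; nothing about the
// while output can be proven replicated, even with replicated inputs.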
TEST_F(HloReplicationAnalysisTest, WhileLoopDifferentCondition) {
const std::string module_str = R"(
HloModule WhileLoopDifferentCondition
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
replica-id = u32[] replica-id()
add = u32[] add(get-tuple-element.6, replica-id)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY WhileLoopDifferentCondition {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
TEST_F(HloReplicationAnalysisTest, SimpleConditional) {
const std::string module_str = R"(
HloModule SimpleConditional
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[]), s32[]) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
get-tuple-element.7 = s32[] get-tuple-element(param), index=3
ROOT conditional = (f32[], f32[]) conditional(get-tuple-element.7, get-tuple-element.4, get-tuple-element.5, get-tuple-element.6), branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, false, true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
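// When the branch index derives from replica-id, different replicas may take
// different branches, so neither the conditional's result nor anything inside
// the branch computations can be proven replicated, regardless of operands.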
TEST_F(HloReplicationAnalysisTest, ConditionalWithDifferentPredicates) {
const std::string module_str = R"(
HloModule ConditionalWithDifferentPredicates
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[])) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
replica-id = u32[] replica-id()
id = s32[] bitcast-convert(replica-id)
ROOT conditional = (f32[], f32[]) conditional(id, get-tuple-element.4,
get-tuple-element.5, get-tuple-element.6),
branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
TEST_F(HloReplicationAnalysisTest, X64SplitCombine) {
const std::string module_str = R"(
HloModule SimpleX64SplitCombine
ENTRY entry {
param = (f64[]) parameter(0)
gte = f64[] get-tuple-element(param), index=0
param-low = f32[] custom-call(gte), custom_call_target="X64SplitLow"
param-high = f32[] custom-call(gte), custom_call_target="X64SplitHigh"
ROOT result-combine = f64[] custom-call(param-low, param-high), custom_call_target="X64Combine"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(absl::Span<const bool>{true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-low"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-high"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "result-combine"), {}));
}
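// An all-reduce whose single group spans both replicas (ar0) produces the same
// value everywhere; per-replica groups {{0},{1}} (ar1) reduce nothing across
// replicas and stay unreplicated.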
TEST_F(HloReplicationAnalysisTest, CrossModuleAndReplicaAllReduce) {
const std::string module_str = R"(
HloModule CrossModuleAndReplicaAllReduce
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
ROOT tuple = (f32[], f32[]) tuple(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/2));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar1"), {}));
}
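// With use_global_device_ids=true, groups are over flattened ids. Assuming the
// usual replica-major flattening (id = replica * num_partitions + partition)
// with 2 replicas x 2 partitions: {{0,1},{2,3}} spans the partitions of each
// replica (ag1: replicated across partitions only), {{0,2},{1,3}} spans the
// replicas of each partition (ag2: the reverse), and {{0,1,2,3}} spans
// everything (ag3: replicated in both senses).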
TEST_F(HloReplicationAnalysisTest, GlobalIdAllGather) {
const std::string module_str = R"(
HloModule GlobalIdAllGather
ENTRY entry {
param = f32[1] parameter(0)
ag1 = f32[2] all-gather(param), replica_groups={{0,1},{2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=1
ag2 = f32[2] all-gather(param), replica_groups={{0,2},{1,3}}, dimensions={0},
use_global_device_ids=true, channel_id=2
ag3 = f32[4] all-gather(param), replica_groups={{0,1,2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=3
ROOT tuple = (f32[2], f32[2], f32[4]) tuple(ag1, ag2, ag3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2,
                                                /*num_partitions=*/2));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloReplicationAnalysis> replica_analysis,
HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/false));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloReplicationAnalysis> partition_analysis,
HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/true));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_FALSE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
}
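// The sliced constant repeats with period 4, so replicas i and i+4 read the
// same element. The slice is therefore replicated within the groups
// {{0,4},{1,5},{2,6},{3,7}} but not within {{0,1,2,3},{4,5,6,7}}, and not
// across all 8 replicas.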
TEST_F(HloReplicationAnalysisTest, PartiallyReplicatedDynamicSlice) {
const std::string module_str = R"(
HloModule PartiallyReplicatedDynamicSlice
ENTRY entry {
constant = s32[8] constant({1, 3, 9, 10, 1, 3, 9, 10})
replica-id = u32[] replica-id()
ROOT dynamic-slice = s32[1] dynamic-slice(constant, replica-id), dynamic_slice_sizes={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, /*replica_count=*/8,
                                                /*num_partitions=*/1));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloReplicationAnalysis> replica_analysis,
HloReplicationAnalysis::RunWithPartialReplication(
module.get(),
          /*cross_partition_spmd=*/false));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}));
std::vector<ReplicaGroup> replica_groups(4);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(4);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(5);
replica_groups[2].add_replica_ids(2);
replica_groups[2].add_replica_ids(6);
replica_groups[3].add_replica_ids(3);
replica_groups[3].add_replica_ids(7);
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups));
std::vector<ReplicaGroup> replica_groups_2(2);
replica_groups_2[0].add_replica_ids(0);
replica_groups_2[0].add_replica_ids(1);
replica_groups_2[0].add_replica_ids(2);
replica_groups_2[0].add_replica_ids(3);
replica_groups_2[1].add_replica_ids(4);
replica_groups_2[1].add_replica_ids(5);
replica_groups_2[1].add_replica_ids(6);
replica_groups_2[1].add_replica_ids(7);
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups_2));
}
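// opt-barrier is transparent to the analysis: replication of each tuple
// element flows through it, so gte.0 (fed by the cross-replica all-reduce)
// stays replicated while gte.1 does not.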
TEST_F(HloReplicationAnalysisTest, OptimizationBarrier) {
const std::string module_str = R"(
HloModule OptimizationBarrier
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
tuple = (f32[], f32[]) tuple(ar0, ar1)
opt-barrier = (f32[], f32[]) opt-barrier(tuple)
gte.0 = f32[] get-tuple-element(opt-barrier), index=0
gte.1 = f32[] get-tuple-element(opt-barrier), index=1
ROOT tuple.1 = (f32[], f32[]) tuple(gte.0, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           module_str, /*replica_count=*/2));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
                              module.get(), /*cross_partition_spmd=*/false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.1"), {}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_replication_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_replication_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc5b1af9-28e1-4a89-9dd4-8d8871cdac9a | cpp | tensorflow/tensorflow | hlo_proto_util | third_party/xla/xla/service/hlo_proto_util.cc | third_party/xla/xla/service/hlo_proto_util_test.cc | #include "xla/service/hlo_proto_util.h"
#include <memory>
#include <string>
#include <vector>
#include "xla/service/hlo_verifier.h"
#include "xla/util.h"
namespace xla {
HloProto MakeHloProto(const HloModule& module,
const BufferAssignment& assignment) {
BufferAssignmentProto proto_assignment = assignment.ToProto();
HloProto proto = MakeHloProto(module);
proto.mutable_buffer_assignment()->Swap(&proto_assignment);
return proto;
}
HloProto MakeHloProto(const HloModule& module) {
HloModuleProto proto_module = module.ToProto();
HloProto proto;
proto.mutable_hlo_module()->Swap(&proto_module);
return proto;
}
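// Deserializes an HloModuleProto into an HloModule and runs the verifier over
// it; `is_module_post_optimizations` is forwarded as the verifier's
// mixed-precision toggle. A hypothetical call site (a sketch, not part of this
// file; the config would normally be derived from the proto):
//
//   HloModuleConfig config(ProgramShape(proto.host_program_shape()));
//   TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
//                       CreateModuleFromProto(proto, config));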
absl::StatusOr<std::unique_ptr<HloModule>> CreateModuleFromProto(
const HloModuleProto& proto, const HloModuleConfig& module_config,
bool is_module_post_optimizations) {
VLOG(4) << proto.ShortDebugString();
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(proto, module_config));
TF_RETURN_IF_ERROR(
      HloVerifier(/*layout_sensitive=*/false,
                  /*allow_mixed_precision=*/is_module_post_optimizations)
.Run(module.get())
.status());
return module;
}
absl::StatusOr<std::vector<const ShapeProto*>> EntryComputationParameterShapes(
const HloProto& hlo_proto) {
if (!hlo_proto.has_hlo_module()) {
return NotFound("HloProto missing HloModuleProto.");
}
if (!hlo_proto.hlo_module().has_host_program_shape()) {
return NotFound("HloProto missing program shape.");
}
std::vector<const ShapeProto*> parameter_shapes;
const auto& program_shape = hlo_proto.hlo_module().host_program_shape();
for (const ShapeProto& shape : program_shape.parameters()) {
parameter_shapes.push_back(&shape);
}
return parameter_shapes;
}
absl::StatusOr<const ShapeProto*> EntryComputationOutputShape(
const HloProto& hlo_proto) {
if (!hlo_proto.has_hlo_module()) {
return NotFound("HloProto missing HloModuleProto.");
}
if (!hlo_proto.hlo_module().has_host_program_shape()) {
return NotFound("HloProto missing program shape.");
}
if (!hlo_proto.hlo_module().host_program_shape().has_result()) {
return NotFound("HloProto missing result in its program shape");
}
return &hlo_proto.hlo_module().host_program_shape().result();
}
} | #include "xla/service/hlo_proto_util.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloProtoUtilTest : public ::testing::Test {};
TEST_F(HloProtoUtilTest, ParamsAndOutputShapeMissingModule) {
HloProto hlo_proto;
auto status = EntryComputationParameterShapes(hlo_proto).status();
ASSERT_FALSE(status.ok());
ASSERT_THAT(status.message(), ::testing::HasSubstr("missing HloModuleProto"));
}
TEST_F(HloProtoUtilTest, MissingProgramShape) {
HloProto hlo_proto;
HloModuleProto* module = hlo_proto.mutable_hlo_module();
module->set_name("entry");
auto status = EntryComputationParameterShapes(hlo_proto).status();
ASSERT_FALSE(status.ok());
ASSERT_THAT(status.message(), ::testing::HasSubstr("missing program shape"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_proto_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_proto_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10c4bede-30e8-4734-9dc4-15868981b86b | cpp | tensorflow/tensorflow | async_collective_creator | third_party/xla/xla/service/async_collective_creator.cc | third_party/xla/xla/service/async_collective_creator_test.cc | #include "xla/service/async_collective_creator.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
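// The start/done instruction pair that replaces one synchronous collective.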
struct ReplacedAsync {
HloInstruction* start;
HloInstruction* done;
};
absl::StatusOr<ReplacedAsync> CreateAsyncAllReduce(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ar = Cast<HloAllReduceInstruction>(instruction);
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllReduceStart(
ar->shape(), ar->operands(), ar->to_apply(), ar->device_list(),
ar->constrain_layout(), ar->channel_id(),
ar->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ar->shape(), HloOpcode::kAllReduceDone, start));
return ReplacedAsync{start, done};
}
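// all-gather-start's shape is an (operands, result) tuple: the first element
// mirrors the input shape(s) (itself a tuple when there are multiple operands)
// and the second is the gathered output that all-gather-done extracts.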
absl::StatusOr<ReplacedAsync> CreateAsyncAllGather(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ag = Cast<HloAllGatherInstruction>(instruction);
std::vector<const Shape*> operand_shapes;
operand_shapes.reserve(ag->operand_count());
for (const HloInstruction* op : ag->operands()) {
operand_shapes.push_back(&op->shape());
}
Shape shape = ShapeUtil::MakeTupleShape(
{ag->operand_count() > 1
? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)
: *operand_shapes[0],
ag->shape()});
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllGatherStart(
shape, ag->operands(), ag->all_gather_dimension(), ag->device_list(),
ag->constrain_layout(), ag->channel_id(),
ag->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ag->shape(), HloOpcode::kAllGatherDone, start));
return ReplacedAsync{start, done};
}
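// Handles both collective-permute forms: the 1-operand variant infers the
// start shape from the input alone, while the 4-operand in-place variant
// (input, output buffer, input offsets, output offsets) also threads through
// slice sizes and any disjoint-read/write-regions attribute.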
absl::StatusOr<ReplacedAsync> CreateAsyncCollectivePermute(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
auto* cp = Cast<HloCollectivePermuteInstruction>(instruction);
HloInstruction* start;
HloInstruction* operand = cp->mutable_operand(0);
if (cp->operand_count() == 1) {
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(
{&operand->shape()}, context_shapes)
.value(),
operand, cp->source_target_pairs(), cp->channel_id()));
} else {
CHECK_EQ(cp->operand_count(), 4);
std::vector<const Shape*> operand_shapes;
absl::c_transform(
cp->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(operand_shapes,
context_shapes)
.value(),
operand, cp->mutable_operand(1), cp->mutable_operand(2),
cp->mutable_operand(3), cp->source_target_pairs(),
cp->dynamic_slice_sizes_list(), cp->channel_id()));
if (HasDisjointReadWriteRegionsAttr(cp)) {
SetDisjointReadWriteRegionsAttr(start);
}
}
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
cp->shape(), HloOpcode::kCollectivePermuteDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncStartDone(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(instruction, context_shapes,
HloInstruction::kMainExecutionThread,
                                           /*replace=*/false));
HloInstruction* start = done->mutable_operand(0);
return ReplacedAsync{start, done};
}
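// Total byte size of a shape, summed recursively over tuple elements; used to
// gate all-reduce conversion on all_reduce_min_threshold_in_bytes.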
int64_t GetShapeSize(const Shape& shape) {
int64_t size_in_bytes = 0;
if (shape.IsTuple()) {
for (int64_t i = 0; i < shape.tuple_shapes_size(); ++i) {
size_in_bytes += GetShapeSize(shape.tuple_shapes(i));
}
return size_in_bytes;
}
return ShapeUtil::ByteSizeOfElements(shape);
}
}
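// Collects the synchronous collectives in `computation` that the per-opcode
// predicates in `config_` allow converting; all-reduce must additionally meet
// the size threshold.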
std::vector<HloInstruction*> AsyncCollectiveCreator::MatchCollectives(
HloComputation* computation) {
std::vector<HloInstruction*> supported_collectives;
for (HloInstruction* instruction : computation->instructions()) {
const HloOpcode op = instruction->opcode();
if ((op == HloOpcode::kAllReduce &&
config_.convert_all_reduce(instruction) &&
GetShapeSize(instruction->shape()) >=
config_.all_reduce_min_threshold_in_bytes) ||
(op == HloOpcode::kAllGather &&
config_.convert_all_gather(instruction)) ||
(op == HloOpcode::kCollectiveBroadcast &&
config_.convert_collective_broadcast(instruction)) ||
(op == HloOpcode::kCollectivePermute &&
config_.convert_collective_permute(instruction)) ||
(op == HloOpcode::kAllToAll &&
config_.convert_all_to_all(instruction)) ||
(op == HloOpcode::kReduceScatter &&
config_.convert_reduce_scatter(instruction))) {
supported_collectives.push_back(instruction);
}
}
return supported_collectives;
}
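// Rewrites each matched collective into its start/done pair, copying metadata,
// backend config, and control dependencies onto the new instructions, and
// splices the pair into the schedule when the computation is scheduled.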
absl::StatusOr<bool> AsyncCollectiveCreator::ReplaceCollectives(
HloComputation* computation,
std::vector<HloInstruction*>& supported_collectives) {
bool changed = false;
HloModule* module = computation->parent();
absl::flat_hash_map<HloInstruction*, ReplacedAsync> replaced_pairs;
const bool should_update_schedule =
module->has_schedule() &&
module->schedule().is_computation_scheduled(computation);
for (HloInstruction* instruction : supported_collectives) {
absl::StatusOr<ReplacedAsync> async_pair;
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
async_pair = CreateAsyncAllReduce(instruction);
break;
case HloOpcode::kAllGather:
async_pair = CreateAsyncAllGather(instruction);
break;
case HloOpcode::kCollectivePermute:
async_pair = CreateAsyncCollectivePermute(
instruction, config_.get_context_shapes(instruction));
break;
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kAllToAll:
case HloOpcode::kReduceScatter:
async_pair = CreateAsyncStartDone(
instruction, config_.get_context_shapes(instruction));
break;
default:
return Internal("Unexpected opcode %s",
HloOpcodeString(instruction->opcode()));
}
TF_RETURN_IF_ERROR(async_pair.status());
async_pair->start->set_metadata(instruction->metadata());
async_pair->start->CopyBackendConfigFrom(instruction);
if (should_update_schedule) {
replaced_pairs[instruction] = *async_pair;
}
TF_RETURN_IF_ERROR(
instruction->CopyAllControlDepsTo(async_pair->start, async_pair->done));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
computation->ReplaceInstruction(instruction, async_pair->done),
"replacing ", instruction->ToShortString());
changed = true;
}
if (should_update_schedule) {
std::vector<HloInstruction*> new_sequence;
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
new_sequence.reserve(sequence.size() + replaced_pairs.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_pairs.find(instr);
if (it != replaced_pairs.end()) {
new_sequence.push_back(it->second.start);
new_sequence.push_back(it->second.done);
continue;
}
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
}
return changed;
}
absl::StatusOr<bool> AsyncCollectiveCreator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
int64_t collectives_replaced = 0;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::vector<HloInstruction*> supported_collectives =
MatchCollectives(computation);
if (supported_collectives.empty()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool comp_changed,
ReplaceCollectives(computation, supported_collectives));
collectives_replaced += supported_collectives.size();
changed |= comp_changed;
}
VLOG(1) << "Replaced " << collectives_replaced
<< " sync collectives with async versions.";
return changed;
}
} | #include "xla/service/async_collective_creator.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::NotNull;
using ::testing::SizeIs;
using AsyncAllReduceCreatorTest = HloTestBase;
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[1024] parameter(0)
ROOT ar = f32[1024] all-reduce(p0), to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
config.all_reduce_min_threshold_in_bytes = 4096;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllReduceDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllReduceStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllGather) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ROOT ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllGatherDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllGatherStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleInPlaceCollectivePermute) {
std::string hlo_string = std::string(R"(
HloModule module
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="SomeCustomCall"
ROOT %collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, f32[4,4,128]{2,1,0:T(4,128)} %custom-call, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}
}
)");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 7);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermuteScheduled) {
constexpr absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
const int64_t original_instr_sequence_size =
hlo_module->schedule().sequence(hlo_module->entry_computation()).size();
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(
hlo_module->schedule().sequence(hlo_module->entry_computation()).size(),
original_instr_sequence_size + 1);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectiveBroadcast) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT cb = f32[8,16] collective-broadcast(p0), replica_groups={{7,0,1,2,3,4,5,6}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_broadcast = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kCollectiveBroadcast);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllToAll) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[8,16] all-to-all(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_to_all = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kAllToAll);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleReduceScatter) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[1,16] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_reduce_scatter = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kReduceScatter);
}
TEST_F(AsyncAllReduceCreatorTest, ControlPredecessor) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, control-predecessors={p0}
p1 = f32[1] parameter(1), control-predecessors={ag}
ROOT sum = add(ag, ag)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(
RunHloPass(AsyncCollectiveCreator(config), hlo_module.get()).status());
SCOPED_TRACE(hlo_module->ToString());
HloInstruction* start;
HloInstruction* done;
ASSERT_THAT(
hlo_module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Op(),
m::Op(&done)
.WithOpcode(HloOpcode::kAllGatherDone)
.WithOperand(0, m::Op(&start).WithOpcode(
HloOpcode::kAllGatherStart)))));
EXPECT_EQ(start->control_successors().size(), 0);
ASSERT_EQ(start->control_predecessors().size(), 1);
EXPECT_THAT(start->control_predecessors()[0], GmockMatch(m::Parameter(0)));
EXPECT_EQ(done->control_predecessors().size(), 0);
ASSERT_EQ(done->control_successors().size(), 1);
EXPECT_THAT(done->control_successors()[0], GmockMatch(m::Parameter(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/async_collective_creator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/async_collective_creator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c50ee19-4508-40ba-8d7f-ca5923c7ad9a | cpp | tensorflow/tensorflow | latency_hiding_scheduler | third_party/xla/xla/service/latency_hiding_scheduler.cc | third_party/xla/xla/service/latency_hiding_scheduler_test.cc | #include "xla/service/latency_hiding_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/map_util.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
const int64_t kDefaultMemorySpace = 0;
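// Instructions the scheduler treats as free: they neither occupy resources nor
// contribute latency. A tuple whose only user is a while is also treated as a
// no-op, since it only packages the loop's operands.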
bool IsNopInstruction(const HloInstruction& hlo) {
HloOpcode op = hlo.opcode();
return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||
op == HloOpcode::kConstant || op == HloOpcode::kParameter ||
op == HloOpcode::kBroadcast || op == HloOpcode::kIota ||
hlo.IsEffectiveBitcast() ||
(op == HloOpcode::kTuple && hlo.user_count() == 1 &&
hlo.users().front()->opcode() == HloOpcode::kWhile);
}
bool InstructionDefinesValue(const HloInstruction* instruction,
const HloValue* value) {
if (value->defining_instruction() == instruction) {
return true;
}
if (value->shape().has_layout() &&
value->shape().layout().memory_space() != kDefaultMemorySpace) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart) {
if (instruction->async_wrapped_opcode() == HloOpcode::kCall) {
return instruction->async_wrapped_instruction()
->called_computations()[0]
->root_instruction() == value->defining_instruction();
}
return instruction->async_wrapped_instruction() ==
value->defining_instruction();
}
return false;
}
bool InstructionFirstDefinesBuffer(
const HloInstruction* instruction,
const BufferInfoTracker::ValueInfo& buffer_value_info) {
if (buffer_value_info.first_definition == instruction) {
return true;
}
if (buffer_value_info.value->values()[0]->shape().has_layout() &&
buffer_value_info.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart) {
if (instruction->async_wrapped_opcode() == HloOpcode::kCall) {
return instruction->async_wrapped_instruction()
->called_computations()[0]
->root_instruction() == buffer_value_info.first_definition;
}
return instruction->async_wrapped_instruction() ==
buffer_value_info.first_definition;
}
return false;
}
}
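// Canonicalizes async ops to an (outer, inner) opcode pair so that dedicated
// start/done opcodes (e.g. all-reduce-start) and generic async wrappers are
// treated uniformly; an async-wrapped call canonicalizes to the opcode of the
// called computation's root.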
CanonicalAsyncOp DefaultGetCanonicalAsyncOp(const HloInstruction& hlo) {
switch (hlo.opcode()) {
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncDone:
if (hlo.async_wrapped_opcode() == HloOpcode::kCall) {
return {hlo.opcode(), hlo.async_wrapped_instruction()
->called_computations()[0]
->root_instruction()
->opcode()};
}
return {hlo.opcode(), hlo.async_wrapped_opcode()};
case HloOpcode::kAllReduceStart:
return {HloOpcode::kAsyncStart, HloOpcode::kAllReduce};
case HloOpcode::kAllGatherStart:
return {HloOpcode::kAsyncStart, HloOpcode::kAllGather};
case HloOpcode::kCollectivePermuteStart:
return {HloOpcode::kAsyncStart, HloOpcode::kCollectivePermute};
case HloOpcode::kCopyStart:
return {HloOpcode::kAsyncStart, HloOpcode::kCopy};
case HloOpcode::kCopyDone:
return {HloOpcode::kAsyncDone, HloOpcode::kCopy};
case HloOpcode::kAllReduceDone:
return {HloOpcode::kAsyncDone, HloOpcode::kAllReduce};
case HloOpcode::kAllGatherDone:
return {HloOpcode::kAsyncDone, HloOpcode::kAllGather};
case HloOpcode::kCollectivePermuteDone:
return {HloOpcode::kAsyncDone, HloOpcode::kCollectivePermute};
default:
return {hlo.opcode(), hlo.opcode()};
}
}
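// For example (a sketch; HLO names illustrative):
//   %ars = f32[8] all-reduce-start(%p0), to_apply=%sum
// canonicalizes to {kAsyncStart, kAllReduce} and the matching all-reduce-done
// to {kAsyncDone, kAllReduce}, reducing IsAsyncPair below to an inner-opcode
// comparison.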
bool LatencyEstimator::IsAsyncPair(const HloGraphNode& from,
const HloGraphNode& target) const {
CanonicalAsyncOp from_op = GetCanonicalAsyncOp(from.GetInstr());
CanonicalAsyncOp target_op = GetCanonicalAsyncOp(target.GetInstr());
return from_op.outer == HloOpcode::kAsyncStart &&
target_op.outer == HloOpcode::kAsyncDone &&
from_op.inner == target_op.inner;
}
bool LatencyEstimator::IsP2pPair(const HloGraphNode& from,
const HloGraphNode& target) const {
return (from.GetInstr().opcode() == HloOpcode::kSend &&
target.GetInstr().opcode() == HloOpcode::kSendDone) ||
(from.GetInstr().opcode() == HloOpcode::kRecv &&
target.GetInstr().opcode() == HloOpcode::kRecvDone);
}
LatencyEstimator::TimeCost ApproximateLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& target) const {
if (IsAsyncPair(from, target)) {
return kHighLatency;
}
return kLowLatency;
}
LatencyEstimator::TimeCost ApproximateLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (instr->IsLoopFusion()) {
return kMediumCost;
}
if (instr->IsOutputFusion() || instr->opcode() == HloOpcode::kConvolution) {
return kHighCost;
}
return kLowCost;
}
bool AsyncTracker::IsSupportedAsyncDone(const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
if (op.outer == HloOpcode::kSendDone || op.outer == HloOpcode::kRecvDone) {
return config_.schedule_send_recvs;
}
if (op.outer == HloOpcode::kAsyncDone) {
if (hlo.IsAsynchronous() &&
hlo.async_execution_thread() != hlo.parent()->execution_thread()) {
return true;
}
switch (op.inner) {
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCopy:
case HloOpcode::kReduceScatter:
return true;
default:
return false;
}
}
return false;
}
bool AsyncTracker::IsSupportedAsyncStart(const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
if (op.outer == HloOpcode::kSend || op.outer == HloOpcode::kRecv) {
return config_.schedule_send_recvs;
}
if (op.outer == HloOpcode::kAsyncStart) {
if (hlo.IsAsynchronous() &&
hlo.async_execution_thread() != hlo.parent()->execution_thread()) {
return true;
}
switch (op.inner) {
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCopy:
case HloOpcode::kReduceScatter:
return true;
default:
return false;
}
}
return false;
}
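// Maps an instruction to the (resource, usage) pairs it implies. Note the
// polarity: an async start *releases* its resource while the matching done
// *occupies* it, which is consistent with a scheduler that builds the sequence
// bottom-up (the done is scheduled before its start).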
ResourcesVector AsyncTracker::GetResourcesFromInstructionImpl(
const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
auto get_resource_for_op = [](HloOpcode op) -> ResourceType {
switch (op) {
case HloOpcode::kAllReduce:
return ResourceType::kAllReduce;
case HloOpcode::kAllGather:
return ResourceType::kAllGather;
case HloOpcode::kAllToAll:
return ResourceType::kAllToAll;
case HloOpcode::kCollectiveBroadcast:
return ResourceType::kCollectiveBroadcast;
case HloOpcode::kCollectivePermute:
return ResourceType::kCollectivePermute;
case HloOpcode::kCopy:
return ResourceType::kCopy;
case HloOpcode::kReduceScatter:
return ResourceType::kReduceScatter;
default:
return ResourceType::kNoResource;
}
};
if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {
ResourceType type = get_resource_for_op(op.inner);
if (type == ResourceType::kNoResource) {
return {};
}
ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
return {std::make_pair(ResourceTypeToIndex(type), usage)};
}
switch (hlo.opcode()) {
case HloOpcode::kAfterAll:
return ResourcesVector{
std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kNoResource)};
case HloOpcode::kRecv:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(&hlo)->is_host_transfer()
? std::make_pair(
config_.force_send_recv_to_use_same_resource
? ResourceTypeToIndex(ResourceType::kSendHost)
: ResourceTypeToIndex(ResourceType::kRecvHost),
ResourceUsageType::kResourceRelease)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceRelease)};
case HloOpcode::kSend:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(&hlo)->is_host_transfer()
? std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kResourceRelease)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceRelease)};
case HloOpcode::kRecvDone:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(hlo.operand(0))
->is_host_transfer()
? std::make_pair(
config_.force_send_recv_to_use_same_resource
? ResourceTypeToIndex(ResourceType::kSendHost)
: ResourceTypeToIndex(ResourceType::kRecvHost),
ResourceUsageType::kResourceOccupy)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceOccupy)};
case HloOpcode::kSendDone:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(hlo.operand(0))
->is_host_transfer()
? std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kResourceOccupy)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceOccupy)};
default:
return ResourcesVector{};
}
}
ResourcesVector AsyncTracker::GetResourcesFromInstruction(
const HloInstruction& hlo) const {
if (!resources_cache_.contains(&hlo)) {
resources_cache_.insert({&hlo, GetResourcesFromInstructionImpl(hlo)});
}
return resources_cache_.at(&hlo);
}
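// Counts how many times `resource_type` is occupied by `instr`. For async
// start/done and instructions without called computations this is 0 or 1;
// otherwise it recursively counts occupations from supported async-done ops
// inside called computations, memoizing per-computation results in
// async_in_computation_cache_.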
int64_t AsyncTracker::GetNumResourcesPerInstruction(
ResourceType resource_type, const HloInstruction& instr) const {
return GetNumResourcesPerInstruction(ResourceTypeToIndex(resource_type),
instr);
}
int64_t AsyncTracker::GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const {
if (instr.called_computations().empty() ||
instr.opcode() == HloOpcode::kAsyncStart ||
instr.opcode() == HloOpcode::kAsyncDone) {
return absl::c_any_of(GetResourcesFromInstruction(instr),
[resource_type](const ResourcePair& resource) {
return resource.second ==
ResourceUsageType::kResourceOccupy &&
(resource_type == resource.first);
})
? 1
: 0;
}
std::function<void(const HloComputation*)> recursively_compute_resource_map =
[this,
&recursively_compute_resource_map](const HloComputation* computation) {
absl::flat_hash_map<int64_t, int64_t> per_opcode_map;
for (HloInstruction* instr : computation->instructions()) {
if (IsSupportedAsyncDone(*instr)) {
for (auto& resource : GetResourcesFromInstruction(*instr)) {
++per_opcode_map[resource.first];
}
}
for (const HloComputation* called_comp :
instr->called_computations()) {
auto it = async_in_computation_cache_.find(called_comp);
if (it == async_in_computation_cache_.end()) {
recursively_compute_resource_map(called_comp);
it = async_in_computation_cache_.find(called_comp);
CHECK(it != async_in_computation_cache_.end());
}
for (auto& called_per_opcode_pair : it->second) {
per_opcode_map[called_per_opcode_pair.first] +=
called_per_opcode_pair.second;
}
}
}
async_in_computation_cache_[computation] = std::move(per_opcode_map);
};
int64_t num_resources = 0;
for (const HloComputation* computation : instr.called_computations()) {
auto it = async_in_computation_cache_.find(computation);
if (it == async_in_computation_cache_.end()) {
recursively_compute_resource_map(computation);
it = async_in_computation_cache_.find(computation);
CHECK(it != async_in_computation_cache_.end());
}
auto opcode_it = it->second.find(resource_type);
if (opcode_it == it->second.end()) {
continue;
}
num_resources += opcode_it->second;
}
return num_resources;
}
void AsyncTracker::SetConcurrentResourceLimits(
absl::flat_hash_map<int64_t, int64_t>& max_concurrent_resource) const {
max_concurrent_resource[ResourceTypeToIndex(
ResourceType::kCollectiveBroadcast)] =
config_.collective_broadcast_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(
ResourceType::kCollectivePermute)] =
config_.collective_permute_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kCopy)] =
config_.copy_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllToAll)] =
config_.all_to_all_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllGather)] =
config_.all_gather_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllReduce)] =
config_.all_reduce_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kReduceScatter)] =
config_.reduce_scatter_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kSendRecv)] =
config_.send_recv_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kSendHost)] =
config_.send_recv_host_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kRecvHost)] =
config_.send_recv_host_overlap_limit;
const int64_t first_target_resource =
AsyncTracker::GetFirstTargetDefinedResource();
for (int64_t i = 0; i < GetNumTargetDefinedResources(); ++i) {
max_concurrent_resource[first_target_resource + i] =
GetNumAvailableResources(first_target_resource + i);
}
}
absl::string_view AsyncTracker::GetResourceName(int64_t resource_type) const {
switch (resource_type) {
case ResourceTypeToIndex(ResourceType::kNoResource):
return "kNoResource";
case ResourceTypeToIndex(ResourceType::kAllToAll):
return "kAllToAll";
case ResourceTypeToIndex(ResourceType::kAllGather):
return "kAllGather";
case ResourceTypeToIndex(ResourceType::kAllReduce):
return "kAllReduce";
case ResourceTypeToIndex(ResourceType::kCollectiveBroadcast):
return "kCollectiveBroadcast";
case ResourceTypeToIndex(ResourceType::kCollectivePermute):
return "kCollectivePermute";
case ResourceTypeToIndex(ResourceType::kCopy):
return "kCopy";
case ResourceTypeToIndex(ResourceType::kSendRecv):
return "kSendRecv";
case ResourceTypeToIndex(ResourceType::kSendHost):
return "kSendHost";
case ResourceTypeToIndex(ResourceType::kRecvHost):
return "kRecvHost";
case ResourceTypeToIndex(ResourceType::kReduceScatter):
return "kReduceScatter";
default:
return "Not a valid default resource";
}
}
absl::string_view AsyncTracker::GetResourceUsageName(
ResourceUsageType resource_usage_type) const {
return GetResourceUsageName(ResourceUsageTypeToIndex(resource_usage_type));
}
ResourceHazardType AsyncTracker::GetResourceHazardType(
int64_t resource_type) const {
return ResourceHazardType::kUnshareable;
}
absl::string_view AsyncTracker::GetResourceUsageName(
int64_t resource_usage_type) const {
switch (resource_usage_type) {
case ResourceUsageTypeToIndex(ResourceUsageType::kNoResource):
return "kNoResource";
case ResourceUsageTypeToIndex(ResourceUsageType::kResourceOccupy):
return "kResourceOccupy";
case ResourceUsageTypeToIndex(ResourceUsageType::kResourceRelease):
return "kResourceRelease";
default:
return "Not a valid resource usage type";
}
}
int64_t AsyncTracker::GetNumTargetDefinedResources() const { return 0; }
int64_t AsyncTracker::GetNumAvailableResources(int64_t resource_type) const {
return 0;
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetReleasedShareableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetOccupiedShareableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetOccupiedSerialResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetReleasedNonextendableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
bool AsyncTracker::ReleasesSelectiveResource(const HloGraphNode* node) const {
return absl::c_any_of(
node->GetResources(), [&](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceRelease &&
GetResourceHazardType(resource.first) ==
ResourceHazardType::kSelective;
});
}
bool AsyncTracker::OccupiesSelectiveResource(const HloGraphNode* node) const {
return absl::c_any_of(
node->GetResources(), [&](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceOccupy &&
GetResourceHazardType(resource.first) ==
ResourceHazardType::kSelective;
});
}
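// Precomputes, for every HloBuffer, its size and first-defining instruction by
// walking each scheduled computation's sequence once (fusion computations are
// skipped).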
BufferInfoTracker::BufferInfoTracker(
const HloModule* module, const HloAliasAnalysis* alias_analysis,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes) {
buffer_infos_.resize(alias_analysis->buffers().back().id() + 1);
std::function<void(const HloComputation*)> process_computation =
[&process_computation, module, alias_analysis, this,
&shape_size_bytes](const HloComputation* computation) {
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
for (int idx = 0; idx < sequence.size(); ++idx) {
const HloInstruction* instruction = sequence.instructions()[idx];
for (auto* called_computation : instruction->called_computations()) {
if (called_computation->IsFusionComputation()) {
continue;
}
process_computation(called_computation);
}
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
for (const HloBuffer* buffer :
alias_analysis->ComputeBuffersAt(instruction, index)) {
if (buffer_infos_[buffer->id()].value == nullptr) {
buffer_infos_[buffer->id()] =
CreateBufferInfo(buffer, instruction, shape_size_bytes);
}
}
});
}
};
process_computation(module->entry_computation());
}
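// Walks each scheduled computation bottom-up, threading the live-buffer set
// into called computations, and caches the resulting peak-memory state per
// computation for later use during scheduling.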
void ModulePressureState::InitializePressureStates() {
memory_pressure_states_.clear();
std::function<void(HloComputation*,
const MemoryPressureTracker::LiveBufferSet&)>
process_computation = [this, &process_computation](
HloComputation* computation,
const MemoryPressureTracker::LiveBufferSet&
initial_live_buffers) {
const HloInstructionSequence& sequence =
module_->schedule().sequence(computation);
MemoryPressureTracker tracker(hlo_alias_analysis_, buffer_tracker_,
memory_pressure_states_);
tracker.Initialize(computation, initial_live_buffers);
VLOG(6) << "Pressure at bottom for " << computation->name() << ": "
<< tracker.memory_usage();
for (int idx = sequence.size() - 1; idx >= 0; --idx) {
const HloInstruction* instruction = sequence.instructions()[idx];
if (!instruction->called_computations().empty()) {
for (auto* called_computation :
instruction->called_computations()) {
if (called_computation->IsFusionComputation()) {
continue;
}
process_computation(called_computation, tracker.live_buffers());
}
}
VLOG(10) << "Instruction: " << instruction->ToString();
VLOG(10) << "Pressure change: "
<< tracker.MemoryPressureDifference(instruction).first;
VLOG(10) << "Current usage: " << tracker.memory_usage();
tracker.UpdateBuffers(instruction);
VLOG(10) << "Current usage after update: " << tracker.memory_usage();
VLOG(10) << "Current peak after update: "
<< tracker.pressure_state().memory_peak;
}
VLOG(6) << "Pressure peak for " << computation->name() << ": "
<< tracker.pressure_state().memory_peak;
UpdatePressureStateForComputation(computation,
tracker.pressure_state());
};
process_computation(module_->entry_computation(), {});
}
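// Resets the tracker for `computation`: precomputes per-instruction output
// and defined buffers, then seeds the live set (and the initial pressure)
// from the buffers live at the bottom of the sequence. Buffers placed outside
// the default memory space are not tracked.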
void MemoryPressureTracker::Initialize(
const HloComputation* computation,
const LiveBufferSet& initial_live_buffers) {
live_memory_usage_ = 0;
initial_memory_pressure_ = 0;
pressure_state_ = MemoryPressureState{};
output_buffers_.clear();
defined_buffers_.clear();
live_buffers_set_.clear();
for (auto* instruction : computation->instructions()) {
auto& output_values = this->output_buffers_[instruction];
auto& defined_values = this->defined_buffers_[instruction];
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
for (const HloBuffer* buffer :
hlo_alias_analysis_->ComputeBuffersAt(instruction, index)) {
output_values.push_back(std::make_pair(
buffer_tracker_.GetBufferInfo(buffer->id()), index));
if (absl::c_any_of(buffer->values(), [&](const HloValue* value) {
return InstructionDefinesValue(instruction, value);
})) {
defined_values.push_back(
buffer_tracker_.GetBufferInfo(buffer->id()));
}
}
});
}
if (!initial_live_buffers.empty()) {
for (HloBuffer::Id id : initial_live_buffers) {
auto& buffer = buffer_tracker_.GetBufferInfo(id);
if (buffer.value->values()[0]->shape().has_layout() &&
buffer.value->values()[0]->shape().layout().memory_space() != 0) {
continue;
}
live_buffers_[buffer.value->id()] = 1;
initial_memory_pressure_ += buffer.buffer_size;
}
live_buffers_set_ = initial_live_buffers;
} else {
absl::c_fill(live_buffers_, 0);
}
pressure_state_.live_ids_at_bottom = live_buffers_set_;
}
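// Accounts for `instruction` being scheduled (sequences are processed in
// reverse order): its operands' buffers become live, buffers it first defines
// are released, and the peaks of any called computations are folded into the
// tracked peak.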
void MemoryPressureTracker::UpdateBuffers(const HloInstruction* instruction) {
int64_t computations_peak = 0;
for (auto* called_comp : instruction->called_computations()) {
if (called_comp->IsFusionComputation()) {
continue;
}
auto it = pressure_state_cache_.find(called_comp);
CHECK(it != pressure_state_cache_.end());
computations_peak = std::max(computations_peak, it->second.memory_peak);
}
if (pressure_state_.memory_peak < live_memory_usage_ + computations_peak) {
pressure_state_.memory_peak = live_memory_usage_ + computations_peak;
}
for (auto* op : instruction->operands()) {
auto& output_values = output_buffers_[op];
for (auto& info : output_values) {
if (ShouldSkipBufferAllocations(instruction, info.second,
info.first.first_definition) ||
(info.first.value->values()[0]->shape().has_layout() &&
info.first.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace)) {
continue;
}
if (live_buffers_[info.first.value->id()] == 0) {
live_buffers_[info.first.value->id()] = 1;
live_buffers_set_.insert(info.first.value->id());
live_memory_usage_ += info.first.buffer_size;
}
}
}
pressure_state_.memory_peak =
std::max(live_memory_usage_, pressure_state_.memory_peak);
auto it = defined_buffers_.find(instruction);
CHECK(it != defined_buffers_.end());
if (!ShouldSkipBufferReleases(instruction)) {
for (auto& b : it->second) {
if (b.value->values()[0]->shape().has_layout() &&
b.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
continue;
}
if (live_buffers_[b.value->id()] != 0) {
if (InstructionFirstDefinesBuffer(instruction, b)) {
live_memory_usage_ -= b.buffer_size;
live_buffers_set_.erase(b.value->id());
}
}
}
}
}
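// Returns the (net usage delta, peak delta) that scheduling `instruction`
// would cause, using the same rules as UpdateBuffers but without mutating any
// tracker state.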
std::pair<int64_t, int64_t> MemoryPressureTracker::MemoryPressureDifference(
const HloInstruction* instruction) const {
int64_t increase = 0;
int64_t peak = 0;
if (!instruction->called_computations().empty()) {
int64_t called_comp_peak = 0;
for (auto* called_comp : instruction->called_computations()) {
if (called_comp->IsFusionComputation()) {
continue;
}
auto it = pressure_state_cache_.find(called_comp);
CHECK(it != pressure_state_cache_.end());
peak = called_comp_peak =
std::max(called_comp_peak, it->second.memory_peak);
}
}
for (auto* op : instruction->operands()) {
auto it = output_buffers_.find(op);
CHECK(it != output_buffers_.end());
for (auto& b : it->second) {
if (ShouldSkipBufferAllocations(instruction, b.second,
b.first.first_definition) ||
(b.first.value->values()[0]->shape().has_layout() &&
b.first.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace)) {
continue;
}
if (!live_buffers_[b.first.value->id()]) {
increase += b.first.buffer_size;
}
}
}
peak = std::max(increase, peak);
auto it = defined_buffers_.find(instruction);
CHECK(it != defined_buffers_.end());
if (!ShouldSkipBufferReleases(instruction)) {
for (auto& b : it->second) {
if (b.value->values()[0]->shape().has_layout() &&
b.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
continue;
}
if (live_buffers_[b.value->id()]) {
if (InstructionFirstDefinesBuffer(instruction, b)) {
increase -= b.buffer_size;
}
}
}
}
return std::make_pair(increase, peak);
}
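// Wraps a ready node into a ScheduleCandidate; memoized fields (pressure
// change, resource constraints, ...) are filled in lazily while candidates
// are compared.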
DefaultSchedulerCore::ScheduleCandidate InitializeCandidate(
HloGraphNode* node,
const DefaultSchedulerCore::SchedulingState& sched_state) {
DefaultSchedulerCore::ScheduleCandidate cand;
cand.node = node;
return cand;
}
namespace {
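// Minimum, over all other ready nodes, of the hop distance to the closest
// occupier of a selective resource; used below to decide whether a node is
// close enough to a selective overlap opportunity to be worth delaying.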
int64_t GetNumHopsToClosestSelectiveOverlap(
const DefaultSchedulerCore::ReadyQueueSet& ready_set,
const HloGraphNode* node) {
int64_t num_hops_to_closest_selective_resource_occupier =
std::numeric_limits<int64_t>::max();
for (const HloGraphNode* n : ready_set) {
if (n == node) {
continue;
}
num_hops_to_closest_selective_resource_occupier =
std::min(num_hops_to_closest_selective_resource_occupier,
n->GetNumHopsToClosestSelectiveResourceOccupier());
}
return num_hops_to_closest_selective_resource_occupier;
}
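// Candidate comparator for the ready set. operator() applies a fixed cascade
// of heuristics (forced early/delay, memory limits, async-done priority,
// stall avoidance, serial-resource conflicts, async depth, ...) and falls
// back to original program order; it returns the winning candidate together
// with the name of the rule that decided the comparison.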
class ReadySetLt {
public:
explicit ReadySetLt(
const DefaultSchedulerCore::SchedulingState* sched_state,
DefaultSchedulerCore::TargetSchedulingRule target_scheduling_rule,
DefaultSchedulerCore::TargetSchedulingRule early_target_scheduling_rule)
: sched_state_(*sched_state),
target_scheduling_rule_(target_scheduling_rule),
early_target_scheduling_rule_(early_target_scheduling_rule) {}
DefaultSchedulerCore::CandidateResult operator()(
DefaultSchedulerCore::ScheduleCandidate& a,
DefaultSchedulerCore::ScheduleCandidate& b) const {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetForceEarly(), a, b.node->GetForceEarly(), b,
"kForceEarly")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!a.node->GetForceDelay(), a, !b.node->GetForceDelay(), b,
"kForceDelay")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
IsNop(*a.node), a, IsNop(*b.node), b, "kIsNop")) {
return *value;
}
std::pair<int64_t, int64_t> a_increase = std::make_pair(0LL, 0LL);
std::pair<int64_t, int64_t> b_increase = std::make_pair(0LL, 0LL);
if (sched_state_.config.memory_limit != UINT64_MAX &&
sched_state_.memory_pressure_tracker->memory_usage() >
(sched_state_.config.memory_limit / 2)) {
a_increase = GetMemoryPressureChanges(a);
b_increase = GetMemoryPressureChanges(b);
if (sched_state_.memory_pressure_tracker->memory_usage() >=
sched_state_.config.memory_limit) {
if (sched_state_.config.depth_based_memory_pressure_reduction) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < 0 && a_increase.first < b_increase.first,
a,
b_increase.first < 0 && b_increase.first < a_increase.first,
b, "kOnlyDecreaseMemoryOverLimit")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetGraphDepth() > b.node->GetGraphDepth(), a,
b.node->GetGraphDepth() > a.node->GetGraphDepth(), b,
"kDepthOverLimit")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < b_increase.first, a,
b_increase.first < a_increase.first, b,
"kDecreaseMemoryOverLimit")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.second +
sched_state_.memory_pressure_tracker->memory_usage() <=
sched_state_.config.memory_limit,
a,
b_increase.second +
sched_state_.memory_pressure_tracker->memory_usage() <=
sched_state_.config.memory_limit,
b, "kMemoryPeakOverLimit")) {
return *value;
}
}
if (early_target_scheduling_rule_) {
if (auto value = early_target_scheduling_rule_(a, b)) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
ShouldScheduleAsyncDone(a), a, ShouldScheduleAsyncDone(b), b,
"kScheduleDone")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
PastDueCyclesForNonextendableResource(a) >
PastDueCyclesForNonextendableResource(b),
a,
PastDueCyclesForNonextendableResource(b) >
PastDueCyclesForNonextendableResource(a),
b, "kReleaseNonextendable")) {
return *value;
}
if (sched_state_.config.enable_release_start_policy) {
const ApproximateLatencyEstimator::TimeCost a_ready_interval =
a.node->GetReadyTime() - sched_state_.current_time;
const ApproximateLatencyEstimator::TimeCost b_ready_interval =
b.node->GetReadyTime() - sched_state_.current_time;
bool a_ready_and_release =
a_ready_interval <= 0 &&
a.node->DoesReleaseResource(ResourceType::kCollectivePermute);
bool b_ready_and_release =
b_ready_interval <= 0 &&
b.node->DoesReleaseResource(ResourceType::kCollectivePermute);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_and_release, a, b_ready_and_release, b,
"kScheduleStart")) {
return *value;
}
if (a_ready_and_release && b_ready_and_release) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_interval < b_ready_interval, a,
b_ready_interval < a_ready_interval, b, "kScheduleStart")) {
return *value;
}
}
}
auto async_depth_0_candidate =
[this](DefaultSchedulerCore::ScheduleCandidate& a,
DefaultSchedulerCore::ScheduleCandidate& b)
-> std::optional<DefaultSchedulerCore::CandidateResult> {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!(a.node->DoesReleaseAnyResource() &&
a.node->GetAsyncDepth() == 0 &&
!IsResourceConstrained(a)),
a,
!(b.node->DoesReleaseAnyResource() &&
b.node->GetAsyncDepth() == 0 && !IsResourceConstrained(b)),
b, "kStartAtZeroDepth")) {
return value;
}
return std::nullopt;
};
if (sched_state_.config.aggressive_scheduling_policies &&
sched_state_.config.prioritize_async_depth_over_stall) {
if (auto value = async_depth_0_candidate(a, b)) {
return *value;
}
}
const ApproximateLatencyEstimator::TimeCost a_ready_interval =
std::max(a.node->GetReadyTime() - sched_state_.current_time, 0.0);
const ApproximateLatencyEstimator::TimeCost b_ready_interval =
std::max(b.node->GetReadyTime() - sched_state_.current_time, 0.0);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_interval < b_ready_interval, a,
b_ready_interval < a_ready_interval, b, "kLessStall")) {
return *value;
}
if (sched_state_.config.resource_serializing) {
const int64_t a_num_conflicting_resources =
GetNumConflictingSerialResources(a);
const int64_t b_num_conflicting_resources =
GetNumConflictingSerialResources(b);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_num_conflicting_resources < b_num_conflicting_resources, a,
b_num_conflicting_resources < a_num_conflicting_resources, b,
"kLessSerialResourceConflict")) {
return *value;
}
}
if (sched_state_.config.aggressive_scheduling_policies &&
!sched_state_.config.prioritize_async_depth_over_stall) {
if (auto value = async_depth_0_candidate(a, b)) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->DoesReleaseAnyResource() && IsResourceConstrained(a), a,
b.node->DoesReleaseAnyResource() && IsResourceConstrained(b), b,
"kFreeBackedupResource")) {
return *value;
}
if (sched_state_.config.aggressive_scheduling_policies) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetAsyncDepth() > b.node->GetAsyncDepth(), a,
b.node->GetAsyncDepth() > a.node->GetAsyncDepth(), b,
"kAsyncDepth")) {
return *value;
}
if (!sched_state_.next_ready_stack.empty()) {
HloGraphNode::TimeCost latest_ready =
sched_state_.next_ready_stack.front()->GetReadyTime();
HloGraphNode::TimeCost a_cost_diff = std::abs(
latest_ready - sched_state_.current_time - a.node->GetCost());
HloGraphNode::TimeCost b_cost_diff = std::abs(
latest_ready - sched_state_.current_time - b.node->GetCost());
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!a.node->DoesReleaseAnyResource() && a_cost_diff < b_cost_diff,
a,
!b.node->DoesReleaseAnyResource() && b_cost_diff < a_cost_diff,
b, "kAvoidWaste")) {
return *value;
}
}
}
bool a_operands = absl::c_any_of(
a.node->GetInstr().operands(),
[async_tracker = sched_state_.async_tracker](const HloInstruction* i) {
return async_tracker->IsSupportedAsyncDone(*i);
});
bool b_operands = absl::c_any_of(
b.node->GetInstr().operands(),
[async_tracker = sched_state_.async_tracker](const HloInstruction* i) {
return async_tracker->IsSupportedAsyncDone(*i);
});
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_operands, a, b_operands, b, "kUnlockDone")) {
return *value;
}
if (target_scheduling_rule_) {
if (auto value = target_scheduling_rule_(a, b)) {
return *value;
}
}
if (sched_state_.config.enable_selective_resources &&
sched_state_.selective_resource_releasers.empty()) {
int64_t distance_to_selective_overlap_for_a =
GetNumHopsToClosestSelectiveOverlap(sched_state_.ready_set, a.node);
int64_t distance_to_selective_overlap_for_b =
GetNumHopsToClosestSelectiveOverlap(sched_state_.ready_set, b.node);
int64_t max_distance =
sched_state_.config.max_hops_to_closest_selective_overlap;
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
(a.node->GetValuableForSelectiveOverlap() &&
distance_to_selective_overlap_for_a <= max_distance),
b,
(b.node->GetValuableForSelectiveOverlap() &&
distance_to_selective_overlap_for_b <= max_distance),
a, "kNotValuableForSelectiveOverlap")) {
return *value;
}
}
if (sched_state_.config.aggressive_scheduling_policies) {
int ready_if_a_scheduled = ReadyIfScheduled(*a.node);
int ready_if_b_scheduled = ReadyIfScheduled(*b.node);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
ready_if_a_scheduled > ready_if_b_scheduled, a,
ready_if_b_scheduled > ready_if_a_scheduled, b,
"kCreatesMoreReadyNodes")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < 0, a, b_increase.first < 0, b,
"kDecreaseMemory")) {
return *value;
}
if (sched_state_.sched_graph.OriginalInstructionPosition(
&a.node->GetInstr()) >
sched_state_.sched_graph.OriginalInstructionPosition(
&b.node->GetInstr())) {
return {a, "kOriginalOrder"};
}
return {b, "kOriginalOrder"};
}
private:
const DefaultSchedulerCore::SchedulingState& sched_state_;
DefaultSchedulerCore::TargetSchedulingRule target_scheduling_rule_;
DefaultSchedulerCore::TargetSchedulingRule early_target_scheduling_rule_;
int ReadyIfScheduled(const HloGraphNode& gn) const {
int ready_nodes_if_scheduled = 0;
for (auto& pred : gn.GetPredecessors()) {
if (pred.Target().GetOutdegree() == 1) {
++ready_nodes_if_scheduled;
}
}
return ready_nodes_if_scheduled;
}
static bool IsNop(const HloGraphNode& gn) {
return IsNopInstruction(gn.GetInstr());
}
bool IsResourceConstrained(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (cand.resource_constrained) {
return *cand.resource_constrained;
}
if (cand.node->GetResources().empty()) {
cand.resource_constrained = false;
return *(cand.resource_constrained);
}
cand.resource_constrained = false;
for (const auto& [resource_type, usage_type] : cand.node->GetResources()) {
auto max_it = sched_state_.max_concurrent_resource.find(resource_type);
auto res_it = sched_state_.resource_users_in_queue.find(resource_type);
cand.resource_constrained =
max_it != sched_state_.max_concurrent_resource.end() &&
max_it->second == 0 &&
res_it != sched_state_.resource_users_in_queue.end() &&
res_it->second > 0;
if (*cand.resource_constrained) {
return *cand.resource_constrained;
}
}
return *cand.resource_constrained;
}
bool ShouldScheduleAsyncDone(
DefaultSchedulerCore::ScheduleCandidate& gn_cand) const {
if (!gn_cand.node->DoesOccupyAnyResource()) {
return false;
}
return !ShouldDelaySendHostDone(gn_cand);
}
HloGraphNode::TimeCost PastDueCyclesForNonextendableResource(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (sched_state_.async_tracker
->GetReleasedNonextendableResourcesFromVector(
cand.node->GetResources())
.empty()) {
return 0.0;
}
return std::max(sched_state_.current_time - cand.node->GetReadyTime(), 0.0);
}
bool ShouldDelaySendHostDone(
DefaultSchedulerCore::ScheduleCandidate& gn_cand) const {
const HloGraphNode& gn = *gn_cand.node;
if (!gn.UsesResourceType(ResourceType::kSendHost).has_value() ||
gn.GetInstr().opcode() != HloOpcode::kSendDone) {
return false;
}
const HloGraphNode& start =
sched_state_.sched_graph.GetNode(gn.GetInstr().operand(0));
const LatencyEstimator::TimeCost latency =
sched_state_.latency_estimator->GetLatencyBetween(start, gn);
if (!gn_cand.estimated_connected_send_ready_time.has_value()) {
HloGraphNode::TimeCost start_ready_time = 0;
for (const auto& succ : start.GetSuccessors()) {
if (succ.Target().GetReadyTime() >=
std::numeric_limits<HloGraphNode::TimeCost>::max()) {
return false;
}
start_ready_time = std::max(
start_ready_time, succ.Latency() + succ.Target().GetReadyTime());
}
gn_cand.estimated_connected_send_ready_time = start_ready_time;
}
if (*gn_cand.estimated_connected_send_ready_time -
sched_state_.current_time <=
latency) {
return false;
}
return true;
}
std::pair<int64_t, int64_t> GetMemoryPressureChanges(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (cand.pressure_change) {
return *cand.pressure_change;
}
std::optional<std::pair<int64_t, int64_t>> start_result;
if (this->sched_state_.async_tracker->IsSupportedAsyncDone(
cand.node->GetInstr())) {
const HloInstruction* start = cand.node->GetInstr().operand_count() > 0
? cand.node->GetInstr().operand(0)
: nullptr;
if (start != nullptr &&
this->sched_state_.async_tracker->IsSupportedAsyncStart(*start)) {
start_result =
sched_state_.memory_pressure_tracker->MemoryPressureDifference(
start);
}
}
cand.pressure_change =
sched_state_.memory_pressure_tracker->MemoryPressureDifference(
&cand.node->GetInstr());
if (start_result.has_value()) {
cand.pressure_change->first =
std::min(start_result->first, cand.pressure_change->first);
cand.pressure_change->second =
std::max(start_result->second, cand.pressure_change->second);
}
return *cand.pressure_change;
}
int64_t GetNumConflictingSerialResources(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
auto resources =
sched_state_.async_tracker->GetOccupiedSerialResourcesFromVector(
cand.node->GetResources());
int64_t num_conflicting_resources = 0;
for (int64_t resource : resources) {
if (!sched_state_.resources_in_flight.contains(resource)) continue;
num_conflicting_resources +=
sched_state_.resources_in_flight.at(resource);
}
return num_conflicting_resources;
}
};
enum SkipNodeReason {
kShouldSkipNodeFunction,
kExceedsOverlapLimit,
};
absl::string_view SkipNodeReasonString(SkipNodeReason reason) {
switch (reason) {
case SkipNodeReason::kShouldSkipNodeFunction:
return "Skipped due to kShouldSkipNodeFunction.";
case SkipNodeReason::kExceedsOverlapLimit:
return "Skipped due to kExceedsOverlapLimit.";
}
}
}  // namespace
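// Scans the whole ready set and extracts the best candidate according to
// ReadySetLt, skipping nodes vetoed by `should_skip_node` or whose resource
// needs would exceed the configured overlap limits for resources currently in
// flight. Fails if every ready node was skipped.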
absl::StatusOr<HloGraphNode*>
DefaultSchedulerCore::FindAndExtractBestNodeAvailable(
DefaultSchedulerCore::SchedulingState& sched_state,
DefaultSchedulerCore::ShouldSkipNodeFunction should_skip_node) {
absl::InlinedVector<std::pair<HloGraphNode*, SkipNodeReason>, 2>
skipped_nodes_and_reasons;
auto scheduling_instruction_crosses_overlap_limit =
[&sched_state](const HloInstruction& instr) {
for (const auto& [resource, limit] :
sched_state.max_concurrent_resource) {
auto it = sched_state.resources_in_flight.find(resource);
if (it == sched_state.resources_in_flight.end() || it->second == 0) {
continue;
}
const int64_t num_resources_needed =
sched_state.async_tracker->GetNumResourcesPerInstruction(resource,
instr);
if (limit < num_resources_needed) {
return true;
}
}
return false;
};
VLOG(2) << "Current time: " << sched_state.current_time;
ReadySetLt ready_lt{&sched_state, target_scheduling_rule_,
early_target_scheduling_rule_};
ScheduleCandidate ready_chosen;
auto chosen_it = sched_state.ready_set.end();
for (auto ready_node_it = sched_state.ready_set.begin(),
e = sched_state.ready_set.end();
ready_node_it != e; ++ready_node_it) {
if (should_skip_node && should_skip_node(*ready_node_it)) {
if (ready_chosen.node == nullptr) {
skipped_nodes_and_reasons.push_back(
{*ready_node_it, SkipNodeReason::kShouldSkipNodeFunction});
}
continue;
}
if (scheduling_instruction_crosses_overlap_limit(
(*ready_node_it)->GetInstr())) {
if (ready_chosen.node == nullptr) {
skipped_nodes_and_reasons.push_back(
{*ready_node_it, SkipNodeReason::kExceedsOverlapLimit});
}
continue;
}
ScheduleCandidate ready_candidate =
InitializeCandidate(*ready_node_it, sched_state);
if (ready_chosen.node == nullptr) {
ready_chosen = ready_candidate;
chosen_it = ready_node_it;
VLOG(2) << "Choosing from ready (" << ready_chosen.node->GetInstr().name()
<< ") Reason: First Candidate";
continue;
}
CandidateResult cand_result = ready_lt(ready_candidate, ready_chosen);
const bool new_candidate_selected =
cand_result.result.node == *ready_node_it;
auto print_pressure_change =
[](const std::optional<std::pair<int64_t, int64_t>>& p) {
if (p.has_value()) {
return std::to_string(p.value().first);
}
return std::string("N/A");
};
VLOG(2) << "Choosing from ready ("
<< (new_candidate_selected ? ready_candidate.node->GetInstr().name()
: ready_chosen.node->GetInstr().name())
<< ") vs ("
<< (new_candidate_selected
? ready_chosen.node->GetInstr().name()
: ready_candidate.node->GetInstr().name())
<< ") Reason: " << cand_result.reason << " mem pressure chosen "
<< print_pressure_change(
(new_candidate_selected ? ready_candidate : ready_chosen)
.pressure_change)
<< " mem pressure other "
<< print_pressure_change(
(new_candidate_selected ? ready_chosen : ready_candidate)
.pressure_change);
if (new_candidate_selected) {
ready_chosen = cand_result.result;
chosen_it = ready_node_it;
}
}
if (ready_chosen.node == nullptr) {
return absl::InternalError(absl::StrCat(
"FindAndExtractBestNodeAvailable failed to find a node to "
"schedule, skipped nodes: ",
absl::StrJoin(skipped_nodes_and_reasons, "; ",
[](std::string* out, const auto& pair) {
absl::StrAppend(out, pair.first->GetInstr().name(),
": ",
SkipNodeReasonString(pair.second));
})));
}
CHECK(chosen_it != sched_state.ready_set.end());
std::swap(*chosen_it, sched_state.ready_set.back());
sched_state.ready_set.pop_back();
return ready_chosen.node;
}
void DefaultSchedulerCore::LogInstruction(const HloInstruction* instr) const {
VLOG(5) << instr->ToString();
}
void PrintOccupierList(
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
for (int64_t i = 0; i < occupiers.size(); i++) {
VLOG(3) << "\tOccupier " << i << ": "
<< occupiers[i].first->Target().GetInstr().name()
<< ", projected finish time: " << occupiers[i].second
<< " original latency: " << occupiers[i].first->OriginalLatency()
<< " latency: " << occupiers[i].first->Latency();
}
}
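// Removes `edge` from the occupier list of a shared resource and rescales the
// projected finish times of everything that was sharing bandwidth with it:
// while k occupiers overlap, each progresses at rate 1/k, so deleting one
// moves later finish times earlier. E.g. deleting one of two edges that both
// projected to finish at t=20 (original latency 10, started at t=0) returns
// the survivor to full rate and its finish time to t=10.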
bool DefaultSchedulerCore::DeleteOccupierFromResource(
HloGraphNode::TimeCost current_time, HloEdge& edge,
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
  if (!absl::c_any_of(
          occupiers,
          [&edge](const std::pair<HloEdge*, HloGraphNode::TimeCost>& element) {
            return element.first == &edge;
          })) {
    return false;
  }
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>::iterator it =
occupiers.begin();
int64_t num_occupiers = occupiers.size();
HloGraphNode::TimeCost prev_time = current_time;
HloGraphNode::TimeCost accumulated_saved_time = 0;
while (it != occupiers.end() && it->first != &edge) {
if (it->second <= current_time) {
num_occupiers--;
it++;
continue;
}
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
prev_time = it->second;
CHECK_GT(num_occupiers, 0);
HloGraphNode::TimeCost current_saved_time =
remaining_time_of_edge / num_occupiers;
accumulated_saved_time += current_saved_time;
CHECK_GE(it->second, accumulated_saved_time);
it->second -= accumulated_saved_time;
num_occupiers--;
it++;
}
CHECK(it != occupiers.end());
if (it->second > current_time) {
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
HloGraphNode::TimeCost current_saved_time =
remaining_time_of_edge / num_occupiers;
accumulated_saved_time += current_saved_time;
}
it = occupiers.erase(it);
for (; it != occupiers.end(); it++) {
it->second -= accumulated_saved_time;
}
return true;
}
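// Inserts `new_edge` into the occupier list of a shared resource, keeping the
// list sorted by projected finish time. Bandwidth is divided evenly among
// concurrent occupiers, so the insertion both computes the new edge's own
// projected finish time and delays every occupier that finishes after it.
// E.g. adding a second edge of original latency 10 at t=0 next to an
// identical occupier moves both projected finish times from t=10 to t=20,
// since each now runs at half rate.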
bool DefaultSchedulerCore::AddOccupierToResource(
HloGraphNode::TimeCost current_time, HloEdge& new_edge,
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
CHECK(new_edge.OriginalLatency() > 0 && current_time >= 0);
auto new_edge_remaining = new_edge.OriginalLatency();
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>::iterator it =
occupiers.begin();
int64_t num_occupiers = occupiers.size();
HloGraphNode::TimeCost prev_time = current_time;
HloGraphNode::TimeCost accumulated_delay = 0;
while (it != occupiers.end() &&
it->second - prev_time <= new_edge_remaining * num_occupiers) {
if (it->second <= current_time) {
num_occupiers--;
it++;
continue;
}
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
prev_time = it->second;
CHECK_GT(num_occupiers, 0);
HloGraphNode::TimeCost current_delay =
remaining_time_of_edge / num_occupiers;
new_edge_remaining -= current_delay;
accumulated_delay += current_delay;
it->second += accumulated_delay;
num_occupiers--;
it++;
}
num_occupiers++;
HloGraphNode::TimeCost adjusted_remaining_time =
new_edge_remaining * num_occupiers;
it = occupiers.insert(
it, std::make_pair(&new_edge, prev_time + accumulated_delay +
adjusted_remaining_time));
it++;
accumulated_delay += new_edge_remaining;
CHECK(new_edge.OriginalLatency() - 0.0001 < accumulated_delay &&
accumulated_delay < new_edge.OriginalLatency() + 0.0001);
for (; it != occupiers.end(); it++) {
it->second += accumulated_delay;
}
return true;
}
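// Commits `n` to the (reversed) schedule: updates selective-resource
// bookkeeping, concurrency counters, and shareable-resource occupier lists;
// computes the node's ready/finish times from its successor edges; releases
// its predecessors into the ready set; and updates the memory-pressure
// tracker. Returns the new current time.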
absl::StatusOr<HloGraphNode::TimeCost> DefaultSchedulerCore::ScheduleNode(
HloGraphNode* n, DefaultSchedulerCore::SchedulingState* sched_state) const {
sched_state->new_sequence_reversed.push_back(
const_cast<HloInstruction*>(&n->GetInstr()));
n->SetScheduled();
if (sched_state->config.enable_selective_resources &&
n->ReleasesSelectiveResource()) {
auto it = std::find(sched_state->selective_resource_releasers.begin(),
sched_state->selective_resource_releasers.end(), n);
if (it == sched_state->selective_resource_releasers.end()) {
LOG(WARNING) << "Selective resource releasers list does not contain node "
"that releases a selective resource: "
<< n->ToString();
} else {
sched_state->selective_resource_releasers.erase(it);
}
}
if (sched_state->config.enable_selective_resources &&
!n->GetValuableForSelectiveOverlap()) {
for (HloGraphNode* node : sched_state->selective_resource_releasers) {
node->SetReadyTime(node->GetReadyTime() + n->GetCost());
}
}
for (auto& resource : n->GetResources()) {
if (resource.second == ResourceUsageType::kResourceRelease) {
++(sched_state->max_concurrent_resource[resource.first]);
} else if (resource.second == ResourceUsageType::kResourceOccupy) {
--(sched_state->max_concurrent_resource[resource.first]);
--(sched_state->resource_users_in_queue[resource.first]);
}
}
HloGraphNode::TimeCost schedule_time = sched_state->current_time;
for (const HloEdge& pred : n->GetSuccessors()) {
const HloGraphNode::TimeCost time_from_edge =
pred.Target().GetReadyTime() + pred.Latency();
schedule_time = std::max(schedule_time, time_from_edge);
if (sched_state->config.resource_sharing) {
auto occupied_resources = n->GetShareableResourcesOnEdge(pred);
for (const int64_t resource : occupied_resources) {
auto occupiers = sched_state->shareable_resource_occupiers[resource];
for (auto [occupier_edge, edge_pft] : occupiers) {
if (occupier_edge == &pred) {
VLOG(3) << "Ready time of scheduled node " << n->GetInstr().name()
<< " before update with pft: " << edge_pft
<< ", ready_time: " << schedule_time;
schedule_time = std::max(schedule_time, edge_pft);
VLOG(3) << "Ready time of scheduled node " << n->GetInstr().name()
<< " after update with pft: " << edge_pft
<< ", ready_time: " << schedule_time;
}
}
}
}
}
n->SetReadyTime(schedule_time);
HloGraphNode::TimeCost current_time = schedule_time + n->GetCost();
if (sched_state->config.resource_sharing) {
for (HloEdge& edge : n->GetSuccessors()) {
auto released_resources = n->GetShareableResourcesOnEdge(edge);
for (const int64_t resource : released_resources) {
CHECK(DeleteOccupierFromResource(
schedule_time, edge,
sched_state->shareable_resource_occupiers[resource]));
if (VLOG_IS_ON(2)) {
VLOG(3) << "Occupier list for "
<< sched_state->async_tracker->GetResourceName(resource)
<< ": ";
PrintOccupierList(
sched_state->shareable_resource_occupiers[resource]);
}
}
}
for (HloEdge& edge : n->GetPredecessors()) {
for (HloEdge& inverse_edge : edge.Target().GetSuccessors()) {
if (&(inverse_edge.Target()) == n) {
auto occupied_resources =
edge.Target().GetShareableResourcesOnEdge(inverse_edge);
for (const int64_t resource : occupied_resources) {
CHECK(AddOccupierToResource(
current_time, inverse_edge,
sched_state->shareable_resource_occupiers[resource]));
if (VLOG_IS_ON(2)) {
VLOG(3) << "Occupier list for "
<< sched_state->async_tracker->GetResourceName(resource)
<< ": ";
PrintOccupierList(
sched_state->shareable_resource_occupiers[resource]);
}
}
break;
}
}
}
}
auto ready_time_cmp = [](const HloGraphNode* a, const HloGraphNode* b) {
return a->GetReadyTime() > b->GetReadyTime();
};
while (!sched_state->next_ready_stack.empty()) {
const HloGraphNode* node = sched_state->next_ready_stack.front();
if (node->GetReadyTime() < current_time) {
std::pop_heap(sched_state->next_ready_stack.begin(),
sched_state->next_ready_stack.end(), ready_time_cmp);
sched_state->next_ready_stack.pop_back();
continue;
}
break;
}
for (HloEdge& edge : n->GetPredecessors()) {
const int64_t current_outdegree = edge.Target().GetOutdegree();
if (current_outdegree != 1) {
edge.Target().SetOutdegree(current_outdegree - 1);
continue;
}
edge.Target().SetOutdegree(0);
LatencyEstimator::TimeCost ready_time = current_time;
for (const HloEdge& pred : edge.Target().GetSuccessors()) {
const LatencyEstimator::TimeCost edge_time =
pred.Target().GetReadyTime() + pred.Latency();
ready_time = std::max(ready_time, edge_time);
if (sched_state->config.resource_sharing) {
auto occupied_resources =
edge.Target().GetShareableResourcesOnEdge(pred);
for (const int64_t resource : occupied_resources) {
auto occupiers = sched_state->shareable_resource_occupiers[resource];
for (auto [occupier_edge, edge_pft] : occupiers) {
if (occupier_edge == &pred) {
VLOG(3) << "Ready time of predecessor "
<< edge.Target().GetInstr().name()
<< " before update with pft: " << edge_pft
<< ", ready_time: " << ready_time;
ready_time = std::max(ready_time, edge_pft);
VLOG(3) << "Ready time of predecessor "
<< edge.Target().GetInstr().name()
<< " after update with pft: " << edge_pft
<< ", ready_time: " << ready_time;
}
}
}
}
}
for (auto& resource : edge.Target().GetResources()) {
if (resource.second == ResourceUsageType::kResourceOccupy) {
++(sched_state->resource_users_in_queue[resource.first]);
}
}
edge.Target().SetReadyTime(ready_time);
sched_state->ready_set.push_back(&edge.Target());
if (edge.Target().GetReadyTime() > current_time) {
sched_state->next_ready_stack.push_back(&edge.Target());
std::push_heap(sched_state->next_ready_stack.begin(),
sched_state->next_ready_stack.end(), ready_time_cmp);
}
if (sched_state->config.enable_selective_resources &&
edge.Target().ReleasesSelectiveResource()) {
sched_state->selective_resource_releasers.push_back(&edge.Target());
}
}
++sched_state->scheduled_count;
for (auto& resource : n->GetResources()) {
if (resource.second == ResourceUsageType::kResourceRelease) {
--sched_state->resources_in_flight[resource.first];
} else if (resource.second == ResourceUsageType::kResourceOccupy) {
++sched_state->resources_in_flight[resource.first];
}
}
VLOG(10) << "Memory pressure before schedule: "
<< sched_state->memory_pressure_tracker->memory_usage();
VLOG(10)
<< "Memory peak before schedule: "
<< sched_state->memory_pressure_tracker->pressure_state().memory_peak;
sched_state->memory_pressure_tracker->UpdateBuffers(&n->GetInstr());
VLOG(10) << "Memory pressure after schedule: "
<< sched_state->memory_pressure_tracker->memory_usage();
VLOG(10)
<< "Memory peak after schedule: "
<< sched_state->memory_pressure_tracker->pressure_state().memory_peak;
return current_time;
}
std::string HloEdge::ToString() const {
return absl::StrCat("\tEdge: ", target_->GetInstr().name(),
" latency: ", Latency(), "\n");
}
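// Iterative DFS over predecessor edges that reports whether
// `possible_predecessor` transitively precedes `node`; used while building
// the graph to avoid inserting redundant (or cycle-creating) edges.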
bool HloScheduleGraph::IsPredecessorTransitively(
const HloGraphNode* node, const HloGraphNode* possible_predecessor) {
absl::flat_hash_set<const HloGraphNode*> visited = {possible_predecessor};
std::vector<const HloGraphNode*> to_visit_queue = {node};
while (!to_visit_queue.empty()) {
const HloGraphNode* curr = to_visit_queue.back();
to_visit_queue.pop_back();
if (curr == possible_predecessor) {
return true;
}
if (visited.contains(curr)) {
continue;
}
visited.insert(curr);
for (const auto& edge : curr->GetPredecessors()) {
auto user_node_it = nodes_.find(&edge.Target().GetInstr());
to_visit_queue.push_back(user_node_it->second.get());
}
}
return false;
}
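// Builds the scheduling graph from a post-order instruction list: one node
// per instruction with its cost and resources, edges for data users and
// control successors (latencies from `latency_estimator`), plus extra unit
// edges that (a) make users of values aliased with an async done precede the
// matching async start and (b) order a send-done before the earliest while
// loop reachable from a control-predecessor recv-done.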
HloScheduleGraph::HloScheduleGraph(
const std::vector<HloInstruction*>* post_order_instructions,
HloAliasAnalysis* alias_analysis, const LatencyEstimator* latency_estimator,
const AsyncTracker* async_tracker)
: original_order_(post_order_instructions->begin(),
post_order_instructions->end()) {
HloComputation* comp = (*post_order_instructions)[0]->parent();
auto reachability = HloReachabilityMap::Build(comp);
int64_t current_pos = 0;
std::vector<const HloInstruction*> while_instrs;
for (HloInstruction* instr : *post_order_instructions) {
auto [new_node_it, inserted] = nodes_.try_emplace(
instr, std::make_unique<HloGraphNode>(instr, current_pos));
CHECK(inserted) << "Expected the value to not be already in the map";
instr_order_map_[instr] = current_pos++;
new_node_it->second->predecessors_.reserve(instr->operand_count());
new_node_it->second->successors_.reserve(instr->user_count());
new_node_it->second->cost_ = latency_estimator->NodeCost(instr);
new_node_it->second->resources_ =
async_tracker->GetResourcesFromInstruction(*instr);
new_node_it->second->released_shareable_resources_ =
async_tracker->GetReleasedShareableResourcesFromVector(
new_node_it->second->GetResources());
new_node_it->second->occupied_shareable_resources_ =
async_tracker->GetOccupiedShareableResourcesFromVector(
new_node_it->second->GetResources());
new_node_it->second->releases_selective_resource_ =
async_tracker->ReleasesSelectiveResource(new_node_it->second.get());
new_node_it->second->occupies_selective_resource_ =
async_tracker->OccupiesSelectiveResource(new_node_it->second.get());
if (instr->opcode() == HloOpcode::kWhile) {
while_instrs.push_back(instr);
}
}
auto add_dependency_helper = [latency_estimator](HloGraphNode* from,
HloGraphNode* to) {
const LatencyEstimator::TimeCost latency =
latency_estimator->GetLatencyBetween(*from, *to);
from->successors_.push_back(HloEdge(latency, to));
to->predecessors_.push_back(HloEdge(latency, from));
++to->indegree_;
++from->outdegree_;
};
for (const HloInstruction* instr : *post_order_instructions) {
auto node_it = nodes_.find(instr);
CHECK(node_it != nodes_.end()) << "We should have just allocated a node";
HloGraphNode* instr_node = node_it->second.get();
VLOG(10) << "Adding users for " << instr_node->GetInstr().ToString();
for (const HloInstruction* user : instr->users()) {
VLOG(10) << "\tUser: " << user->ToString();
auto user_node_it = nodes_.find(user);
CHECK(user_node_it != nodes_.end());
HloGraphNode* user_node = user_node_it->second.get();
add_dependency_helper(instr_node, user_node);
}
for (const HloInstruction* ctrl_succ : instr->control_successors()) {
VLOG(10) << "\tCtrl Successor: " << ctrl_succ->ToString();
auto ctrl_succ_node_it = nodes_.find(ctrl_succ);
CHECK(ctrl_succ_node_it != nodes_.end());
HloGraphNode* ctrl_succ_node = ctrl_succ_node_it->second.get();
add_dependency_helper(instr_node, ctrl_succ_node);
}
if (async_tracker->IsSupportedAsyncDone(*instr)) {
const HloInstruction* async_start = instr->operand(0);
if (alias_analysis != nullptr) {
for (const HloBuffer* buffer :
alias_analysis->ComputeBuffersAt(instr, {})) {
for (const HloValue* value : buffer->values()) {
if (value->defining_instruction() == instr) {
continue;
}
for (const HloUse& use : value->GetUses()) {
if (ContainsKey(instr_order_map_, use.instruction)) {
if (use.instruction == async_start ||
reachability->IsReachable(instr, use.instruction)) {
continue;
}
auto it = nodes_.find(use.instruction);
CHECK(it != nodes_.end());
HloGraphNode* pred_node = it->second.get();
it = nodes_.find(async_start);
CHECK(it != nodes_.end());
HloGraphNode* start_node = it->second.get();
if (IsPredecessorTransitively(pred_node, start_node)) {
continue;
}
pred_node->successors_.push_back(HloEdge(1, start_node));
start_node->predecessors_.push_back(HloEdge(1, pred_node));
++pred_node->outdegree_;
++start_node->indegree_;
}
}
}
}
}
}
if (instr->opcode() == HloOpcode::kSendDone) {
for (const auto* ctrl_pred : instr->control_predecessors()) {
if (ctrl_pred->opcode() != HloOpcode::kRecvDone) {
continue;
}
const HloInstruction* dependent_while_instr = nullptr;
for (const auto* while_hlo : while_instrs) {
if (!reachability->IsReachable(ctrl_pred, while_hlo)) {
continue;
}
if (dependent_while_instr == nullptr) {
dependent_while_instr = while_hlo;
continue;
}
if (OriginalInstructionPosition(while_hlo) <
OriginalInstructionPosition(dependent_while_instr)) {
dependent_while_instr = while_hlo;
}
}
if (dependent_while_instr != nullptr) {
auto send_done_it = nodes_.find(instr);
CHECK(send_done_it != nodes_.end());
HloGraphNode* send_done_node = send_done_it->second.get();
auto while_it = nodes_.find(dependent_while_instr);
CHECK(while_it != nodes_.end());
HloGraphNode* while_node = while_it->second.get();
send_done_node->successors_.push_back(HloEdge(1, while_node));
while_node->predecessors_.push_back(HloEdge(1, send_done_node));
++send_done_node->outdegree_;
++while_node->indegree_;
}
break;
}
}
}
}
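// Renders every node in a topological order obtained with an iterative DFS
// seeded from the nodes that have no predecessors.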
std::string HloScheduleGraph::ToString(
const AsyncTracker* async_tracker) const {
std::string result;
std::vector<std::pair<const HloGraphNode*, int>> stack;
for (const auto& node : nodes_) {
if (node.second->predecessors_.empty()) {
stack.push_back(std::make_pair(node.second.get(), 0));
}
}
std::vector<const HloGraphNode*> order;
absl::flat_hash_set<const HloGraphNode*> visited;
while (!stack.empty()) {
auto& val = stack.back();
if (val.second == val.first->successors_.size()) {
order.push_back(val.first);
stack.pop_back();
continue;
}
const int64_t next_child = val.second++;
if (visited.insert(&val.first->successors_[next_child].Target()).second) {
stack.push_back(
std::make_pair(&val.first->successors_[next_child].Target(), 0));
}
}
for (auto it = order.rbegin(), e = order.rend(); it != e; ++it) {
absl::StrAppend(&result, (*it)->ToString(async_tracker));
}
return result;
}
HloGraphNode& HloScheduleGraph::GetNode(const HloInstruction* instr) const {
auto it = nodes_.find(instr);
CHECK(it != nodes_.end());
return *it->second;
}
std::vector<HloGraphNode*> HloScheduleGraph::FindBottomRoots() const {
std::vector<HloGraphNode*> roots;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
if (node.GetOutdegree() == 0) {
roots.push_back(&node);
}
}
return roots;
}
std::vector<HloGraphNode*> HloScheduleGraph::FindTopRoots() const {
std::vector<HloGraphNode*> roots;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
if (node.GetIndegree() == 0) {
roots.push_back(&node);
}
}
return roots;
}
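// One topological sweep that computes per-node analysis data: async depth
// (latency accumulated through supported async-done ops), depth (cost plus
// latency along the critical path), graph depth (hop count), and the hop
// distance to the closest occupier of a selective resource.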
void HloScheduleGraph::InitializeGraphAnalysis(
const AsyncTracker* async_tracker) {
absl::flat_hash_map<HloGraphNode*, int> current_rank;
std::vector<HloGraphNode*> stack;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
current_rank[&node] = node.GetIndegree();
node.SetAsyncDepth(0.0);
node.SetDepth(0.0);
node.SetGraphDepth(0);
if (node.GetIndegree() == 0) {
stack.push_back(&node);
}
}
while (!stack.empty()) {
auto* node = stack.back();
stack.pop_back();
if (async_tracker->OccupiesSelectiveResource(node)) {
node->num_hops_to_closest_selective_resource_occupier_ = 0;
} else {
int64_t closest_predecessor_distance =
std::numeric_limits<int64_t>::max();
for (auto& pred : node->GetPredecessors()) {
closest_predecessor_distance = std::min(
closest_predecessor_distance,
pred.Target().num_hops_to_closest_selective_resource_occupier_);
}
if (closest_predecessor_distance != std::numeric_limits<int64_t>::max()) {
node->num_hops_to_closest_selective_resource_occupier_ =
closest_predecessor_distance + 1;
}
}
if (async_tracker->IsSupportedAsyncDone(node->GetInstr())) {
for (auto& pred : node->GetPredecessors()) {
node->SetAsyncDepth(
std::max(pred.Target().GetAsyncDepth() + pred.Latency(),
node->GetAsyncDepth()));
node->SetDepth(std::max(
pred.Target().GetDepth() + pred.Target().GetCost() + pred.Latency(),
node->GetDepth()));
node->SetGraphDepth(
std::max(pred.Target().GetGraphDepth() + 1, node->GetGraphDepth()));
}
} else {
for (auto& pred : node->GetPredecessors()) {
node->SetAsyncDepth(
std::max(pred.Target().GetAsyncDepth(), node->GetAsyncDepth()));
node->SetDepth(std::max(
pred.Target().GetDepth() + pred.Target().GetCost() + pred.Latency(),
node->GetDepth()));
node->SetGraphDepth(
std::max(pred.Target().GetGraphDepth() + 1, node->GetGraphDepth()));
}
}
for (auto& succ : node->GetSuccessors()) {
if (--current_rank[&succ.Target()] == 0) {
stack.push_back(&succ.Target());
}
}
}
}
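// Runs alias analysis once per module and precomputes the memory-pressure
// state for every scheduled computation before any rescheduling happens.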
absl::Status DefaultSchedulerCore::InitializeScheduler(
const HloModule* module) {
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module));
module_pressure_state_ = std::make_unique<ModulePressureState>(
module, alias_analysis_.get(), shape_size_bytes_);
module_pressure_state_->InitializePressureStates();
module_pressure_state_->SetMemoryPeak(0);
return absl::OkStatus();
}
absl::Status DefaultSchedulerCore::SchedulingStep(
SchedulingState* sched_state) {
TF_ASSIGN_OR_RETURN(HloGraphNode * node,
FindAndExtractBestNodeAvailable(
*sched_state, nullptr));
CHECK(node != nullptr);
TF_ASSIGN_OR_RETURN(sched_state->current_time,
ScheduleNode(node, sched_state));
VLOG(2) << "Scheduled: " << node->GetInstr().name();
XLA_VLOG_LINES(5, node->ToString());
return absl::OkStatus();
}
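// Schedules a single computation bottom-up: seeds the ready set with the
// graph's bottom roots, repeatedly extracts and schedules the best candidate,
// then reverses the accumulated sequence. Also refreshes the computation's
// pressure state and optionally dumps the schedule for the entry computation.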
absl::StatusOr<std::vector<HloInstruction*>>
DefaultSchedulerCore::ScheduleComputation(const HloComputation* computation) {
const HloSchedule& module_schedule = computation->parent()->schedule();
MemoryPressureTracker memory_pressure_tracker(
alias_analysis_.get(), module_pressure_state_->buffer_tracker(),
module_pressure_state_->pressure_state_cache());
memory_pressure_tracker.Initialize(
computation,
module_pressure_state_->GetPressureStateForComputation(computation)
.live_ids_at_bottom);
SchedulingState sched_state(
&module_schedule.sequence(computation), alias_analysis_.get(),
latency_estimator_, async_tracker_, &memory_pressure_tracker, config_);
async_tracker_->PostProcessScheduleGraph(&sched_state.sched_graph,
latency_estimator_);
sched_state.sched_graph.InitializeGraphAnalysis(async_tracker_);
VLOG(5) << "Just built graph:";
XLA_VLOG_LINES(5, sched_state.sched_graph.ToString(async_tracker_));
async_tracker_->SetConcurrentResourceLimits(
sched_state.max_concurrent_resource);
auto roots = sched_state.sched_graph.FindBottomRoots();
for (HloGraphNode* root : roots) {
root->SetReadyTime(0.0);
}
VLOG(5) << "Initial memory pressure for " << computation->name() << ": "
<< memory_pressure_tracker.memory_usage();
sched_state.ready_set.insert(sched_state.ready_set.end(), roots.begin(),
roots.end());
while (!sched_state.ready_set.empty()) {
VLOG(10) << "Current ready time: " << sched_state.current_time;
VLOG(2) << "Current ready queue:";
XLA_VLOG_LINES(2, [&sched_state]() {
struct LogFormatter {
void operator()(std::string* out, const HloGraphNode* n) const {
out->append(absl::StrCat("\t", n->GetInstr().name(),
" Ready time: ", n->GetReadyTime(),
" Depth: ", n->GetGraphDepth()));
}
};
return absl::StrJoin(sched_state.ready_set, "\n", LogFormatter());
}());
TF_RETURN_IF_ERROR(SchedulingStep(&sched_state));
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "New order";
for (auto r_it = sched_state.new_sequence_reversed.rbegin(),
e_it = sched_state.new_sequence_reversed.rend();
r_it != e_it; ++r_it) {
LogInstruction(*r_it);
}
}
module_pressure_state_->UpdatePressureStateForComputation(
computation, memory_pressure_tracker.pressure_state());
absl::c_reverse(sched_state.new_sequence_reversed);
if (post_processing_fn_) {
post_processing_fn_(sched_state);
}
CHECK_EQ(sched_state.new_sequence_reversed.size(),
sched_state.sched_graph.GetOriginalInstrList().size())
<< "Not all instructions have been scheduled "
<< sched_state.new_sequence_reversed.size() << " vs "
<< sched_state.sched_graph.GetOriginalInstrList().size();
VLOG(2) << "Total time: "
<< sched_state.sched_graph
.GetNode(sched_state.new_sequence_reversed.front())
.GetReadyTime();
const auto& debug_options = xla::GetDebugOptionsFromFlags();
if (debug_options.xla_dump_latency_hiding_schedule() &&
computation->IsEntryComputation()) {
int core_freq = latency_estimator_->CyclesPerMicrosecond();
DumpLatencyHidingSchedule(computation, sched_state.sched_graph,
sched_state.new_sequence_reversed, core_freq,
debug_options);
}
return std::move(sched_state.new_sequence_reversed);
}
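// Serializes the computed schedule to a ScheduleProto, converting each node's
// ready time and cost into start/end timestamps on a forward timeline, and
// writes it via the standard dump machinery as "<computation>.schedule".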
void DefaultSchedulerCore::DumpLatencyHidingSchedule(
const HloComputation* computation, const HloScheduleGraph& schedule_graph,
const std::vector<HloInstruction*>& instructions,
const int cycles_per_microsecond, const DebugOptions& debug_options) {
ScheduleProto proto;
proto.set_computation_id(computation->unique_id());
proto.set_cycles_per_microsecond(cycles_per_microsecond);
const HloGraphNode& first_node = schedule_graph.GetNode(instructions.front());
const double total_time = first_node.GetReadyTime() + first_node.GetCost();
for (const HloInstruction* instr : instructions) {
const HloGraphNode& instr_node = schedule_graph.GetNode(instr);
const double start_time =
total_time - (instr_node.GetReadyTime() + instr_node.GetCost());
const double end_time = start_time + instr_node.GetCost();
ScheduleProto::Instruction* instr_msg = proto.add_instructions();
instr_msg->set_id(instr->unique_id());
instr_msg->set_start_timestamp_cycles(start_time);
instr_msg->set_end_timestamp_cycles(end_time);
}
*proto.mutable_hlo_module() = computation->parent()->ToProto();
const std::string fn = absl::StrFormat("%s.schedule", computation->name());
DumpProtobufToFile(proto, debug_options, fn);
}
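// Replays the module's current schedule through the latency/cost model and
// measures, per collective kind, the cycles an async done spent waiting on
// its start ("wasted" cycles), along with total cycles and the
// memory-pressure peak for the computation.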
LatencyHidingScheduler::SchedulerStatistics
LatencyHidingScheduler::LatencyHidingStatistics(
const HloComputation* computation,
const LatencyEstimator* latency_estimator,
const AsyncTracker* async_tracker,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes) {
const HloModule* module = computation->parent();
absl::flat_hash_map<
HloOpcode,
std::vector<std::tuple<const HloInstruction*, int64_t, int64_t>>>
outstanding_collectives;
double current_time = 0;
enum class AsyncKind {
kNotAsync,
kAllGather,
kAllReduce,
kCollectivePermute,
kAllToAll,
kReduceScatter,
kSend,
kRecv,
kCollectiveBroadcast,
};
auto opcode_to_async_kind = [](HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kAllGather:
return AsyncKind::kAllGather;
case HloOpcode::kAllReduce:
return AsyncKind::kAllReduce;
case HloOpcode::kCollectiveBroadcast:
return AsyncKind::kCollectiveBroadcast;
case HloOpcode::kCollectivePermute:
return AsyncKind::kCollectivePermute;
case HloOpcode::kAllToAll:
return AsyncKind::kAllToAll;
case HloOpcode::kReduceScatter:
return AsyncKind::kReduceScatter;
case HloOpcode::kSend:
return AsyncKind::kSend;
case HloOpcode::kRecv:
return AsyncKind::kRecv;
default:
return AsyncKind::kNotAsync;
}
};
auto find_node_successor_edge = [](const HloGraphNode& graph_node,
const HloGraphNode& successor_node) {
auto edge_it = std::find_if(graph_node.GetSuccessors().begin(),
graph_node.GetSuccessors().end(),
[&successor_node](const HloEdge& edge) {
return &edge.Target() == &successor_node;
});
CHECK(edge_it != graph_node.GetSuccessors().end());
return edge_it;
};
auto find_outstanding_async = [&outstanding_collectives,
async_tracker](const HloInstruction* instr) {
const auto& collective_vec =
outstanding_collectives[async_tracker->GetCanonicalAsyncOp(*instr)
.inner];
auto it = absl::c_find_if(
collective_vec,
[instr](const std::tuple<const HloInstruction*, int64_t, int64_t>& p) {
return instr == std::get<0>(p);
});
CHECK(it != collective_vec.end());
return it;
};
absl::flat_hash_map<AsyncKind, double> wasted_time_per_collective;
SchedulerConfig config;
config.schedule_send_recvs = true;
config.use_real_cost_model = true;
std::unique_ptr<HloAliasAnalysis> hlo_alias_analysis =
HloAliasAnalysis::Run(module).value();
auto instructions_post_order = computation->MakeInstructionPostOrder();
HloScheduleGraph schedule_graph(&instructions_post_order,
nullptr, latency_estimator,
async_tracker);
async_tracker->PostProcessScheduleGraph(&schedule_graph, latency_estimator);
int64_t curr_pos = 0;
for (const HloInstruction* instr :
module->schedule().sequence(computation).instructions()) {
const HloGraphNode& instr_node = schedule_graph.GetNode(instr);
current_time += instr_node.GetCost();
if (async_tracker->IsSupportedAsyncStart(*instr)) {
outstanding_collectives[async_tracker->GetCanonicalAsyncOp(*instr).inner]
.push_back({instr, current_time, curr_pos});
} else if (async_tracker->IsSupportedAsyncDone(*instr)) {
const HloInstruction* start_instr = instr->operand(0);
if (async_tracker->IsSupportedAsyncStart(*start_instr)) {
auto it = find_outstanding_async(start_instr);
const HloGraphNode& start_node =
schedule_graph.GetNode(std::get<0>(*it));
auto edge_it = find_node_successor_edge(start_node, instr_node);
const double async_wasted_cycles = std::max(
0.0, edge_it->Latency() - (current_time - std::get<1>(*it)));
AsyncKind kind = opcode_to_async_kind(
async_tracker->GetCanonicalAsyncOp(*start_instr).inner);
wasted_time_per_collective[kind] += async_wasted_cycles;
current_time += async_wasted_cycles;
}
}
curr_pos++;
}
ModulePressureState module_pressure_state(module, hlo_alias_analysis.get(),
shape_size_bytes);
module_pressure_state.InitializePressureStates();
const MemoryPressureTracker::MemoryPressureState* memory_pressure_state =
module_pressure_state.ComputationIsMemoryTracked(computation)
? &module_pressure_state.GetPressureStateForComputation(computation)
: nullptr;
MemoryPressureTracker mem_pressure_tracker(
hlo_alias_analysis.get(), module_pressure_state.buffer_tracker(),
module_pressure_state.pressure_state_cache());
if (memory_pressure_state != nullptr) {
mem_pressure_tracker.Initialize(computation,
memory_pressure_state->live_ids_at_bottom);
}
return LatencyHidingScheduler::SchedulerStatistics{
computation,
wasted_time_per_collective[AsyncKind::kAllGather],
wasted_time_per_collective[AsyncKind::kAllReduce],
wasted_time_per_collective[AsyncKind::kCollectiveBroadcast],
wasted_time_per_collective[AsyncKind::kCollectivePermute],
wasted_time_per_collective[AsyncKind::kAllToAll],
wasted_time_per_collective[AsyncKind::kReduceScatter],
wasted_time_per_collective[AsyncKind::kSend],
wasted_time_per_collective[AsyncKind::kRecv],
current_time,
memory_pressure_state ? mem_pressure_tracker.initial_memory_pressure() +
memory_pressure_state->memory_peak
: 0};
}
std::string LatencyHidingScheduler::SchedulerStatisticsString(
const SchedulerStatistics& sched_stats) {
std::string result;
if (const HloComputation* comp = sched_stats.computation) {
absl::StrAppend(&result, "For computation: ", comp->name(), ", module ",
comp->parent()->name(), "(", comp->parent()->unique_id(),
")\n");
}
absl::StrAppend(&result, "Total wasted cycles: ",
sched_stats.all_gather_wasted_cycles +
sched_stats.all_reduce_wasted_cycles +
sched_stats.collective_broadcast_wasted_cycles +
sched_stats.collective_permute_wasted_cycles +
sched_stats.all_to_all_wasted_cycles +
sched_stats.reduce_scatter_wasted_cycles +
sched_stats.send_wasted_cycles +
sched_stats.recv_wasted_cycles,
"\n");
absl::StrAppend(&result, "Wasted cycles for all-reduce: ",
sched_stats.all_reduce_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for all-gather: ",
sched_stats.all_gather_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for collective-broadcast: ",
sched_stats.collective_broadcast_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for collective-permute: ",
sched_stats.collective_permute_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for all-to-all: ",
sched_stats.all_to_all_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for reduce-scatter: ",
sched_stats.reduce_scatter_wasted_cycles, "\n");
absl::StrAppend(&result,
"Wasted cycles for send: ", sched_stats.send_wasted_cycles,
"\n");
absl::StrAppend(&result,
"Wasted cycles for recv: ", sched_stats.recv_wasted_cycles,
"\n");
absl::StrAppend(&result, "Total cycles: ", sched_stats.total_cycles, "\n");
absl::StrAppend(&result, "Memory pressure peak (bytes): ",
sched_stats.memory_pressure_peak, "\n");
return result;
}
void LatencyHidingScheduler::LogScheduleStatistics(
const HloComputation* computation) {
XLA_VLOG_LINES(1, SchedulerStatisticsString(LatencyHidingStatistics(
computation, latency_estimator_.get(),
async_tracker_.get(), shape_size_bytes_)));
}
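// Pass entry point: reschedules every non-fusion computation that contains a
// supported async start or done. If the resulting memory peak exceeds the
// configured limit, the scheduler is rerun up to GetRerunTimes() times with
// the limit shrunk by 10% each iteration before the final sequences are
// installed.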
absl::StatusOr<bool> LatencyHidingScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(5) << "Original module:";
XLA_VLOG_LINES(5, module->ToString());
std::vector<HloComputation*> computations_to_schedule;
computations_to_schedule.reserve(module->computation_count());
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instr : computation->instructions()) {
if (async_tracker_->IsSupportedAsyncStart(*instr) ||
async_tracker_->IsSupportedAsyncDone(*instr)) {
computations_to_schedule.push_back(computation);
break;
}
}
}
if (computations_to_schedule.empty()) {
return false;
}
absl::flat_hash_map<HloComputation*, std::vector<HloInstruction*>>
saved_schedules;
TF_RETURN_IF_ERROR(scheduler_core_->InitializeScheduler(module));
for (HloComputation* computation : computations_to_schedule) {
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> new_schedule,
scheduler_core_->ScheduleComputation(computation));
saved_schedules[computation] = std::move(new_schedule);
}
uint64_t initial_memory_limit = scheduler_core_->GetMemoryLimit();
for (int64_t iter = 0;
iter < scheduler_core_->GetRerunTimes() &&
scheduler_core_->GetMemoryPeak() > initial_memory_limit;
iter++) {
LOG(INFO) << "LatencyHidingScheduler current memory usage: "
<< scheduler_core_->GetMemoryPeak()
<< " bytes, does not fit in limit: "
<< scheduler_core_->GetMemoryLimit()
<< ". Setting the new limit to "
<< scheduler_core_->GetMemoryLimit() * 0.9;
TF_RETURN_IF_ERROR(scheduler_core_->InitializeScheduler(module));
scheduler_core_->SetMemoryLimit(scheduler_core_->GetMemoryLimit() * 0.9);
for (HloComputation* computation : computations_to_schedule) {
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> new_schedule,
scheduler_core_->ScheduleComputation(computation));
saved_schedules[computation] = std::move(new_schedule);
}
}
LOG(INFO) << "LatencyHidingScheduler current memory usage: "
<< scheduler_core_->GetMemoryPeak()
<< " bytes. Current limit: " << scheduler_core_->GetMemoryLimit();
for (HloComputation* computation : computations_to_schedule) {
VLOG(1) << "Statistics before scheduling:";
LogScheduleStatistics(computation);
module->schedule().set_sequence(
computation, absl::MakeConstSpan(saved_schedules[computation]));
VLOG(1) << "Statistics after scheduling:";
LogScheduleStatistics(computation);
}
return true;
}
}  // namespace xla
#include "xla/service/latency_hiding_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
constexpr int kMaxConcurrentAsyncCollectivePermutes = 5;
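// Returns the position of `element` in `vec`, or vec.size() if absent.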
int PositionInVector(absl::Span<HloInstruction* const> vec,
const HloInstruction* element) {
return std::distance(vec.begin(), std::find(vec.begin(), vec.end(), element));
}
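// Scans the schedule, tracking how many collective-permutes are in flight
// between their start and done ops, and checks that the maximum concurrency
// never exceeds kMaxConcurrentAsyncCollectivePermutes.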
bool MaxConcurrentCollectivePermutesBelowThreshold(
absl::Span<HloInstruction* const> instruction_sequence) {
int max_concurrent_collective_permutes = 0;
int num_concurrent_collective_permutes = 0;
for (HloInstruction* instruction : instruction_sequence) {
if (instruction->opcode() == HloOpcode::kCollectivePermuteStart) {
num_concurrent_collective_permutes += 1;
max_concurrent_collective_permutes =
std::max(max_concurrent_collective_permutes,
num_concurrent_collective_permutes);
}
if (instruction->opcode() == HloOpcode::kCollectivePermuteDone) {
num_concurrent_collective_permutes -= 1;
}
}
int max_num_collective_permutes_threshold =
kMaxConcurrentAsyncCollectivePermutes;
return max_concurrent_collective_permutes <=
max_num_collective_permutes_threshold;
}
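// Returns the index of the instruction named `hlo_name` in the sequence
// (or the sequence length if no such instruction exists).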
int GetIndex(absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
}
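// Returns the index of the first instruction with the given opcode whose
// metadata op_name equals `metadata_name`.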
int GetOpcodeIndexUsingMetaData(
HloOpcode opcode, absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view metadata_name) {
return absl::c_find_if(instruction_sequence,
[metadata_name, opcode](HloInstruction* instruction) {
return instruction->metadata().op_name() ==
metadata_name &&
instruction->opcode() == opcode;
}) -
instruction_sequence.begin();
}
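// Default config for these tests: caps concurrent collective-permutes at
// kMaxConcurrentAsyncCollectivePermutes and leaves send/recv overlap
// effectively unbounded.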
SchedulerConfig GetDefaultSchedConfig() {
SchedulerConfig sched_cfg;
sched_cfg.collective_permute_overlap_limit =
kMaxConcurrentAsyncCollectivePermutes;
sched_cfg.send_recv_overlap_limit = INT32_MAX;
return sched_cfg;
}
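// Test latency estimator: the start->done latency of a collective-permute
// scales with its operand element count, so larger permutes need more work
// scheduled between start and done. Fusions and convolutions are costed by
// element count at medium/high rates; everything else costs kLowCost.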
class TestLatencyEstimator : public LatencyEstimator {
public:
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const override {
static constexpr TimeCost kLowLatency = 1.0;
if (from.GetInstr().opcode() == HloOpcode::kCollectivePermuteStart &&
target.GetInstr().opcode() == HloOpcode::kCollectivePermuteDone) {
return kLowLatency *
ShapeUtil::ElementsIn(from.GetInstr().operand(0)->shape());
}
return kLowLatency;
}
TimeCost NodeCost(const HloInstruction* instr) const override {
if (instr->IsLoopFusion()) {
return instr->shape().IsTuple()
? kMediumCost
: kLowCost * ShapeUtil::ElementsIn(instr->shape());
}
if (instr->IsOutputFusion() || instr->opcode() == HloOpcode::kConvolution) {
return instr->shape().IsTuple()
? kHighCost
: kMediumCost * ShapeUtil::ElementsIn(instr->shape());
}
return kLowCost;
}
int CyclesPerMicrosecond() const override { return 1; }
public:
static constexpr TimeCost kLowCost = 1.0;
static constexpr TimeCost kMediumCost = 1000.0;
static constexpr TimeCost kHighCost = 5000.0;
};
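// Test driver: converts synchronous collectives into async start/done pairs
// via AsyncCollectiveCreator, then runs LatencyHidingScheduler over `module`
// with the given config, latency estimator, and async tracker (a default
// AsyncTracker is constructed when none is supplied). Tuple shape sizes are
// summed recursively over their elements.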
absl::StatusOr<bool> RunScheduler(
HloModule* module, SchedulerConfig sched_config = GetDefaultSchedConfig(),
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>(),
std::unique_ptr<AsyncTracker> async_tracker = nullptr) {
AsyncCollectiveCreator::CollectiveCreatorConfig config{
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue};
TF_ASSIGN_OR_RETURN(bool value,
AsyncCollectiveCreator(std::move(config)).Run(module));
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
if (!async_tracker) {
async_tracker = std::make_unique<AsyncTracker>(sched_config);
}
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
value, LatencyHidingScheduler(std::move(latency_estimator),
std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
}
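// Test fixture whose ParseHloText parses and verifies an HLO module using
// the default per-test module config.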
class LatencyHidingSchedulerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(
auto hlo_module,
ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest()));
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(hlo_module));
}
};
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncSimple) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
%ag-start.2 = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done.2 = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
%ag-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done.2),
metadata={op_type="Bitcast" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ag-done-bc.2, %ag-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncReshaped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done-bc, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 2);
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kBitcast,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%c0, %c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped3) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllReduceAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %a = f32[] add(p0, p1)
}
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ar-start = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.1), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar0"}
%ar-start.2 = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.2), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start),
metadata={op_type="AllReduce" op_name="ar0"}
%ar-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done),
metadata={op_type="Bitcast" op_name="ar0"}
%ar-done.2 = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start.2),
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done.2),
metadata={op_type="Bitcast" op_name="ar1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ar-done-bc.2, %ar-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar1"));
}
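// Regression test for a while-loop aliasing bug: the start of
// collective-permute.2 must not be hoisted above add0, which still reads
// the (aliased) result of collective-permute.1.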
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT add = bf16[8]{0} add(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* while_body = hlo_module->GetComputationWithName("while_body");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(while_body).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* cp_start =
while_body->root_instruction()->operand(0)->operand(0);
EXPECT_EQ(cp_start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_LT(GetIndex(new_instruction_sequence, "add0"),
GetIndex(new_instruction_sequence, cp_start->name()));
}
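// Companion aliasing regression test: the start of collective-permute.2
// must be scheduled after collective-permute.1's done in the while body.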
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
negate1 = bf16[8]{0} negate(gte1)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
negate0 = bf16[8]{0} negate(collective-permute.1)
collective-permute.2 = bf16[8]{0} collective-permute(negate1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate0, gte2)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT add = bf16[8]{0} add(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* while_body = hlo_module->GetComputationWithName("while_body");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(while_body).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* cp_start_2 =
while_body->root_instruction()->operand(0)->operand(0);
EXPECT_EQ(cp_start_2->opcode(), HloOpcode::kCollectivePermuteStart);
const HloInstruction* cp_done_1 =
while_body->root_instruction()->operand(1)->operand(0);
EXPECT_EQ(cp_done_1->opcode(), HloOpcode::kCollectivePermuteDone);
EXPECT_LT(GetIndex(new_instruction_sequence, cp_done_1->name()),
GetIndex(new_instruction_sequence, cp_start_2->name()));
}
TEST_F(LatencyHidingSchedulerTest, SingleCollectivePermuteTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0), sharding={replicated}
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(), 3);
EXPECT_EQ(new_instruction_sequence[1]->opcode(),
HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(new_instruction_sequence[2]->opcode(),
HloOpcode::kCollectivePermuteDone);
}
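// The slice-variant (in-place) collective-permute below expands into a
// start/done pair, so the new schedule ends up exactly one instruction
// longer than the original.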
TEST_F(LatencyHidingSchedulerTest, InplaceUpdateCPTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1 (param_0.1: f32[4,4,128], param_1.2: u32[]) -> f32[4,4,128] {
%param_0.1 = f32[4,4,128]{2,1,0:T(4,128)} parameter(0)
%constant.15 = f32[]{:T(128)} constant(1)
%broadcast.2 = f32[2,4,128]{2,1,0:T(4,128)} broadcast(f32[]{:T(128)} %constant.15), dimensions={}
%param_1.2 = u32[] parameter(1)
%constant.14 = u32[] constant(0)
ROOT %dynamic-update-slice.1 = f32[4,4,128]{2,1,0:T(4,128)} dynamic-update-slice(f32[4,4,128]{2,1,0:T(4,128)} %param_0.1, f32[2,4,128]{2,1,0:T(4,128)} %broadcast.2, u32[] %param_1.2, u32[] %constant.14, u32[] %constant.14)
}
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="AllocateBuffer"
%fusion.1 = f32[4,4,128]{2,1,0:T(4,128)} fusion(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, u32[] %constant.5), kind=kLoop, calls=%fused_computation.1
%collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %fusion.1, f32[4,4,128]{2,1,0:T(4,128)} %fusion.1, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
ROOT %copy.3 = f32[4,4,128]{2,1,0:T(4,128)} copy(f32[4,4,128]{2,1,0:T(4,128)} %collective-permute)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 1);
}
TEST_F(LatencyHidingSchedulerTest, InplaceUpdateCPTest2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%sum (x.336: f32[], y.336: f32[]) -> f32[] {
%x.336 = f32[]{:T(128)} parameter(0)
%y.336 = f32[]{:T(128)} parameter(1)
ROOT %add.5252 = f32[]{:T(128)} add(f32[]{:T(128)} %x.336, f32[]{:T(128)} %y.336)
}
ENTRY %module () -> f32[33708,1024] {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2128,8,128]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%all-gather.1 = f32[4256,8,128]{2,1,0:T(8,128)} all-gather(f32[2128,8,128]{2,1,0:T(8,128)} %color_operand.1), replica_groups={{0,6},{2,4},{3,5},{1,7}}, dimensions={0}
%custom-call = f32[33712,8,128]{2,1,0:T(8,128)} custom-call(), custom_call_target="AllocateBuffer"
%dynamic-update-slice = f32[33712,8,128]{2,1,0:T(8,128)} dynamic-update-slice(f32[33712,8,128]{2,1,0:T(8,128)} %custom-call, f32[4256,8,128]{2,1,0:T(8,128)} %all-gather.1, u32[] %constant.19, u32[] %constant.19, u32[] %constant.19)
%tuple.7 = (u32[], u32[], u32[]) tuple(u32[] %constant.19, u32[] %constant.19, u32[] %constant.19)
%constant.20 = u32[] constant(4256)
%tuple.8 = (u32[], u32[], u32[]) tuple(u32[] %constant.20, u32[] %constant.19, u32[] %constant.19)
%collective-permute.3 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %dynamic-update-slice, f32[33712,8,128]{2,1,0:T(8,128)} %dynamic-update-slice, (u32[], u32[], u32[]) %tuple.7, (u32[], u32[], u32[]) %tuple.8), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
%tuple.9 = (u32[], u32[], u32[]) tuple(u32[] %constant.20, u32[] %constant.19, u32[] %constant.19)
%constant.21 = u32[] constant(8512)
%tuple.10 = (u32[], u32[], u32[]) tuple(u32[] %constant.21, u32[] %constant.19, u32[] %constant.19)
%collective-permute.4 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.3, f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.3, (u32[], u32[], u32[]) %tuple.9, (u32[], u32[], u32[]) %tuple.10), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
%tuple.11 = (u32[], u32[], u32[]) tuple(u32[] %constant.21, u32[] %constant.19, u32[] %constant.19)
%constant.22 = u32[] constant(12768)
%tuple.12 = (u32[], u32[], u32[]) tuple(u32[] %constant.22, u32[] %constant.19, u32[] %constant.19)
%collective-permute.5 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.4, f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.4, (u32[], u32[], u32[]) %tuple.11, (u32[], u32[], u32[]) %tuple.12), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
ROOT %bitcast.16 = f32[33708,1024]{1,0:T(8,128)} bitcast(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 4);
}
TEST_F(LatencyHidingSchedulerTest, TwoCollectivePermuteTypesOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = (f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,128,256]{2,1,0}) parameter(0)
gte0 = f32[16,64,256]{2,1,0} get-tuple-element(param), index=0
gte1 = f32[16,64,256]{2,1,0} get-tuple-element(param), index=1
cp0 = f32[16,64,256]{2,1,0} collective-permute(gte0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp0"}
cp1 = f32[16,64,256]{2,1,0} collective-permute(cp0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp1"}
c0 = f32[16,256,256]{2,1,0} convolution(gte0, gte1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
cp2 = f32[16,64,256]{2,1,0} collective-permute(gte1),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp2"}
c1 = f32[16,256,256]{2,1,0} convolution(cp0, gte1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
cp3 = f32[16,64,256]{2,1,0} collective-permute(cp2),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp3"}
gte2 = f32[16,128,256]{2,1,0} get-tuple-element(param), index=2
const0 = u32[] constant(0)
const1 = u32[] constant(8)
tuple0 = (u32[], u32[], u32[]) tuple(u32[] const0, u32[] const0, u32[] const0)
tuple1 = (u32[], u32[], u32[]) tuple(u32[] const1, u32[] const0, u32[] const0)
cp4 = f32[16,128,256]{2,1,0} collective-permute(gte2, gte2, tuple0, tuple1),
source_target_pairs={{2,3},{3,2}},
slice_sizes={{8,128,256}},
metadata={op_type="CollectivePermute" op_name="cp4"}
cp5 = f32[16,128,256]{2,1,0} collective-permute(cp4, cp4, tuple0, tuple1),
source_target_pairs={{2,3},{3,2}},
slice_sizes={{8,128,256}},
metadata={op_type="CollectivePermute" op_name="cp5"}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,128,256]{2,1,0}) tuple(c1, cp3, cp5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 6);
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp4"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp4"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp5"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp5"),
GetIndex(new_instruction_sequence, "c1"));
}
TEST_F(LatencyHidingSchedulerTest, SerialCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule serial_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%add.3 = bf16[8]{0} add(%parameter.1, %parameter.1)
%add.4 = bf16[8]{0} add(%add.3, parameter.1)
%add.5 = bf16[8]{0} add(%collective-permute.2, %add.4)
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} add.5), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(original_instruction_sequence.size(), 6);
EXPECT_EQ(new_instruction_sequence.size(), 8);
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]));
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 3);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
HloInstruction* collective_permute_start_1 =
original_instruction_sequence[0]->users()[0];
EXPECT_EQ(
PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]) +
1,
PositionInVector(new_instruction_sequence, collective_permute_start_1));
EXPECT_EQ(collective_permute_start_1->user_count(), 1);
EXPECT_EQ(collective_permute_start_1->users()[0]->opcode(),
HloOpcode::kCollectivePermuteDone);
HloInstruction* collective_permute_done_1 =
collective_permute_start_1->users()[0];
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])) ||
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[1])));
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) <
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])));
EXPECT_EQ(new_instruction_sequence[7]->opcode(),
HloOpcode::kCollectivePermuteDone);
EXPECT_GT(
PositionInVector(new_instruction_sequence,
new_instruction_sequence[7]->operand(0)),
PositionInVector(new_instruction_sequence, collective_permute_done_1));
}
TEST_F(LatencyHidingSchedulerTest, BackToBackCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule back_to_back_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} collective-permute.2), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(original_instruction_sequence.size(), 3);
EXPECT_EQ(new_instruction_sequence.size(), 5);
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 1);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
HloInstruction* collective_permute_start_1 =
original_instruction_sequence[0]->users()[0];
EXPECT_EQ(
PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]) +
1,
PositionInVector(new_instruction_sequence, collective_permute_start_1));
EXPECT_EQ(collective_permute_start_1->user_count(), 1);
EXPECT_EQ(collective_permute_start_1->users()[0]->opcode(),
HloOpcode::kCollectivePermuteDone);
HloInstruction* collective_permute_done_1 =
collective_permute_start_1->users()[0];
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])) ||
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[1])));
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) <
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])));
EXPECT_EQ(new_instruction_sequence[4]->opcode(),
HloOpcode::kCollectivePermuteDone);
EXPECT_GT(
PositionInVector(new_instruction_sequence,
new_instruction_sequence[4]->operand(0)),
PositionInVector(new_instruction_sequence, collective_permute_done_1));
}
TEST_F(LatencyHidingSchedulerTest, ParallelCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%constant.3 = bf16[] constant(1)
%broadcast.4 = bf16[8]{0} broadcast(bf16[] %constant.3), dimensions={}
%add.5 = bf16[8]{0} add(bf16[8]{0} %collective-permute.2, bf16[8]{0} %broadcast.4)
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{1,0},{0,3},{3,2}}
%add.6 = bf16[8]{0} add(bf16[8]{0} %collective-permute.6, bf16[8]{0} %add.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[6]));
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 2);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(original_instruction_sequence[0]->users()[1]->opcode(),
HloOpcode::kCollectivePermuteStart);
int collective_permute_1_pos = PositionInVector(
new_instruction_sequence, original_instruction_sequence[0]->users()[0]);
int collective_permute_2_pos = PositionInVector(
new_instruction_sequence, original_instruction_sequence[0]->users()[1]);
EXPECT_TRUE((collective_permute_1_pos == collective_permute_2_pos + 1) ||
(collective_permute_1_pos + 1 == collective_permute_2_pos));
}
TEST_F(LatencyHidingSchedulerTest, MaxConcurrentCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%parameter.2 = bf16[8]{0} parameter(1)
%parameter.3 = bf16[8]{0} parameter(2)
%collective-permute.4 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.5 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{1,0},{0,3},{3,2}}
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.2), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.7 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.2), source_target_pairs={{1,0},{0,3},{3,2}}
%collective-permute.8 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.3), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.9 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.3), source_target_pairs={{1,0},{0,3},{3,2}}
%add.10 = bf16[8]{0} add(bf16[8]{0} %collective-permute.8, bf16[8]{0} %collective-permute.9)
%add.11 = bf16[8]{0} add(bf16[8]{0} %collective-permute.7, bf16[8]{0} %add.10)
%add.12 = bf16[8]{0} add(bf16[8]{0} %collective-permute.6, bf16[8]{0} %add.11)
%add.13 = bf16[8]{0} add(bf16[8]{0} %collective-permute.5, bf16[8]{0} %add.12)
ROOT %add.14 = bf16[8]{0} add(bf16[8]{0} %collective-permute.4, bf16[8]{0} %add.13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_TRUE(
MaxConcurrentCollectivePermutesBelowThreshold(new_instruction_sequence));
}
TEST_F(LatencyHidingSchedulerTest, BalanceChainedCollectivePermutesNoOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[8]{0} parameter(0)
collective-permute.1 = bf16[8]{0} collective-permute(param), source_target_pairs={{0,1},{1,2},{2,3}}
copy.2 = bf16[8]{0} copy(collective-permute.1)
ROOT collective-permute.2 = bf16[8]{0} collective-permute(copy.2), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
}
TEST_F(LatencyHidingSchedulerTest, ExistingSingleCollectivePermuteAsyncTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
%collective-permute-start.1 = (f32[16,256,256]{2,1,0},
f32[16,256,256]{2,1,0}, u32[], u32[]) collective-permute-start(
f32[16,256,256]{2,1,0} p2), source_target_pairs={{0,1},{1,2},{2,3}},
channel_id=1, metadata={op_type="CollectivePermute" op_name="cp0"}
%collective-permute-done.1 = f32[16,256,256]{2,1,0} collective-permute-done(
(f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0},
u32[], u32[]) collective-permute-start.1),
metadata={op_type="CollectivePermute" op_name="cp0"}
ROOT a = f32[16,256,256]{2,1,0} add(c0, collective-permute-done.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GE(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
}
TEST_F(LatencyHidingSchedulerTest, BalanceChainExtended) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
cp0 = f32[16,256,256]{2,1,0} collective-permute(p2),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp0"}
cp1 = f32[16,256,256]{2,1,0} collective-permute(p3),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp1"}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
t0 = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(cp0, cp1)
gte0 = f32[16,256,256]{2,1,0} get-tuple-element(t0), index=0
gte1 = f32[16,256,256]{2,1,0} get-tuple-element(t0), index=1
cp2 = f32[16,256,256]{2,1,0} collective-permute(gte0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp2"}
a2 = f32[16,256,256]{2,1,0} add(cp2, c0)
cp3 = f32[16,256,256]{2,1,0} collective-permute(gte1),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp3"}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(a2, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
}
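
// Regression-style test on an HLO snippet from a partitioned looped-einsum
// computation: the four collective permutes should be balanced so that one
// pair overlaps fusion.128 and the other pair overlaps fusion.126.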
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1793 (param_0.4944: s32[16], param_1.5648: u32[], param_2.3959: u32[], param_3.3338: u32[], param_4.2302: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.4944 = s32[16]{0:T(128)} parameter(0)
%param_1.5648 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1806 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_1.5648), dynamic_slice_sizes={1}
%param_2.3959 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1807 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_2.3959), dynamic_slice_sizes={1}
%param_3.3338 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1808 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_3.3338), dynamic_slice_sizes={1}
%param_4.2302 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1809 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_4.2302), dynamic_slice_sizes={1}
ROOT %tuple.1384 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1806, s32[1]{0:T(128)} %dynamic-slice.1807, s32[1]{0:T(128)} %dynamic-slice.1808, s32[1]{0:T(128)} %dynamic-slice.1809)
}
%fused_computation.109 (param_0.225: bf16[8,1024,1,20,256,1,1]) -> bf16[8,1024,1,20,256,1,1,1] {
%param_0.225 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.713 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} bitcast(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.225)
}
%fused_computation.110.clone (param_0.251: s32[], param_1.277: bf16[1,20,256,1,16,4,288,1], param_2.190: s32[]) -> bf16[1,20,256,2,1,4,288,1] {
%param_1.277 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(1)
%constant.6014 = bf16[]{:T(256)} constant(-inf)
%pad.370 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.277, bf16[]{:T(256)} %constant.6014), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0x0_0
%constant.6004 = s32[]{:T(128)} constant(0)
%param_0.251 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1503 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.370, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %param_0.251, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
%pad.369 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.277, bf16[]{:T(256)} %constant.6014), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0x0_0
%param_2.190 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1502 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.369, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %param_2.190, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
ROOT %maximum.513 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} maximum(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1503, bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1502)
}
%fused_computation.108 (param_0.235: bf16[8,1024,1,20,256,1,1], param_1.276: s32[], param_2.187: bf16[1,20,256,1,16,4,288,1], param_3.145: s32[]) -> bf16[2,1,4,288,8,1024,1,1] {
%param_1.276 = s32[]{:T(128)} parameter(1)
%param_2.187 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(2)
%param_3.145 = s32[]{:T(128)} parameter(3)
%fusion.132 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.276, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_2.187, s32[]{:T(128)} %param_3.145), kind=kLoop, calls=%fused_computation.110.clone
%param_0.235 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
%fusion.129 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.235), kind=kLoop, calls=%fused_computation.109
ROOT %convolution.170 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} convolution(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %fusion.132, bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} %fusion.129), window={size=1x1x8x1x20x1 pad=0_0x0_0x7_7x0_0x0_0x0_0 rhs_reversal=0x0x1x0x0x0}, dim_labels=34f501b2_2o34i015->501b2f34
}
%fused_computation.117 (param_0.248: bf16[1,4,288,8,1024,1,1], param_1.273: bf16[2,1,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.248 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} parameter(0)
%param_1.273 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} parameter(1)
%slice.1252 = bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} slice(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %param_1.273), slice={[0:1], [0:1], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
%bitcast.719 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} bitcast(bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %slice.1252)
ROOT %add.3083 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %param_0.248, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %bitcast.719)
}
%fused_computation.107 (param_0.223: bf16[8,1024,1,20,256,1,1]) -> bf16[8,1024,1,20,256,1,1,1] {
%param_0.223 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.711 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} bitcast(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.223)
}
%fused_computation.111.clone (param_0.250: s32[], param_1.275: bf16[1,20,256,1,16,4,288,1], param_2.189: s32[]) -> bf16[1,20,256,2,1,4,288,1] {
%param_1.275 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(1)
%constant.6009 = bf16[]{:T(256)} constant(-inf)
%pad.374 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.275, bf16[]{:T(256)} %constant.6009), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0x0_0
%constant.5999 = s32[]{:T(128)} constant(0)
%param_0.250 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1507 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.374, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %param_0.250, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
%pad.373 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.275, bf16[]{:T(256)} %constant.6009), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0x0_0
%param_2.189 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1506 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.373, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %param_2.189, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
ROOT %maximum.514 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} maximum(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1507, bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1506)
}
%fused_computation.106 (param_0.239: bf16[8,1024,1,20,256,1,1], param_1.274: s32[], param_2.185: bf16[1,20,256,1,16,4,288,1], param_3.144: s32[]) -> bf16[2,1,4,288,8,1024,1,1] {
%param_1.274 = s32[]{:T(128)} parameter(1)
%param_2.185 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(2)
%param_3.144 = s32[]{:T(128)} parameter(3)
%fusion.133 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.274, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_2.185, s32[]{:T(128)} %param_3.144), kind=kLoop, calls=%fused_computation.111.clone
%param_0.239 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
%fusion.127 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.239), kind=kLoop, calls=%fused_computation.107
ROOT %convolution.169 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} convolution(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %fusion.133, bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} %fusion.127), window={size=1x1x8x1x20x1 pad=0_0x0_0x7_7x0_0x0_0x0_0 rhs_reversal=0x0x1x0x0x0}, dim_labels=34f501b2_2o34i015->501b2f34
}
%fused_computation.115 (param_0.244: bf16[1,4,288,8,1024,1,1], param_1.270: bf16[2,1,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.244 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} parameter(0)
%param_1.270 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} parameter(1)
%slice.1249 = bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} slice(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %param_1.270), slice={[0:1], [0:1], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
%bitcast.716 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} bitcast(bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %slice.1249)
ROOT %add.3082 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %param_0.244, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %bitcast.716)
}
%fused_computation.113 (param_0.241: bf16[1,4,288,8,1024,1,1], param_1.267: bf16[2,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.241 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(0)
%param_1.267 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(1)
%slice.1246 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} slice(bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_1.267), slice={[1:2], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
ROOT %add.3081 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_0.241, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %slice.1246)
}
%fused_computation.112 (param_0.240: bf16[1,4,288,8,1024,1,1], param_1.265: bf16[2,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.240 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(0)
%param_1.265 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(1)
%slice.1245 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} slice(bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_1.265), slice={[1:2], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
ROOT %add.3080 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_0.240, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %slice.1245)
}
)";
hlo_string += R"(
ENTRY entry {
%param.163 = (bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.20289 = bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=0
%get-tuple-element.20290 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=1
%get-tuple-element.20291 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=2
%collective-permute.8 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %get-tuple-element.20291), channel_id=22, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%iota.36 = s32[16]{0:T(128)} iota(), iota_dimension=0
%constant.3283 = u32[1024]{0:T(1024)} constant({...})
%partition-id.6 = u32[]{:T(128)} partition-id()
%dynamic-slice.254 = u32[1]{0:T(128)} dynamic-slice(u32[1024]{0:T(1024)} %constant.3283, u32[]{:T(128)} %partition-id.6), dynamic_slice_sizes={1}
%bitcast.55 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.254)
%constant.5148 = u32[]{:T(128)} constant(8)
%add.2615 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.55, u32[]{:T(128)} %constant.5148)
%get-tuple-element.20293 = u32[]{:T(128)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=4
%copy.2385 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.20293)
%constant.3305 = u32[]{:T(128)} constant(1)
%add.1503 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %constant.3305)
%subtract.200 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2615, u32[]{:T(128)} %add.1503)
%constant.4875 = u32[]{:T(128)} constant(15)
%and.29 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.200, u32[]{:T(128)} %constant.4875)
%add.1504 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1503, u32[]{:T(128)} %bitcast.55)
%constant.3285 = u32[]{:T(128)} constant(9)
%add.1506 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1504, u32[]{:T(128)} %constant.3285)
%and.28 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1506, u32[]{:T(128)} %constant.4875)
%subtract.198 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2615, u32[]{:T(128)} %copy.2385)
%and.27 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.198, u32[]{:T(128)} %constant.4875)
%add.1498 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %bitcast.55)
%add.1500 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1498, u32[]{:T(128)} %constant.3285)
%and.26 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1500, u32[]{:T(128)} %constant.4875)
%fusion.1987 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[16]{0:T(128)} %iota.36, u32[]{:T(128)} %and.29, u32[]{:T(128)} %and.28, u32[]{:T(128)} %and.27, u32[]{:T(128)} %and.26), kind=kLoop, calls=%fused_computation.1793
%get-tuple-element.19793 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=3
%bitcast.56 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19793)
%bitcast.54 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} bitcast(bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} %get-tuple-element.20289)
%get-tuple-element.19792 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=2
%bitcast.57 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19792)
%fusion.128 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, s32[]{:T(128)} %bitcast.56, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %bitcast.54, s32[]{:T(128)} %bitcast.57), kind=kOutput, calls=%fused_computation.108
%fusion.139 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %collective-permute.8, bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.128), kind=kLoop, calls=%fused_computation.117
%collective-permute.10 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %fusion.139), channel_id=24, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%get-tuple-element.19791 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=1
%bitcast.60 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19791)
%get-tuple-element.19790 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=0
%bitcast.61 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19790)
%fusion.126 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, s32[]{:T(128)} %bitcast.60, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %bitcast.54, s32[]{:T(128)} %bitcast.61), kind=kOutput, calls=%fused_computation.106
%fusion.137 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %collective-permute.10, bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.126), kind=kLoop, calls=%fused_computation.115
%get-tuple-element.20292 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=3
%collective-permute.9 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %get-tuple-element.20292), channel_id=23, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%bitcast.63 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} bitcast(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.128)
%fusion.135 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %collective-permute.9, bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %bitcast.63), kind=kLoop, calls=%fused_computation.113
%collective-permute.11 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %fusion.135), channel_id=25, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%bitcast.64 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} bitcast(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.126)
%fusion.134 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %collective-permute.11, bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %bitcast.64), kind=kLoop, calls=%fused_computation.112
%constant.5023 = u32[]{:T(128)} constant(2)
%add.1925 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %constant.5023)
ROOT %tuple.1457 = (bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} %get-tuple-element.20289, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %fusion.137, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %fusion.134, u32[]{:T(128)} %add.1925)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
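  // Each pair of collective permutes should bracket one of the two output
  // fusions: both starts before the fusion, both dones after it.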
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.126"));
}
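
// Verifies that the scheduler moves the convolutions so that cp0/cp1
// overlap c1 while the dependent cp2/cp3 overlap c0.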
TEST_F(LatencyHidingSchedulerTest, MoveCertainConv) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
cp0 = f32[16,256,256]{2,1,0} collective-permute(p2),
source_target_pairs={{0,1},{1,0}}
cp1 = f32[16,256,256]{2,1,0} collective-permute(p3),
source_target_pairs={{0,1},{1,0}}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
a0 = f32[16,256,256]{2,1,0} add(cp0, c1)
cp2 = f32[16,256,256]{2,1,0} collective-permute(a0),
source_target_pairs={{0,1},{1,0}}
a2 = f32[16,256,256]{2,1,0} add(cp2, c0)
a1 = f32[16,256,256]{2,1,0} add(cp1, c1)
cp3 = f32[16,256,256]{2,1,0} collective-permute(a1),
source_target_pairs={{0,1},{1,0}}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(a2, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
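  // cp0/cp1 should be in flight across c1; cp2/cp3, which depend on the
  // adds, should be in flight across c0.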
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "c0"));
}
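
// Same balancing property as above, exercised on a 32-partition
// looped-einsum snippet: one pair of collective permutes should overlap
// fusion.146, the other pair fusion.144.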
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum2) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1851 (param_0.5170: s32[32], param_1.5848: u32[], param_2.4103: u32[], param_3.3513: u32[], param_4.2356: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.5170 = s32[32]{0:T(128)} parameter(0)
%param_1.5848 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1636 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_1.5848), dynamic_slice_sizes={1}
%param_2.4103 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1637 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_2.4103), dynamic_slice_sizes={1}
%param_3.3513 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1638 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_3.3513), dynamic_slice_sizes={1}
%param_4.2356 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1639 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_4.2356), dynamic_slice_sizes={1}
ROOT %tuple.1297 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1636, s32[1]{0:T(128)} %dynamic-slice.1637, s32[1]{0:T(128)} %dynamic-slice.1638, s32[1]{0:T(128)} %dynamic-slice.1639)
}
%fused_computation.117 (param_0.249: bf16[16,1024,1,10,256,1]) -> bf16[16,1024,1,10,256,1,1] {
%param_0.249 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.672 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} bitcast(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.249)
}
%fused_computation.124.clone (param_0.277: s32[], param_1.330: bf16[1,10,256,1,32,576,1], param_2.233: s32[]) -> bf16[1,10,256,2,1,576,1] {
%param_1.330 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(1)
%constant.5658 = bf16[]{:T(256)} constant(-inf)
%pad.357 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.330, bf16[]{:T(256)} %constant.5658), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0
%constant.5648 = s32[]{:T(128)} constant(0)
%param_0.277 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1327 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.357, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %param_0.277, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648), dynamic_slice_sizes={1,10,256,2,1,576,1}
%pad.363 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.330, bf16[]{:T(256)} %constant.5658), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0
%param_2.233 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1333 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.363, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %param_2.233, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648), dynamic_slice_sizes={1,10,256,2,1,576,1}
ROOT %maximum.510 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} maximum(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1327, bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1333)
}
%fused_computation.116 (param_0.264: bf16[16,1024,1,10,256,1], param_1.329: s32[], param_2.230: bf16[1,10,256,1,32,576,1], param_3.197: s32[]) -> bf16[2,1,576,16,1024,1,1] {
%param_1.329 = s32[]{:T(128)} parameter(1)
%param_2.230 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(2)
%param_3.197 = s32[]{:T(128)} parameter(3)
%fusion.155 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.329, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_2.230, s32[]{:T(128)} %param_3.197), kind=kLoop, calls=%fused_computation.124.clone
%param_0.264 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%fusion.147 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.264), kind=kLoop, calls=%fused_computation.117
ROOT %convolution.168 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} convolution(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %fusion.155, bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} %fusion.147), window={size=1x16x1x10x1 pad=0_0x15_15x0_0x0_0x0_0 rhs_reversal=0x1x0x0x0}, dim_labels=23f40b1_1o23i04->40b1f23
}
%fused_computation.123 (param_0.258: bf16[1,576,16,1024,1,1], param_1.306: bf16[2,1,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.258 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} parameter(0)
%param_1.306 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} parameter(1)
%slice.1132 = bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} slice(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %param_1.306), slice={[0:1], [0:1], [0:576], [0:16], [0:1024], [0:1], [0:1]}
%bitcast.678 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} bitcast(bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %slice.1132)
ROOT %add.3125 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %param_0.258, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %bitcast.678)
}
%fused_computation.115 (param_0.247: bf16[16,1024,1,10,256,1]) -> bf16[16,1024,1,10,256,1,1] {
%param_0.247 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.670 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} bitcast(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.247)
}
%fused_computation.125.clone (param_0.276: s32[], param_1.328: bf16[1,10,256,1,32,576,1], param_2.232: s32[]) -> bf16[1,10,256,2,1,576,1] {
%param_1.328 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(1)
%constant.5653 = bf16[]{:T(256)} constant(-inf)
%pad.360 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.328, bf16[]{:T(256)} %constant.5653), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0
%constant.5643 = s32[]{:T(128)} constant(0)
%param_0.276 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1330 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.360, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %param_0.276, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643), dynamic_slice_sizes={1,10,256,2,1,576,1}
%pad.366 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.328, bf16[]{:T(256)} %constant.5653), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0
%param_2.232 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1336 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.366, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %param_2.232, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643), dynamic_slice_sizes={1,10,256,2,1,576,1}
ROOT %maximum.512 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} maximum(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1330, bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1336)
}
%fused_computation.114 (param_0.269: bf16[16,1024,1,10,256,1], param_1.327: s32[], param_2.228: bf16[1,10,256,1,32,576,1], param_3.196: s32[]) -> bf16[2,1,576,16,1024,1,1] {
%param_1.327 = s32[]{:T(128)} parameter(1)
%param_2.228 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(2)
%param_3.196 = s32[]{:T(128)} parameter(3)
%fusion.157 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.327, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_2.228, s32[]{:T(128)} %param_3.196), kind=kLoop, calls=%fused_computation.125.clone
%param_0.269 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%fusion.145 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.269), kind=kLoop, calls=%fused_computation.115
ROOT %convolution.167 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} convolution(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %fusion.157, bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} %fusion.145), window={size=1x16x1x10x1 pad=0_0x15_15x0_0x0_0x0_0 rhs_reversal=0x1x0x0x0}, dim_labels=23f40b1_1o23i04->40b1f23
}
%fused_computation.121 (param_0.254: bf16[1,576,16,1024,1,1], param_1.303: bf16[2,1,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.254 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} parameter(0)
%param_1.303 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} parameter(1)
%slice.1129 = bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} slice(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %param_1.303), slice={[0:1], [0:1], [0:576], [0:16], [0:1024], [0:1], [0:1]}
%bitcast.675 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} bitcast(bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %slice.1129)
ROOT %add.3124 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %param_0.254, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %bitcast.675)
}
%fused_computation.119 (param_0.251: bf16[1,576,16,1024,1,1], param_1.300: bf16[2,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.251 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(0)
%param_1.300 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(1)
%slice.1126 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} slice(bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_1.300), slice={[1:2], [0:576], [0:16], [0:1024], [0:1], [0:1]}
ROOT %add.3123 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_0.251, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %slice.1126)
}
%fused_computation.118 (param_0.250: bf16[1,576,16,1024,1,1], param_1.298: bf16[2,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.250 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(0)
%param_1.298 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(1)
%slice.1125 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} slice(bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_1.298), slice={[1:2], [0:576], [0:16], [0:1024], [0:1], [0:1]}
ROOT %add.3122 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_0.250, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %slice.1125)
}
)";
hlo_string += R"(
ENTRY entry {
%constant.4782 = u32[]{:T(128)} constant(16)
%constant.4661 = u32[]{:T(128)} constant(2)
%constant.4517 = u32[]{:T(128)} constant(31)
%constant.3078 = u32[]{:T(128)} constant(1)
%constant.3060 = u32[]{:T(128)} constant(17)
%partition-id.6 = u32[]{:T(128)} partition-id()
%param.139 = (bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.16007 = u32[]{:T(128)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=4
%copy.1385 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.16007)
%add.1492 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %constant.3078)
%add.1938 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %constant.4661)
%get-tuple-element.16004 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=1
%get-tuple-element.16003 = bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=0
%bitcast.58 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} bitcast(bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} %get-tuple-element.16003)
%get-tuple-element.16005 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=2
%get-tuple-element.16006 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=3
%constant.3058 = u32[1024]{0:T(1024)} constant({...})
%dynamic-slice.218 = u32[1]{0:T(128)} dynamic-slice(u32[1024]{0:T(1024)} %constant.3058, u32[]{:T(128)} %partition-id.6), dynamic_slice_sizes={1}
%bitcast.59 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.218)
%add.1493 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1492, u32[]{:T(128)} %bitcast.59)
%add.1495 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1493, u32[]{:T(128)} %constant.3060)
%and.28 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1495, u32[]{:T(128)} %constant.4517)
%add.2636 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.59, u32[]{:T(128)} %constant.4782)
%subtract.200 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2636, u32[]{:T(128)} %add.1492)
%and.29 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.200, u32[]{:T(128)} %constant.4517)
%subtract.198 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2636, u32[]{:T(128)} %copy.1385)
%and.27 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.198, u32[]{:T(128)} %constant.4517)
%add.1487 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %bitcast.59)
%add.1489 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1487, u32[]{:T(128)} %constant.3060)
%and.26 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1489, u32[]{:T(128)} %constant.4517)
%iota.60 = s32[32]{0:T(128)} iota(), iota_dimension=0
%fusion.2068 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[32]{0:T(128)} %iota.60, u32[]{:T(128)} %and.29, u32[]{:T(128)} %and.28, u32[]{:T(128)} %and.27, u32[]{:T(128)} %and.26), kind=kLoop, calls=%fused_computation.1851
%get-tuple-element.15499 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=3
%bitcast.60 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15499)
%get-tuple-element.15498 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=2
%bitcast.61 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15498)
%get-tuple-element.15497 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=1
%bitcast.64 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15497)
%get-tuple-element.15496 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=0
%bitcast.65 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15496)
%collective-permute.9 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %get-tuple-element.16006), channel_id=23, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,16},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%collective-permute.8 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %get-tuple-element.16005), channel_id=22, source_target_pairs={{0,31},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,15},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%fusion.144 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, s32[]{:T(128)} %bitcast.64, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %bitcast.58, s32[]{:T(128)} %bitcast.65), kind=kOutput, calls=%fused_computation.114
%bitcast.68 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} bitcast(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.144)
%fusion.146 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, s32[]{:T(128)} %bitcast.60, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %bitcast.58, s32[]{:T(128)} %bitcast.61), kind=kOutput, calls=%fused_computation.116
%fusion.153 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %collective-permute.8, bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.146), kind=kLoop, calls=%fused_computation.123
%collective-permute.10 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %fusion.153), channel_id=24, source_target_pairs={{0,31},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,15},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%fusion.151 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %collective-permute.10, bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.144), kind=kLoop, calls=%fused_computation.121
%bitcast.67 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} bitcast(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.146)
%fusion.149 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %collective-permute.9, bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %bitcast.67), kind=kLoop, calls=%fused_computation.119
%collective-permute.11 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %fusion.149), channel_id=25, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,16},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%fusion.148 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %collective-permute.11, bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %bitcast.68), kind=kLoop, calls=%fused_computation.118
ROOT %tuple.1373 = (bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} %get-tuple-element.16003, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %fusion.151, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %fusion.148, u32[]{:T(128)} %add.1938)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
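  // Starts should precede, and dones should follow, the fusion each
  // collective-permute pair is balanced against.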
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.144"));
}
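
// A third looped-einsum variant exercising the same chained
// collective-permute balancing.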
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum3) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1799 (param_0.4926: s32[16], param_1.5709: u32[], param_2.3976: u32[], param_3.3386: u32[], param_4.2299: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.4926 = s32[16]{0:T(128)} parameter(0)
%param_1.5709 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1611 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_1.5709), dynamic_slice_sizes={1}
%param_2.3976 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1612 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_2.3976), dynamic_slice_sizes={1}
%param_3.3386 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1613 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_3.3386), dynamic_slice_sizes={1}
%param_4.2299 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1614 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_4.2299), dynamic_slice_sizes={1}
ROOT %tuple.1346 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1611, s32[1]{0:T(128)} %dynamic-slice.1612, s32[1]{0:T(128)} %dynamic-slice.1613, s32[1]{0:T(128)} %dynamic-slice.1614)
}
%fused_computation.243 (param_0.505: bf16[8,2048,2,576,1,1], param_1.586: bf16[8,2048,2,576,1,1]) -> bf16[8,2048,4,576,1,1] {
%param_1.586 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(1)
%constant.5838 = bf16[]{:T(256)} constant(-inf)
%pad.368 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_1.586, bf16[]{:T(256)} %constant.5838), padding=0_0x0_0x0_2x0_0x0_0x0_0
%param_0.505 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(0)
%pad.367 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_0.505, bf16[]{:T(256)} %constant.5838), padding=0_0x0_0x2_0x0_0x0_0x0_0
ROOT %maximum.528 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} maximum(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.368, bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.367)
}
%fused_computation.244 (param_0.507: bf16[8,2048,2,576,1,1], param_1.585: bf16[8,2048,2,576,1,1]) -> bf16[8,2048,4,576,1,1] {
%param_1.585 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(1)
%constant.5832 = bf16[]{:T(256)} constant(-inf)
%pad.370 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_1.585, bf16[]{:T(256)} %constant.5832), padding=0_0x0_0x0_2x0_0x0_0x0_0
%param_0.507 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(0)
%pad.369 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_0.507, bf16[]{:T(256)} %constant.5832), padding=0_0x0_0x2_0x0_0x0_0x0_0
ROOT %maximum.529 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} maximum(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.370, bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.369)
}
%fused_computation.247 (param_0.511: bf16[8,2048,2,2,576,1,1]) -> bf16[8,2048,2,2,576,1,1] {
%param_0.511 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
ROOT %copy.2292 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} copy(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.511)
}
%fused_computation.248.clone (param_0.526: s32[], param_1.589: bf16[1,32,576,1,36,256,1], param_2.400: s32[]) -> bf16[2,2,576,1,36,256,1] {
%param_1.589 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(1)
%constant.5843 = bf16[]{:T(256)} constant(-inf)
%pad.378 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.589, bf16[]{:T(256)} %constant.5843), padding=0_1x0_0x0_0x0_0x0_0x0_0x0_0
%constant.5853 = s32[]{:T(128)} constant(0)
%param_0.526 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1382 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.378, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %param_0.526, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853), dynamic_slice_sizes={2,2,576,1,36,256,1}
%pad.377 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.589, bf16[]{:T(256)} %constant.5843), padding=1_0x0_0x0_0x0_0x0_0x0_0x0_0
%param_2.400 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1381 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.377, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %param_2.400, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853), dynamic_slice_sizes={2,2,576,1,36,256,1}
ROOT %maximum.532 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} maximum(bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1382, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1381)
}
%fused_computation.246 (param_0.521: bf16[8,2048,2,2,576,1,1], param_1.588: s32[], param_2.399: bf16[1,32,576,1,36,256,1], param_3.247: s32[]) -> bf16[8,2048,1,36,256,1,1] {
%param_0.521 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
%fusion.268 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.521), kind=kLoop, calls=%fused_computation.247
%param_1.588 = s32[]{:T(128)} parameter(1)
%param_2.399 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(2)
%param_3.247 = s32[]{:T(128)} parameter(3)
%fusion.271 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.588, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_2.399, s32[]{:T(128)} %param_3.247), kind=kLoop, calls=%fused_computation.248.clone
ROOT %convolution.172 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} convolution(bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} %fusion.268, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %fusion.271), window={size=1x1x36x2x2 pad=0_0x0_0x35_35x0_0x0_0 rhs_reversal=0x1x1x0x0}, dim_labels=0b43f12_43i12o0->0b12f34
}
%fused_computation.245 (param_0.508: bf16[8,2048,2,2,576,1,1]) -> bf16[8,2048,2,2,576,1,1] {
%param_0.508 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
ROOT %copy.2290 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} copy(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.508)
}
%fused_computation.249.clone (param_0.525: s32[], param_1.587: bf16[1,32,576,1,36,256,1], param_2.398: s32[]) -> bf16[2,2,576,1,36,256,1] {
%param_1.587 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(1)
%constant.5837 = bf16[]{:T(256)} constant(-inf)
%pad.382 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.587, bf16[]{:T(256)} %constant.5837), padding=0_1x0_0x0_0x0_0x0_0x0_0x0_0
%constant.5848 = s32[]{:T(128)} constant(0)
%param_0.525 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1386 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.382, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %param_0.525, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848), dynamic_slice_sizes={2,2,576,1,36,256,1}
%pad.381 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.587, bf16[]{:T(256)} %constant.5837), padding=1_0x0_0x0_0x0_0x0_0x0_0x0_0
%param_2.398 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1385 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.381, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %param_2.398, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848), dynamic_slice_sizes={2,2,576,1,36,256,1}
ROOT %maximum.533 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} maximum(bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1386, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1385)
}
%fused_computation.241 (param_0.503: bf16[8,2048,1,36,256,1], param_1.561: bf16[8,2048,1,36,256,1,1], param_2.397: bf16[8,2048,2,2,576,1,1], param_3.246: s32[], param_4.127: bf16[1,32,576,1,36,256,1], param_5.55: s32[]) -> bf16[8,2048,1,36,256,1] {
%param_0.503 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%param_1.561 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} parameter(1)
%bitcast.599 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} bitcast(bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %param_1.561)
%add.3146 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} add(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.503, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %bitcast.599)
%param_2.397 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(2)
%fusion.266 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_2.397), kind=kLoop, calls=%fused_computation.245
%param_3.246 = s32[]{:T(128)} parameter(3)
%param_4.127 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(4)
%param_5.55 = s32[]{:T(128)} parameter(5)
%fusion.272 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_3.246, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_4.127, s32[]{:T(128)} %param_5.55), kind=kLoop, calls=%fused_computation.249.clone
%convolution.171 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} convolution(bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} %fusion.266, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %fusion.272), window={size=1x1x36x2x2 pad=0_0x0_0x35_35x0_0x0_0 rhs_reversal=0x1x1x0x0}, dim_labels=0b43f12_43i12o0->0b12f34
%bitcast.596 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} bitcast(bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %convolution.171)
ROOT %add.3143 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} add(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %add.3146, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %bitcast.596)
}
)";
hlo_string += R"(
ENTRY entry {
%constant.4735 = u32[]{:T(128)} constant(2)
%constant.4598 = u32[]{:T(128)} constant(15)
%constant.3341 = u32[]{:T(128)} constant(1)
%partition-id.16 = u32[]{:T(128)} partition-id()
%param.149 = (bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.21127 = u32[]{:T(128)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=4
%copy.2357 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.21127)
%add.1530 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %constant.3341)
%add.1943 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %constant.4735)
%get-tuple-element.21124 = bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=1
%bitcast.98 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} bitcast(bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} %get-tuple-element.21124)
%get-tuple-element.21123 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=0
%get-tuple-element.21125 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=2
%get-tuple-element.21126 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=3
%constant.3344 = s32[16]{0:T(128)} constant({...})
%constant.3339 = u32[256]{0:T(256)} constant({...})
%dynamic-slice.312 = u32[1]{0:T(128)} dynamic-slice(u32[256]{0:T(256)} %constant.3339, u32[]{:T(128)} %partition-id.16), dynamic_slice_sizes={1}
%bitcast.99 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.312)
%add.1531 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1530, u32[]{:T(128)} %bitcast.99)
%and.40 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1531, u32[]{:T(128)} %constant.4598)
%add.2637 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.99, u32[]{:T(128)} %constant.4598)
%subtract.216 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2637, u32[]{:T(128)} %add.1530)
%and.41 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.216, u32[]{:T(128)} %constant.4598)
%subtract.214 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2637, u32[]{:T(128)} %copy.2357)
%and.39 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.214, u32[]{:T(128)} %constant.4598)
%add.1527 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %bitcast.99)
%and.38 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1527, u32[]{:T(128)} %constant.4598)
%fusion.1974 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[16]{0:T(128)} %constant.3344, u32[]{:T(128)} %and.41, u32[]{:T(128)} %and.40, u32[]{:T(128)} %and.39, u32[]{:T(128)} %and.38), kind=kLoop, calls=%fused_computation.1799
%get-tuple-element.20616 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=3
%bitcast.100 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20616)
%get-tuple-element.20615 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=2
%bitcast.101 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20615)
%get-tuple-element.20614 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=1
%bitcast.104 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20614)
%get-tuple-element.20613 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=0
%bitcast.105 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20613)
%copy.2356 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} copy(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %get-tuple-element.21126)
%collective-permute.23 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2356), channel_id=51, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"},\"scoped_memory_configs\":[]}"
%copy.2354 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} copy(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %get-tuple-element.21123)
%collective-permute.22 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2354), channel_id=50, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
%fusion.264 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} fusion(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2356, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2354), kind=kLoop, calls=%fused_computation.243
%bitcast.97 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} bitcast(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %fusion.264)
%collective-permute.24 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.22), channel_id=52, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
%fusion.265 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} fusion(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.23, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.22), kind=kLoop, calls=%fused_computation.244
%collective-permute.25 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.23), channel_id=53, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"},\"scoped_memory_configs\":[]}"
%bitcast.103 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} bitcast(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %fusion.265)
%fusion.267 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %bitcast.97, s32[]{:T(128)} %bitcast.100, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %bitcast.98, s32[]{:T(128)} %bitcast.101), kind=kOutput, calls=%fused_computation.246
%fusion.262 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} fusion(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.21125, bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %fusion.267, bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %bitcast.103, s32[]{:T(128)} %bitcast.104, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %bitcast.98, s32[]{:T(128)} %bitcast.105), kind=kOutput, calls=%fused_computation.241
ROOT %tuple.1419 = (bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.24, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} %get-tuple-element.21124, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %fusion.262, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.25, u32[]{:T(128)} %add.1943)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
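// After scheduling, each pair of collective-permutes should be converted to
// start/done pairs that straddle the output fusion able to hide them:
// permutes 0/1 around %fusion.267 and permutes 2/3 around %fusion.262.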
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.262"));
}
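// Each pair of collective-permutes should overlap one of the two
// convolutions: cp0/cp1 are hidden behind c0, and the dependent cp2/cp3
// behind c1.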
TEST_F(LatencyHidingSchedulerTest, MoveCertainConv2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,64]{2,1,0} parameter(3)
cp0 = f32[16,64,256]{2,1,0} collective-permute(p0),
source_target_pairs={{0,1},{1,0}}
cp1 = f32[16,64,256]{2,1,0} collective-permute(p1),
source_target_pairs={{0,1},{1,0}}
cp2 = f32[16,64,256]{2,1,0} collective-permute(cp0),
source_target_pairs={{0,1},{1,0}}
cp3 = f32[16,64,256]{2,1,0} collective-permute(cp1),
source_target_pairs={{0,1},{1,0}}
a0 = f32[16,64,256]{2,1,0} add(cp0, cp1)
c0 = f32[16,64,256]{2,1,0} convolution(p2, p3),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(a0, c0),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple = (f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(cp2, cp3, c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "c1"));
}
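// The while body already keeps two collective-permutes in flight, so with
// collective_permute_overlap_limit = 2 the entry computation's
// collective-permute.3 cannot also be overlapped with the loop: its start
// must be scheduled after the while.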
TEST_F(LatencyHidingSchedulerTest, WhileOverlapLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
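// As above, but the in-flight collective-permutes come from a nested while.
// The overlap limit of 2 must be honored across loop nesting, so the entry
// collective-permute.3 still starts after the outer while.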
TEST_F(LatencyHidingSchedulerTest, WhileNestedOverlapLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.1, negate, gte1)
}
while_cond2 {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body2 {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
while.1 = (bf16[8]{0}, bf16[8]{0}, pred[]) while(param), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while.1), index=0
gte1 = pred[] get-tuple-element(while.1), index=2
bitcast = bf16[8]{0} bitcast(gte0)
negate = bf16[8]{0} negate(bitcast)
collective-permute.2 = bf16[8]{0} collective-permute(negate), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond2, body=while_body2
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
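// Identical module to WhileOverlapLimit, but with the limit raised to 3
// there is headroom left, so collective-permute.3 can be hidden behind the
// while loop: its start is scheduled before the while.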
TEST_F(LatencyHidingSchedulerTest, WhileOverlapUnderLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 3;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
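// The while body keeps an all-gather in flight. With the all-gather overlap
// limit left at its default, the entry all-gather.2 cannot be overlapped
// with the loop and must start after the while.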
TEST_F(LatencyHidingSchedulerTest, WhileOverlapLimitAllGather) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[4]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
all-gather.1 = bf16[8]{0} all-gather(gte0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=1
add0 = bf16[8]{0} add(all-gather.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[4]{0} collective-permute(gte0), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte2)
}
ENTRY entry {
p0 = bf16[4]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[4]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
all-gather.2 = bf16[8]{0} all-gather(p0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=2
gte0 = bf16[4]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT tuple.2 = (bf16[4]{0}, bf16[8]{0}, bf16[8]{0}) tuple(gte0, gte1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "all-gather-start.1"),
GetIndex(new_instruction_sequence, "while"));
}
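// Same module as WhileOverlapLimitAllGather, but raising
// all_gather_overlap_limit to 2 lets the entry all-gather.2 be hidden
// behind the while loop: its start is scheduled before the while.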
TEST_F(LatencyHidingSchedulerTest, WhileOverlapUnderLimitAllGather) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[4]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
all-gather.1 = bf16[8]{0} all-gather(gte0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=1
add0 = bf16[8]{0} add(all-gather.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[4]{0} collective-permute(gte0), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte2)
}
ENTRY entry {
p0 = bf16[4]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[4]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
all-gather.2 = bf16[8]{0} all-gather(p0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=2
gte0 = bf16[4]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT tuple.2 = (bf16[4]{0}, bf16[8]{0}, bf16[8]{0}) tuple(gte0, gte1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "all-gather-start.1"),
GetIndex(new_instruction_sequence, "while"));
}
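// The two async all-to-alls should be balanced across the two convolutions
// rather than both hiding behind the same one: c0 runs between ata0's
// async-start and async-done, and c1 between ata1's.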
TEST_F(LatencyHidingSchedulerTest, AllToAllAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
async_computation {
p = f32[2,8,256,256] parameter(0)
ROOT ata = f32[2,8,256,256] all-to-all(p), dimensions={0}, replica_groups={{0,1}}
}
async_computation.2 {
p.2 = f32[2,8,256,256] parameter(0)
ROOT ata.1 = f32[2,8,256,256] all-to-all(p.2), dimensions={0}, replica_groups={{0,1}}
}
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2,8,256,256]{3,2,1,0} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[2,8,256,256]{3,2,1,0} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ata-start = ((f32[2,8,256,256]), f32[2,8,256,256], u32[], u32[]) async-start(
f32[2,8,256,256] %color_operand.1), calls=async_computation,
metadata={op_type="AllToAll" op_name="ata0"}
%ata-start.2 = ((f32[2,8,256,256]), f32[2,8,256,256], u32[], u32[]) async-start(
f32[2,8,256,256] %color_operand.2), calls=async_computation.2,
metadata={op_type="AllToAll" op_name="ata1"}
%ata-done = f32[2,8,256,256] async-done(%ata-start), calls=async_computation,
metadata={op_type="AllToAll" op_name="ata0"}
%ata-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ata-done),
metadata={op_type="Bitcast" op_name="ata0"}
%ata-done.2 = f32[2,8,256,256] async-done(%ata-start.2), calls=async_computation.2,
metadata={op_type="AllToAll" op_name="ata1"}
%ata-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ata-done.2),
metadata={op_type="Bitcast" op_name="ata1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ata-done-bc.2, %ata-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncDone,
new_instruction_sequence, "ata0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncStart,
new_instruction_sequence, "ata0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncDone,
new_instruction_sequence, "ata1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncStart,
new_instruction_sequence, "ata1"));
}
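// When two collective-permute starts compete for the overlap window, the
// scheduler should first release the one whose done stalls the schedule
// less; here cp2s is expected to be scheduled before cp1s.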
TEST_F(LatencyHidingSchedulerTest, ReleaseOneThatStallsLessFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
cp1s = (f32[1024,2048,2048]{2,1,0}, f32[1024,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
cp2s = (f32[2048,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p3), source_target_pairs={{1,0},{0,3},{3,2}}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c0"}
cp1d = f32[1024,2048,2048]{2,1,0} collective-permute-done(cp1s)
cp2d = f32[2048,2048,2048]{2,1,0} collective-permute-done(cp2s)
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(c0, cp1d, cp2d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<TestLatencyEstimator>())
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
GetIndex(new_instruction_sequence, "cp1s"));
}
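// With the release-start policy enabled, a start is issued as soon as its
// latency can be paid for, pipelining the dependent permutes across the two
// convolutions: cp2s < c0 < cp2d < cp3s < c1 < cp3d.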
TEST_F(LatencyHidingSchedulerTest, ReleaseStartWhenLatencyDue) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[128,2048,2048]{2,1,0} parameter(1)
p2 = f32[512,2048,2048]{2,1,0} parameter(2)
cp1s = (f32[512,2048,2048]{2,1,0}, f32[512,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
cp1d = f32[512,2048,2048]{2,1,0} collective-permute-done(cp1s)
cp2s = (f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p1), source_target_pairs={{1,0},{0,3},{3,2}}
cp2d = f32[128,2048,2048]{2,1,0} collective-permute-done(cp2s)
cp3s = (f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(cp2d), source_target_pairs={{1,0},{0,3},{3,2}}
cp3d = f32[128,2048,2048]{2,1,0} collective-permute-done(cp3s)
slice = f32[16,64,256]{2,1,0} slice(f32[512,2048,2048]{2,1,0} cp1d), slice={[0:16], [0:64], [0:256]}
c0 = f32[16,256,256]{2,1,0} convolution(p0, slice),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, slice),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}, f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}) tuple(c0, c1, cp2d, cp3d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.aggressive_scheduling_policies = true;
sched_config.enable_release_start_policy = true;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<TestLatencyEstimator>())
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "c0"),
GetIndex(new_instruction_sequence, "cp2d"));
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2d"),
GetIndex(new_instruction_sequence, "cp3s"));
EXPECT_LT(GetIndex(new_instruction_sequence, "cp3s"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "c1"),
GetIndex(new_instruction_sequence, "cp3d"));
}
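// Verifies that a target can extend AsyncTracker with its own resource
// types, and that name, hazard-type, and availability queries for a
// target-defined resource are routed to the target's overrides.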
TEST_F(LatencyHidingSchedulerTest, AsyncTrackerTestForTargetDefinedResources) {
class AsyncTrackerForMyTarget : public AsyncTracker {
enum class MyTargetResourceType {
kTargetResource0 = 0,
kNumTargetResources = 1,
};
public:
explicit AsyncTrackerForMyTarget(const SchedulerConfig& config,
int64_t target_resource0_limit = 3)
: AsyncTracker(config),
target_resource0_limit_(target_resource0_limit) {}
absl::string_view GetResourceName(int64_t resource_type) const override {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return AsyncTracker::GetResourceName(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case static_cast<int64_t>(MyTargetResourceType::kTargetResource0):
return "kTargetResource0";
default:
return "";
}
}
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return AsyncTracker::GetResourceHazardType(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case static_cast<int64_t>(MyTargetResourceType::kTargetResource0):
return ResourceHazardType::kShareable;
default:
return ResourceHazardType::kUnshareable;
}
}
int64_t GetNumTargetDefinedResources() const override {
return static_cast<int64_t>(MyTargetResourceType::kNumTargetResources);
}
int64_t GetNumAvailableResources(int64_t resource_type) const override {
const int64_t first_target_resource =
AsyncTracker::GetFirstTargetDefinedResource();
CHECK_GE(resource_type, first_target_resource);
CHECK_LT(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case (static_cast<int64_t>(MyTargetResourceType::kTargetResource0)):
return static_cast<int64_t>(target_resource0_limit_);
default:
return 1;
}
}
private:
const int64_t target_resource0_limit_;
};
const int64_t target_resource0_overlap_limit = 5;
AsyncTrackerForMyTarget async_tracker_for_my_target(
SchedulerConfig(), target_resource0_overlap_limit);
CHECK_EQ(async_tracker_for_my_target.GetNumTargetDefinedResources(), 1);
const int64_t target_resource0_index =
static_cast<int64_t>(ResourceType::kTargetDefinedResourcesBound) + 1;
CHECK_EQ(async_tracker_for_my_target.GetResourceName(target_resource0_index),
"kTargetResource0");
CHECK_EQ(
static_cast<int64_t>(async_tracker_for_my_target.GetResourceHazardType(
target_resource0_index)),
static_cast<int64_t>(ResourceHazardType::kShareable));
CHECK_EQ(async_tracker_for_my_target.GetNumAvailableResources(
target_resource0_index),
target_resource0_overlap_limit);
}
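// Unit-checks the occupier bookkeeping for a shareable resource. Occupiers
// progress at a rate of 1/(number of concurrent occupiers), and
// AddOccupierToResource/DeleteOccupierFromResource must keep the projected
// finish times (PFTs) consistent. For example, two edges of latency 3
// entering at times 0 and 1 finish at 5 and 6 (not 3 and 4), since each
// halves the other's rate while both are in flight.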
TEST_F(LatencyHidingSchedulerTest, AddDeleteOccupierForSharedResource) {
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>> occupiers;
std::function<bool(std::vector<double>)> check_eq =
    [&occupiers](std::vector<double> times) {
if (times.size() != occupiers.size()) {
return false;
}
int64_t i = 0;
for (auto it = occupiers.begin(); it != occupiers.end(); ++it) {
if (std::abs(times[i] - it->second) > 0.0001) {
VLOG(1) << "Projected finish time (PFT) in occupier list does not match "
           "the given value (at index "
        << i << "): " << it->second << " vs " << times[i];
return false;
}
i++;
}
return true;
};
HloEdge edge1(3, nullptr);
HloEdge edge2(3, nullptr);
HloEdge edge3(1, nullptr);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(1, edge2, occupiers);
CHECK(check_eq({5, 6}));
DefaultSchedulerCore::AddOccupierToResource(1, edge3, occupiers);
CHECK(check_eq({4, 6, 7}));
occupiers.clear();
edge1.SetOriginalLatency(1);
edge2.SetOriginalLatency(2);
edge3.SetOriginalLatency(3);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(1, edge2, occupiers);
CHECK(check_eq({1, 3}));
DefaultSchedulerCore::AddOccupierToResource(2, edge3, occupiers);
CHECK(check_eq({1, 4, 6}));
HloEdge edge0(0.5, nullptr);
DefaultSchedulerCore::AddOccupierToResource(2, edge0, occupiers);
CHECK(check_eq({1, 3.5, 4.5, 6.5}));
occupiers.clear();
edge1.SetOriginalLatency(1);
edge2.SetOriginalLatency(2);
edge3.SetOriginalLatency(3);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
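// Deleting an edge that was never added as an occupier must fail and leave
// the occupier list unchanged.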
auto res =
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge0, occupiers);
CHECK(!res);
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge1, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge2, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge3, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(1, edge1, occupiers);
CHECK(check_eq({4.3333333, 5.3333333}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge1, occupiers);
CHECK(check_eq({5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge2, occupiers);
CHECK(check_eq({3, 5.5}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge3, occupiers);
CHECK(check_eq({3, 4.5}));
}
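// With memory_limit = 0 every choice is over budget; the depth-based
// memory-pressure-reduction heuristic should then reorder the adds so that
// g is scheduled before f.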
TEST_F(LatencyHidingSchedulerTest, DepthPressureReduction) {
absl::string_view hlo_string = R"(
HloModule serial_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%parameter.2 = bf16[8]{0} parameter(1)
%parameter.3 = bf16[8]{0} parameter(2)
%parameter.4 = bf16[8]{0} parameter(3)
%collective-permute.2 = bf16[8]{0} collective-permute(parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%a = bf16[8]{0} add(collective-permute.2, parameter.2)
%b = bf16[8]{0} add(a, parameter.3)
%c = bf16[8]{0} add(b, parameter.4)
%d = bf16[8]{0} add(c, parameter.4)
%c1 = bf16[8]{0} copy(d)
%e = bf16[8]{0} add(d, parameter.3)
%c0 = bf16[8]{0} copy(e)
%f = bf16[8]{0} add(e, parameter.2)
%h = bf16[8]{0} add(c0, b)
%g = bf16[8]{0} add(c1, c)
%i = bf16[8]{0} add(f, a)
ROOT %t = (bf16[8]{0}, bf16[8]{0}, bf16[8]{0}, bf16[8]{0}) tuple(f, g, h, i)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
sched_config.memory_limit = 0;
sched_config.depth_based_memory_pressure_reduction = true;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* f = FindInstruction(hlo_module.get(), "f");
const HloInstruction* g = FindInstruction(hlo_module.get(), "g");
EXPECT_LT(PositionInVector(new_instruction_sequence, g),
PositionInVector(new_instruction_sequence, f));
}
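// rerun = 1 lets the scheduler retry with a reduced memory limit when the
// first pass exceeds it; after the rerun, the small slice s should be
// placed before the collective-permute-start.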
TEST_F(LatencyHidingSchedulerTest, RerunWithSmallerMemoryLimit) {
absl::string_view hlo_string = R"(
HloModule rerun_scheduler_test, is_scheduled=true
ENTRY main {
p0 = bf16[8]{0} parameter(0)
c = bf16[] constant(0)
b = bf16[43]{0} broadcast(c), dimensions={}
s = bf16[1]{0} slice(b), slice={[0:1]}
cp = bf16[8]{0} collective-permute(p0), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT tuple = (bf16[8]{0}, bf16[1]{0}) tuple(cp, s)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
sched_config.memory_limit = 110;
sched_config.rerun = 1;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* s = FindInstruction(hlo_module.get(), "s");
const HloInstruction* cps =
FindInstruction(hlo_module.get(), "collective-permute-start");
EXPECT_LT(PositionInVector(new_instruction_sequence, s),
PositionInVector(new_instruction_sequence, cps));
}
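// Regression test: scheduling several async call-start/call-done chains,
// one feeding another, must not introduce a dependency cycle. Successful
// scheduling is the entire check.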
TEST_F(LatencyHidingSchedulerTest, MultipleAsyncDoneOperationsDoNotCreateLoop) {
absl::string_view hlo_string = R"(
HloModule multiple_async_done_scheduler_test, is_scheduled=true
called_computation {
ROOT %param = s32[<=4096]{0:T(8)M(1024)} parameter(0)
}
ENTRY main {
%while_body_forward_pass_input_tuple = (s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) parameter(0), backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%get-tuple-element.0 = s32[<=4096]{0:T(8)M(1024)} get-tuple-element(
(s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) %while_body_forward_pass_input_tuple),
index=0, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%get-tuple-element.1 = s32[<=4096]{0:T(8)M(1024)} get-tuple-element(
(s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) %while_body_forward_pass_input_tuple),
index=1, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%call-start.1 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %get-tuple-element.1),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.1 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.1)
%call-start.2 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %call-done.1),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.2 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.2)
%call-start.3 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %get-tuple-element.0),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.3 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.3)
ROOT %tuple.6 = (s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)})
tuple(s32[<=4096]{0:T(8)M(1024)} %call-done.2, s32[<=4096]{0:T(8)M(1024)} %call-done.3),
backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
}
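// A host copy should be hidden behind compute: copy-start is expected
// before the first convolution and copy-done after it.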
TEST_F(LatencyHidingSchedulerTest, CopyScheduling) {
absl::string_view hlo_string = R"(
HloModule EinsumTest, is_scheduled=true
ENTRY AddR2 {
y_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(1)
z = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(2)
x = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(0)
convolution = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(x, z), dim_labels=bf_io->bf
copy-start = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(y_host)
copy-done = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start)
ROOT convolution.1 = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(convolution, copy-done), dim_labels=bf_io->bf
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
const HloInstruction* conv = FindInstruction(hlo_module.get(), "convolution");
const HloInstruction* cps = FindInstruction(hlo_module.get(), "copy-start");
const HloInstruction* cpd = FindInstruction(hlo_module.get(), "copy-done");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_LT(PositionInVector(new_instruction_sequence, cps),
PositionInVector(new_instruction_sequence, conv));
EXPECT_LT(PositionInVector(new_instruction_sequence, conv),
PositionInVector(new_instruction_sequence, cpd));
XLA_VLOG_LINES(1, hlo_module->ToString());
}
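// With two host copies, both should overlap the convolution: copy-start2
// precedes it, copy-done2 follows it, and copy-start is also issued before
// copy-done2 so that the copies themselves overlap.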
TEST_F(LatencyHidingSchedulerTest, MaxCopyScheduling) {
absl::string_view hlo_string = R"(
HloModule EinsumTest, is_scheduled=true
ENTRY AddR2 {
y_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(1)
q_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(3)
z = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(2)
x = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(0)
convolution = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(x, z), dim_labels=bf_io->bf
copy-start = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(y_host)
copy-done = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start)
copy-start2 = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(q_host)
copy-done2 = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start2)
ROOT t = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}) tuple(copy-done2, copy-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
const HloInstruction* conv = FindInstruction(hlo_module.get(), "convolution");
const HloInstruction* cps = FindInstruction(hlo_module.get(), "copy-start");
const HloInstruction* cps2 = FindInstruction(hlo_module.get(), "copy-start2");
const HloInstruction* cpd2 = FindInstruction(hlo_module.get(), "copy-done2");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_LT(PositionInVector(new_instruction_sequence, cps2),
PositionInVector(new_instruction_sequence, conv));
EXPECT_LT(PositionInVector(new_instruction_sequence, conv),
PositionInVector(new_instruction_sequence, cpd2));
EXPECT_LT(PositionInVector(new_instruction_sequence, cps),
PositionInVector(new_instruction_sequence, cpd2));
XLA_VLOG_LINES(1, hlo_module->ToString());
}
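// The send-done of a host transfer peeled out of the loop should be
// scheduled before the while loop itself.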
TEST_F(LatencyHidingSchedulerTest, ScheduleLoopPeeledSendDoneBeforeWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) parameter(0)
gte0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(param), index=0
gte1 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(param), index=1
%add.0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} add(gte0, gte1)
gte2 = pred[] get-tuple-element(param), index=2
ROOT tuple = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) tuple(%add.0, gte1, gte2)
}
ENTRY %entry {
%p0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} parameter(1)
%after-all = token[] after-all()
%send = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) send(bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} %p0, token[] %after-all), channel_id=1246, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="xla_megascale_runtime",_xla_host_transfer_rendezvous="collective-permute.145_0",_xla_megascale_target="{{200000->100000},{200001->100001},{200002->100002},{200003->100003},{200004->100004},{200005->100005},{200006->100006},{200007->100007},{200008->100008},{200009->100009},{200010->100010},{200011->100011},{200012->100012},{200013->100013},{200014->100014},{200015->100015},{200016->100016},{200017->100017},{200018->100018},{200019->100019},{200020->100020},{200021->100021},{200022->100022},{200023->100023},{200024->100024},{200025->100025},{200026->100026},{200027->100027},{200028->100028},{200029->100029},{200030->100030},{200031->100031},{200032->100032},{200033->100033},{200034->100034},{200035->100035},{200036->100036},{200037->100037},{200038->100038},{200039->100039},{200040->100040},{200041->100041},{200042->100042},{200043->100043},{200044->100044},{200045->100045},{200046->100046},{200047->100047},{200048->100048},{200049->100049},{200050->100050},{200051->100051},{200052->100052},{200053->100053},{200054->100054},{200055->100055},{200056->100056},{200057->100057},{200058->100058},{200059->100059},{200060->100060},{200061->100061},{200062->100062},{200063->100063},{200064->100064},{200065->100065},{200066->100066},{200067->100067},{200068->100068},{200069->100069},{200070->100070},{200071->100071},{200072->100072},{200073->100073},{200074->100074},{200075->100075},{200076->100076},{200077->100077},{200078->100078},{200079->100079},{200080->100080},{200081->100081},{200082->100082},{200083->100083},{200084->100084},{200085->100085},{200086->100086},{200087->100087},{200088->100088},{200089->100089},{200090->100090},{200091->100091},{200092->100092},{200093->100093},{200094->100094},{200095->100095},{200096->100096},{200097->100097},{200098->100098},{200099->100099},{200100->100100},{200101->100101},{200102->100102},{200103->100103},{200104->100104},{200105->100105},{200106->100106},{200107->100107},{200108->100108},{200109->100109},{200110->100110},{200111->100111},{200112->100112},{200113->100113},{200114->100114},{200115->100115},{200116->100116},{200117->100117},{200118->100118},{200119->100119},{200120->100120},{200121->100121},{200122->100122},{200123->100123},{200124->100124},{200125->100125},{200126->100126},{200127->100127}}",_xla_megascale_transfer_type="ONE_TO_ONE"}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_send":{"non_source_slice_ids":[0]}}}
%recv = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) recv(token[] %after-all), channel_id=1247, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="xla_megascale_runtime",_xla_host_transfer_rendezvous="collective-permute.145_0",_xla_megascale_target="{{200000->100000},{200001->100001},{200002->100002},{200003->100003},{200004->100004},{200005->100005},{200006->100006},{200007->100007},{200008->100008},{200009->100009},{200010->100010},{200011->100011},{200012->100012},{200013->100013},{200014->100014},{200015->100015},{200016->100016},{200017->100017},{200018->100018},{200019->100019},{200020->100020},{200021->100021},{200022->100022},{200023->100023},{200024->100024},{200025->100025},{200026->100026},{200027->100027},{200028->100028},{200029->100029},{200030->100030},{200031->100031},{200032->100032},{200033->100033},{200034->100034},{200035->100035},{200036->100036},{200037->100037},{200038->100038},{200039->100039},{200040->100040},{200041->100041},{200042->100042},{200043->100043},{200044->100044},{200045->100045},{200046->100046},{200047->100047},{200048->100048},{200049->100049},{200050->100050},{200051->100051},{200052->100052},{200053->100053},{200054->100054},{200055->100055},{200056->100056},{200057->100057},{200058->100058},{200059->100059},{200060->100060},{200061->100061},{200062->100062},{200063->100063},{200064->100064},{200065->100065},{200066->100066},{200067->100067},{200068->100068},{200069->100069},{200070->100070},{200071->100071},{200072->100072},{200073->100073},{200074->100074},{200075->100075},{200076->100076},{200077->100077},{200078->100078},{200079->100079},{200080->100080},{200081->100081},{200082->100082},{200083->100083},{200084->100084},{200085->100085},{200086->100086},{200087->100087},{200088->100088},{200089->100089},{200090->100090},{200091->100091},{200092->100092},{200093->100093},{200094->100094},{200095->100095},{200096->100096},{200097->100097},{200098->100098},{200099->100099},{200100->100100},{200101->100101},{200102->100102},{200103->100103},{200104->100104},{200105->100105},{200106->100106},{200107->100107},{200108->100108},{200109->100109},{200110->100110},{200111->100111},{200112->100112},{200113->100113},{200114->100114},{200115->100115},{200116->100116},{200117->100117},{200118->100118},{200119->100119},{200120->100120},{200121->100121},{200122->100122},{200123->100123},{200124->100124},{200125->100125},{200126->100126},{200127->100127}}",_xla_megascale_transfer_type="ONE_TO_ONE"}, control-predecessors={%send}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_recv":{"non_target_slice_ids":[1]}}}
%recv-done = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, token[]) recv-done((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) %recv), channel_id=1247, is_host_transfer=true, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_recv":{"non_target_slice_ids":[1]}}}
%get-tuple-element = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, token[]) %recv-done), index=0
%send-done = token[] send-done((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) %send), channel_id=1246, is_host_transfer=true, control-predecessors={%recv-done}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_send":{"non_source_slice_ids":[0]}}}
%p2 = pred[] parameter(2)
tuple = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) tuple(%get-tuple-element, %p1, %p2)
while = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
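// The host-transfer send/recv pair feeds the while loop, so send-done must
// not be deferred past the while instruction that consumes the received data.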
EXPECT_LT(GetIndex(new_instruction_sequence, "send-done"),
GetIndex(new_instruction_sequence, "while"));
}
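// Exercises selective-resource scheduling: the all-gather is classified as a
// selective resource and the convolution c2 is flagged as not valuable for
// selective overlap, which constrains where the all-gather may remain in
// flight.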
TEST_F(LatencyHidingSchedulerTest, AllGatherWithSelectiveOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c2 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, c0)
}
)";
class SelectiveOverlapAsyncTracker : public AsyncTracker {
public:
explicit SelectiveOverlapAsyncTracker(const SchedulerConfig& sched_config)
: AsyncTracker(sched_config) {}
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override {
if (resource_type == ResourceTypeToIndex(ResourceType::kAllGather)) {
return ResourceHazardType::kSelective;
}
if (resource_type == AsyncTracker::GetFirstTargetDefinedResource()) {
return ResourceHazardType::kNonextendable;
}
return AsyncTracker::GetResourceHazardType(resource_type);
}
ResourcesVector GetResourcesFromInstruction(
const HloInstruction& hlo) const override {
ResourcesVector result = AsyncTracker::GetResourcesFromInstruction(hlo);
if (hlo.opcode() == HloOpcode::kAllGatherStart) {
result.push_back({AsyncTracker::GetFirstTargetDefinedResource(),
ResourceUsageType::kResourceRelease});
}
return result;
}
absl::InlinedVector<int64_t, 1> GetReleasedNonextendableResourcesFromVector(
const ResourcesVector& resources) const override {
absl::InlinedVector<int64_t, 1> non_extendable_resources;
for (const ResourcePair& resource : resources) {
if (GetResourceHazardType(resource.first) ==
ResourceHazardType::kNonextendable) {
non_extendable_resources.push_back({resource.first});
}
}
return non_extendable_resources;
}
void PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const override {
for (const HloInstruction* instr :
schedule_graph->GetOriginalInstrList()) {
if (instr->name() == "c2") {
schedule_graph->GetNode(instr).SetValuableForSelectiveOverlap(false);
}
}
}
};
SchedulerConfig sched_config = GetDefaultSchedConfig();
sched_config.enable_selective_resources = true;
std::unique_ptr<AsyncTracker> async_tracker =
std::make_unique<SelectiveOverlapAsyncTracker>(sched_config);
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<ApproximateLatencyEstimator>(),
std::move(async_tracker))
.ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
int c0_index = GetIndex(new_instruction_sequence, "c0");
int c1_index = GetIndex(new_instruction_sequence, "c1");
int c2_index = GetIndex(new_instruction_sequence, "c2");
int ag_start_index = GetIndex(new_instruction_sequence, "ag-start");
int ag_done_index = GetIndex(new_instruction_sequence, "ag-done");
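// Expected execution order: c0, ag-start, c1, c2, ag-done, i.e. the
// all-gather stays in flight across c2, the instruction flagged as not
// valuable for selective overlap.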
EXPECT_LT(c0_index, ag_start_index);
EXPECT_LT(ag_start_index, c1_index);
EXPECT_LT(c1_index, c2_index);
EXPECT_LT(c2_index, ag_done_index);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/latency_hiding_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/latency_hiding_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d00504b-3897-43c0-a93d-bf046bfb5a6e | cpp | tensorflow/tensorflow | conditional_code_motion | third_party/xla/xla/service/conditional_code_motion.cc | third_party/xla/xla/service/conditional_code_motion_test.cc | #include "xla/service/conditional_code_motion.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace conditional_opt {
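// Rebuilds a (possibly nested) tuple out of get-tuple-element and tuple
// instructions so that users other than existing get-tuple-elements (and the
// computation root) can be redirected to a fresh copy. Returns the new tuple,
// or the original instruction when nothing needs to be cloned.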
HloInstruction* CloneNestedTuples(HloInstruction* tuple) {
if (!tuple->shape().IsTuple()) {
return tuple;
}
std::vector<HloInstruction*> tuple_users, gte_users;
for (int i = 0; i < tuple->shape().tuple_shapes_size(); ++i) {
gte_users.push_back(nullptr);
}
for (auto* tuple_user : tuple->users()) {
VLOG(2) << "tuple_user: " << tuple_user->ToString() << "\n";
if (tuple_user->opcode() != HloOpcode::kGetTupleElement ||
tuple_user == tuple->parent()->root_instruction()) {
tuple_users.push_back(tuple_user);
} else {
gte_users[tuple_user->tuple_index()] = tuple_user;
}
}
if (!tuple_users.empty() || tuple->user_count() == 0 ||
tuple == tuple->parent()->root_instruction()) {
VLOG(5) << "CLONING: " << tuple->ToString() << "\n";
int64_t tuple_size = tuple->shape().tuple_shapes_size();
std::vector<HloInstruction*> operands;
operands.reserve(tuple_size);
for (int64_t j = 0; j < tuple_size; ++j) {
HloInstruction* gte =
(gte_users[j] == nullptr)
? tuple->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
tuple->shape().tuple_shapes(j), tuple, j))
: gte_users[j];
CHECK_NE(gte, nullptr);
operands.push_back(CloneNestedTuples(gte));
}
HloInstruction* new_tuple =
tuple->parent()->AddInstruction(HloInstruction::CreateTuple(operands));
VLOG(2) << "new_tuple: " << new_tuple->ToString() << "\n";
if (tuple == tuple->parent()->root_instruction()) {
tuple->parent()->set_root_instruction(new_tuple, /*accept_different_shape=*/true);
} else {
for (auto tuple_user : tuple_users) {
TF_CHECK_OK(tuple->ReplaceUseWithDifferentShape(tuple_user, new_tuple));
}
}
return new_tuple;
}
for (auto gte_user : gte_users) {
if (gte_user != nullptr) {
auto gte = CloneNestedTuples(gte_user);
CHECK_NE(gte, nullptr);
}
}
return tuple;
}
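// Worklist-based visitor over the boundaries of a conditional; each boundary
// is popped at most once thanks to the visited set.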
class BoundaryVisitor {
public:
explicit BoundaryVisitor(HloInstruction* conditional) {
Boundary b(Boundary::Position::kInsideBranch);
b.mutable_operands().push_back(conditional);
worklist_.push_back(b);
}
BoundaryVisitor() {}
Boundary PopNextBoundary() {
CHECK(!worklist_.empty());
Boundary b = worklist_.front();
worklist_.pop_front();
while (!worklist_.empty() && ContainsKey(visited_, b)) {
b = worklist_.front();
worklist_.pop_front();
}
visited_.insert(b);
return b;
}
void AddToWorkList(const Boundary& b) {
CHECK(!b.operands().empty());
worklist_.push_back(b);
}
bool HasNextBoundary() {
while (!worklist_.empty()) {
Boundary b = worklist_.front();
if (!ContainsKey(visited_, b)) {
break;
}
worklist_.pop_front();
}
return !worklist_.empty();
}
private:
std::deque<Boundary> worklist_;
absl::flat_hash_set<Boundary> visited_;
};
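// Returns the number of distinct non-constant instructions in `ops`.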
template <class OpCollection>
int64_t CountNonLeafOps(const OpCollection& ops) {
absl::flat_hash_set<HloInstruction*> op_set;
for (auto op : ops) {
if (!op_set.contains(op) && op->opcode() != HloOpcode::kConstant) {
op_set.insert(op);
}
}
return op_set.size();
}
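// Opcode-level default estimate of the data reuse carried from producer `op`
// to `user`; cheap producers and pass-through consumers are scored 0.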
int64_t ReusesCarriedBy(HloOpcode op, HloOpcode user) {
switch (user) {
case HloOpcode::kGetTupleElement:
return 0;
case HloOpcode::kConvert:
switch (op) {
case HloOpcode::kConvolution:
case HloOpcode::kDot:
return 0;
default:
break;
}
break;
default:
break;
}
switch (op) {
case HloOpcode::kParameter:
case HloOpcode::kConstant:
case HloOpcode::kGetTupleElement:
return 0;
case HloOpcode::kConditional:
return 10;
default:
return -10;
}
}
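// Opcode-level default decision of whether an instruction of kind `op` whose
// first operand is of kind `child_op` is worth moving across a conditional
// boundary.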
bool WorthHoisting(HloOpcode op, HloOpcode child_op) {
switch (op) {
case HloOpcode::kConvert:
switch (child_op) {
case HloOpcode::kAllReduce:
case HloOpcode::kReshape:
case HloOpcode::kGetTupleElement:
return true;
default:
return false;
}
case HloOpcode::kGetTupleElement:
case HloOpcode::kTuple:
switch (child_op) {
case HloOpcode::kParameter:
return false;
default:
return true;
}
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kReduce:
case HloOpcode::kConstant:
case HloOpcode::kReshape:
case HloOpcode::kBroadcast:
return true;
default:
if (HloInstruction::IsOpElementwise(op)) {
return true;
}
return false;
}
}
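// Returns true if the given instructions, one per branch, all compute the
// same value; cross-module all-reduces are compared with their channel ids
// temporarily unified.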
bool InstructionWithinBranchIdentical(
const std::vector<HloInstruction*>& instructions,
bool is_layout_sensitive) {
auto eq_operand = [&](const HloInstruction* a, const HloInstruction* b) {
bool eq_operands = is_layout_sensitive
? ShapeUtil::Equal(a->shape(), b->shape())
: ShapeUtil::Compatible(a->shape(), b->shape());
return eq_operands;
};
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
if (instructions.empty()) {
return false;
}
if (instructions[0]->IsCrossModuleAllReduce()) {
return std::all_of(
instructions.begin(), instructions.end(),
[&](HloInstruction* instruction) {
if (!instruction->IsCrossModuleAllReduce()) {
return false;
}
auto old_channel_id = instruction->channel_id();
instruction->set_channel_id(instructions[0]->channel_id());
bool eq_instructions = instructions[0]->Identical(
*instruction, eq_operand, eq_computations, is_layout_sensitive);
instruction->set_channel_id(old_channel_id);
return eq_instructions;
});
}
return std::all_of(instructions.begin(), instructions.end(),
[&](HloInstruction* instruction) {
return instructions[0]->Identical(
*instruction, eq_operand, eq_computations,
is_layout_sensitive);
});
}
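// Hoists the instruction group in `boundary` (one identical instruction per
// branch) into the parent computation of `conditional`, resolving operands
// through `hoisted_boundaries` and recording the mapping for later lookups.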
void CopyOutOfConditional(
Boundary& boundary, HloInstruction* conditional,
absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
CHECK(boundary.IsInsideBranch());
absl::InlinedVector<HloInstruction*, 4> new_operands;
const HloInstruction* branch0_inst = boundary.operands()[0];
for (int i = 0; i < branch0_inst->operands().size(); ++i) {
Boundary operand_boundary(boundary.GetPosition());
for (HloInstruction* operand : boundary.operands()) {
operand_boundary.mutable_operands().push_back(operand->operands()[i]);
}
VLOG(2) << "Looking for: " << operand_boundary.ToString();
auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
CHECK(hoisted_boundaries_it != hoisted_boundaries.end());
Boundary hoisted_boundary = hoisted_boundaries_it->second;
CHECK(hoisted_boundary.IsOutsideBranchUser());
CHECK_EQ(hoisted_boundary.operands().size(), 1);
new_operands.push_back(hoisted_boundary.operands()[0]);
}
HloInstruction* new_instruction = conditional->parent()->AddInstruction(
branch0_inst->CloneWithNewOperands(branch0_inst->shape(), new_operands));
VLOG(2) << "new instruction:" << new_instruction->ToString();
Boundary hoisted_boundary(Boundary::Position::kOutsideBranchUser);
hoisted_boundary.mutable_operands().push_back(new_instruction);
hoisted_boundaries[boundary] = hoisted_boundary;
}
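// Sinks the single instruction in `boundary` into every branch of
// `conditional`, cloning constant operands into the branches and resolving
// get-tuple-element operands against each branch root.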
void CopyIntoConditional(
Boundary& boundary, HloInstruction* conditional,
absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
CHECK(boundary.IsOutsideBranchUser() || boundary.IsOutsideBranchOperand());
CHECK_EQ(boundary.operands().size(), 1);
int num_branches = conditional->branch_count();
std::vector<absl::InlinedVector<HloInstruction*, 4>> new_operands(
num_branches);
HloInstruction* op = boundary.operands()[0];
for (HloInstruction* operand : op->operands()) {
Boundary operand_boundary(boundary.GetPosition());
operand_boundary.mutable_operands().push_back(operand);
VLOG(2) << "Looking for: " << operand_boundary.ToString();
auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
if (hoisted_boundaries_it != hoisted_boundaries.end()) {
Boundary hoisted_boundary = hoisted_boundaries_it->second;
CHECK(hoisted_boundary.IsInsideBranch());
CHECK_EQ(hoisted_boundary.operands().size(), num_branches);
for (int j = 0; j < num_branches; ++j) {
new_operands[j].push_back(hoisted_boundary.operands()[j]);
}
} else {
for (int j = 0; j < num_branches; ++j) {
switch (operand->opcode()) {
case HloOpcode::kConstant: {
auto new_operand =
conditional->branch_computation(j)->AddInstruction(
operand->Clone());
VLOG(2) << "new instruction:" << new_operand->ToString();
new_operands[j].push_back(new_operand);
break;
}
case HloOpcode::kGetTupleElement: {
auto gte = Cast<HloGetTupleElementInstruction>(operand);
int64_t index = gte->tuple_index();
HloInstruction* root =
conditional->branch_computation(j)->root_instruction();
CHECK(root->opcode() == HloOpcode::kTuple &&
index < root->operand_count())
<< root->ToString() << " " << gte->ToString();
auto new_operand = root->mutable_operand(index);
VLOG(2) << "new instruction:" << new_operand->ToString();
new_operands[j].push_back(new_operand);
break;
}
default:
LOG(FATAL) << "Unexpected out-of-boundary instruction:"
<< operand->ToString() << "\n";
}
}
}
}
Boundary hoisted_boundary(Boundary::Position::kInsideBranch);
for (int j = 0; j < num_branches; ++j) {
HloInstruction* new_instruction =
conditional->branch_computation(j)->AddInstruction(
op->CloneWithNewOperands(op->shape(), new_operands[j]));
VLOG(2) << "new instruction:" << new_instruction->ToString();
hoisted_boundary.mutable_operands().push_back(new_instruction);
}
hoisted_boundaries[boundary] = hoisted_boundary;
}
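// Identifies converts feeding the branch roots that can be hoisted out of the
// branch computations; returns the root operand indices at which a compatible
// convert is present in every branch.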
absl::flat_hash_set<int64_t> FindSpecialConverts(HloInstruction* old_root,
int branch_count,
HloInstruction* conditional,
bool is_layout_sensitive) {
absl::flat_hash_set<int64_t> special_convert;
auto convert_invalid =
[](const HloInstruction* convert_set_candidate) -> bool {
bool invalid_user = absl::c_any_of(
convert_set_candidate->users(), [](const HloInstruction* user) -> bool {
return (user->opcode() == HloOpcode::kConvert);
});
bool invalid_producer =
absl::c_any_of(convert_set_candidate->operands(),
[](const HloInstruction* operand) -> bool {
return (operand->opcode() == HloOpcode::kConvert);
});
return (invalid_user || invalid_producer);
};
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
if (old_root->operand(operand_num)->opcode() != HloOpcode::kConvert) {
continue;
}
bool replica = true;
HloInstruction* special_convert_candidate =
old_root->mutable_operand(operand_num);
auto repeated =
absl::c_count_if(old_root->operands(),
[&](const HloInstruction* operand) -> bool {
return (special_convert_candidate == operand);
}) > 1;
if (convert_invalid(special_convert_candidate) || repeated) {
continue;
}
for (int others = 1; others < branch_count; ++others) {
HloInstruction* others_root =
conditional->branch_computation(others)->root_instruction();
const HloInstruction* other_convert = others_root->operand(operand_num);
if (other_convert->opcode() != HloOpcode::kConvert ||
convert_invalid(other_convert)) {
replica = false;
break;
}
bool eq_shape =
is_layout_sensitive
? ShapeUtil::Equal(other_convert->shape(),
special_convert_candidate->shape()) &&
ShapeUtil::Equal(
other_convert->operand(0)->shape(),
special_convert_candidate->operand(0)->shape())
: ShapeUtil::Compatible(other_convert->shape(),
special_convert_candidate->shape()) &&
ShapeUtil::Compatible(
other_convert->operand(0)->shape(),
special_convert_candidate->operand(0)->shape());
if (!eq_shape) {
replica = false;
break;
}
auto repeated =
absl::c_count_if(others_root->operands(),
[&](const HloInstruction* operand) -> bool {
return (special_convert_candidate == operand);
}) > 1;
if (repeated) {
replica = false;
break;
}
}
if (replica) {
special_convert.insert(operand_num);
}
}
return special_convert;
}
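// Reroutes the users of `conditional` through a tuple of fresh
// get-tuple-elements so that later rewrites can replace individual elements.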
absl::Status RestructureConditionalInstruction(HloComputation* computation,
HloInstruction* conditional) {
HloInstruction* old_root = computation->root_instruction();
std::vector<HloInstruction*> new_operands;
int cur_index = 0;
for (; cur_index < ShapeUtil::TupleElementCount(conditional->shape());
++cur_index) {
new_operands.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(conditional->shape(), cur_index),
conditional, cur_index)));
}
HloInstruction* new_tuple =
computation->AddInstruction(HloInstruction::CreateTuple(new_operands));
if (old_root == conditional) {
computation->set_root_instruction(new_tuple);
} else {
std::vector<HloInstruction*> new_tuple_users;
for (auto conditional_user : conditional->users()) {
auto is_new_gte = absl::c_find_if(
new_operands,
[&](HloInstruction* instr) { return instr == conditional_user; });
if (is_new_gte == new_operands.end()) {
new_tuple_users.push_back(conditional_user);
}
}
for (auto new_tuple_user : new_tuple_users) {
TF_RETURN_IF_ERROR(
conditional->ReplaceUseWith(new_tuple_user, new_tuple));
}
}
VLOG(2) << "computation after root restructure:\n" << computation->ToString();
return absl::OkStatus();
}
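// Hoists the converts found by FindSpecialConverts out of all branches: each
// branch root returns the convert operands instead, and the converts are
// re-created in the parent on get-tuple-elements of the rebuilt conditional.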
absl::StatusOr<bool> ConvertSpecialMove(HloInstruction* conditional,
bool is_layout_sensitive) {
int branch_count = conditional->branch_count();
if (branch_count <= 0) {
return false;
}
for (int branch_num = 0; branch_num < branch_count; ++branch_num) {
HloInstruction* branch_root =
conditional->branch_computation(branch_num)->root_instruction();
if (branch_root->opcode() != HloOpcode::kTuple) {
return false;
}
}
HloInstruction* old_root =
conditional->branch_computation(0)->root_instruction();
VLOG(2) << "BEFORE :" << conditional->GetModule()->ToString();
auto find_gte = [](const HloInstruction* conditional_result,
int64_t index) -> HloInstruction* {
for (HloInstruction* instr : conditional_result->users()) {
if (instr->opcode() != HloOpcode::kGetTupleElement) {
return nullptr;
}
if (instr->tuple_index() == index) {
return instr;
}
}
return nullptr;
};
absl::flat_hash_set<int64_t> special_convert = FindSpecialConverts(
old_root, branch_count, conditional, is_layout_sensitive);
if (special_convert.empty()) {
return false;
}
TF_RETURN_IF_ERROR(
RestructureConditionalInstruction(conditional->parent(), conditional));
for (int branch = 0; branch < branch_count; branch++) {
old_root = conditional->branch_computation(branch)->root_instruction();
absl::flat_hash_map<HloInstruction*, int64_t> map_inst_to_tuple_index;
std::vector<HloInstruction*> new_operands(old_root->operand_count());
absl::flat_hash_set<HloInstruction*> to_hoist_set;
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
map_inst_to_tuple_index[old_root->mutable_operand(operand_num)] =
operand_num;
}
for (int64_t operand_num = 0; operand_num < old_root->operand_count();
++operand_num) {
HloInstruction* hoist = old_root->mutable_operand(operand_num);
if (!special_convert.contains(operand_num)) {
new_operands[operand_num] = old_root->mutable_operand(operand_num);
continue;
}
to_hoist_set.insert(hoist);
int64_t new_tuple_count = old_root->operand_count();
bool inplace = true;
CHECK(!hoist->operands().empty());
for (HloInstruction* prod : hoist->operands()) {
if (inplace) {
map_inst_to_tuple_index[prod] = map_inst_to_tuple_index[hoist];
new_operands[map_inst_to_tuple_index[hoist]] = prod;
inplace = false;
} else {
map_inst_to_tuple_index[prod] = new_tuple_count++;
new_operands.push_back(prod);
}
}
}
HloComputation* cur_branch = conditional->branch_computation(branch);
HloInstruction* new_branch_root =
cur_branch->AddInstruction(HloInstruction::CreateTuple(new_operands));
cur_branch->set_root_instruction(new_branch_root, /*accept_different_shape=*/true);
TF_CHECK_OK(cur_branch->RemoveInstruction(old_root));
if (branch != 0) {
continue;
}
HloComputation* conditional_parent = conditional->parent();
HloInstruction* newconditional =
conditional_parent->AddInstruction(HloInstruction::CreateConditional(
cur_branch->root_instruction()->shape(),
conditional->mutable_operand(0),
absl::MakeSpan(conditional->branch_computations()),
absl::MakeSpan(conditional->operands()).subspan(1)));
TF_RETURN_IF_ERROR(
conditional->ReplaceAllUsesWithDifferentShape(newconditional));
TF_CHECK_OK(conditional_parent->RemoveInstruction(conditional));
conditional = newconditional;
for (HloInstruction* hoist : to_hoist_set) {
VLOG(2) << "Hoisting instruction:" << hoist->ToString();
int64_t hoist_index = map_inst_to_tuple_index[hoist];
HloInstruction* gte_hoist = find_gte(conditional, hoist_index);
CHECK(gte_hoist != nullptr);
std::vector<HloInstruction*> new_operands;
for (HloInstruction* op : hoist->operands()) {
HloInstruction* gte = conditional_parent->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
map_inst_to_tuple_index[op]));
new_operands.push_back(gte);
}
HloInstruction* hoisted = conditional_parent->AddInstruction(
hoist->CloneWithNewOperands(hoist->shape(), new_operands));
VLOG(2) << "Hoisted instruction in parent:" << hoisted->ToString();
TF_RETURN_IF_ERROR(gte_hoist->ReplaceAllUsesWith(hoisted));
TF_CHECK_OK(conditional_parent->RemoveInstruction(gte_hoist));
}
}
VLOG(2) << "AFTER :" << conditional->GetModule()->ToString();
return true;
}
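// Hoists the boundaries in `to_move_out` (identical across all branches) out
// of `conditional`: rebuilds each branch root from `new_boundaries`, rewires
// the conditional's get-tuple-element users, and updates its result shape.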
absl::StatusOr<bool> ConditionalCodeMotion::MoveInstructionOut(
HloInstruction* conditional, std::vector<Boundary>& to_move_out,
std::vector<Boundary>& new_boundaries) {
if (to_move_out.empty()) {
return false;
}
VLOG(1) << "Modifying code--number of boundaries to move out of conditional:"
<< to_move_out.size() << "\n";
HloComputation* conditional_parent = conditional->parent();
std::vector<HloInstruction*> old_conditional_users = conditional->users();
absl::flat_hash_map<Boundary, Boundary> hoisted_boundaries;
VLOG(2) << "before opt:"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint())
<< "\n";
int64_t op_index = 0;
for (const Boundary& b : new_boundaries) {
HloInstruction* op = b.operands()[0];
CHECK(op != nullptr);
VLOG(2) << "Mapping new boundary instr: " << op->ToString() << "\n";
HloInstruction* gtr = conditional_parent->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
op_index++));
Boundary b2(Boundary::Position::kOutsideBranchUser);
b2.mutable_operands().push_back(gtr);
hoisted_boundaries[b] = b2;
}
for (int64_t i = to_move_out.size() - 1; i >= 0; i--) {
CopyOutOfConditional(to_move_out[i], conditional, hoisted_boundaries);
}
VLOG(2) << "Done copy branch instructions out\n"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint())
<< "\n";
for (auto user_instr : old_conditional_users) {
VLOG(2) << "Checking conditional user: " << user_instr->ToString() << "\n";
CHECK(user_instr->opcode() == HloOpcode::kGetTupleElement);
auto tuple_opd = static_cast<HloGetTupleElementInstruction*>(user_instr);
int64_t index = tuple_opd->tuple_index();
Boundary old_user_boundary(Boundary::Position::kInsideBranch);
for (const HloComputation* called_computation :
conditional->called_computations()) {
HloInstruction* root = called_computation->root_instruction();
CHECK(root->operands().size() > index);
old_user_boundary.mutable_operands().push_back(root->operands()[index]);
}
CHECK(ContainsKey(hoisted_boundaries, old_user_boundary));
HloInstruction* new_opd =
hoisted_boundaries[old_user_boundary].operands()[0];
CHECK(new_opd != nullptr);
VLOG(2) << "Try replace all uses of :" << old_user_boundary.ToString()
<< "\n";
TF_RETURN_IF_ERROR(user_instr->ReplaceAllUsesWith(new_opd));
TF_RETURN_IF_ERROR(conditional_parent->RemoveInstruction(user_instr));
}
VLOG(2) << "Done changing conditional users\n"
<< conditional_parent->ToString() << "\n";
int64_t branch_count = conditional->branch_count();
for (int i = 0; i < branch_count; i++) {
auto computation = conditional->branch_computation(i);
std::vector<HloInstruction*> elements;
for (const auto& b1 : new_boundaries) {
HloInstruction* op = b1.operands()[i];
CHECK(op != nullptr);
VLOG(2) << "Adding to root " << i << " with " << op->ToString() << "\n";
elements.push_back(op);
}
HloInstruction* tuple =
computation->AddInstruction(HloInstruction::CreateTuple(elements));
computation->set_root_instruction(tuple, /*accept_different_shape=*/true);
VLOG(2) << "computation is :" << computation->ToString() << "\n";
for (const auto& b2 : to_move_out) {
auto instr_to_remove = b2.operands()[i];
if (!computation->IsMarkedAsDead(instr_to_remove) &&
instr_to_remove->IsDead()) {
VLOG(2) << "Removing boundary:" << b2.ToString() << "\n";
VLOG(2) << "computation: " << computation->ToString() << "\n";
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instr_to_remove));
}
}
}
HloInstruction* new_root =
conditional->branch_computation(0)->root_instruction();
*conditional->mutable_shape() = new_root->shape();
conditional->copy_sharding(new_root);
VLOG(2) << "done moving instructions out of branches\n"
<< conditional_parent->ToString(HloPrintOptions::Fingerprint());
return true;
}
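// Sinks user instructions of `conditional` into all of its branches,
// extending the branch roots with the sunk results and re-indexing the
// remaining get-tuple-element users of the conditional.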
absl::StatusOr<bool> ConditionalCodeMotion::MoveUserInstructionsIn(
HloInstruction* conditional, std::vector<Boundary>& to_move_in) {
if (to_move_in.empty()) {
return false;
}
absl::flat_hash_map<Boundary, Boundary> hoisted_boundaries;
int64_t to_move_in_size = to_move_in.size();
int64_t branch_count = conditional->branch_count();
HloGetTupleElementInstruction* tuple_use =
DynCast<HloGetTupleElementInstruction>(to_move_in[0].operands()[0]);
int64_t use_index = (tuple_use != nullptr && tuple_use->user_count() == 1)
? tuple_use->tuple_index()
: -1;
VLOG(2) << "Tuple use index = " << use_index << "\n";
int64_t op_index =
conditional->shape().IsTuple()
? ((use_index >= 0) ? conditional->shape().tuple_shapes_size() - 1
: conditional->shape().tuple_shapes_size())
: 0;
Boundary b_opd_use(Boundary::Position::kInsideBranch);
Boundary b_old_root(Boundary::Position::kInsideBranch);
for (int i = 0; i < branch_count; i++) {
auto computation = conditional->branch_computation(i);
auto old_root = computation->root_instruction();
b_old_root.mutable_operands().push_back(old_root);
std::vector<HloInstruction*> operands;
if (old_root->opcode() == HloOpcode::kTuple) {
for (int i = 0; i < old_root->operand_count(); ++i) {
if (i != use_index) {
operands.push_back(old_root->operands()[i]);
} else {
b_opd_use.mutable_operands().push_back(old_root->operands()[i]);
}
}
} else if (old_root->shape().IsTuple()) {
const Shape& old_shape = old_root->shape();
for (int i = 0; i < old_shape.tuple_shapes_size(); ++i) {
auto element =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
old_shape.tuple_shapes(i), old_root, i));
if (i != use_index) {
operands.push_back(element);
} else {
b_opd_use.mutable_operands().push_back(element);
}
}
} else {
b_opd_use.mutable_operands().push_back(conditional);
}
HloInstruction* new_root =
computation->AddInstruction(HloInstruction::CreateTuple(operands));
VLOG(2) << "setting new root: " << new_root->ToString() << "\n";
computation->set_root_instruction(new_root, /*accept_different_shape=*/true);
if (old_root->opcode() == HloOpcode::kTuple) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(old_root));
}
VLOG(2) << "new branch computation: " << computation->ToString() << "\n";
}
if (use_index != -1) {
for (auto* user : conditional->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() > use_index) {
user->set_tuple_index(user->tuple_index() - 1);
}
}
}
Boundary conditional_boundary(Boundary::Position::kOutsideBranchUser);
conditional_boundary.mutable_operands().push_back(conditional);
hoisted_boundaries[conditional_boundary] = b_old_root;
if (use_index >= 0) {
VLOG(2) << "Mapping GTE: " << tuple_use->ToString() << "\n";
Boundary tuple_use_boundary(Boundary::Position::kOutsideBranchUser);
tuple_use_boundary.mutable_operands().push_back(tuple_use);
hoisted_boundaries[tuple_use_boundary] = b_opd_use;
}
int64_t cp_start = (tuple_use != nullptr) ? 1 : 0;
for (int64_t to_move_index = cp_start; to_move_index < to_move_in_size;
to_move_index++) {
Boundary b_to_move = to_move_in[to_move_index];
HloInstruction* op = b_to_move.operands()[0];
CHECK(op != nullptr);
bool to_be_used_outside = true;
VLOG(2) << "Mapping new boundary instr: " << op->ToString() << "\n";
if (to_move_index < to_move_in_size - 1 && op->user_count() == 1 &&
op->users()[0] == to_move_in[to_move_index + 1].operands()[0]) {
to_be_used_outside = false;
VLOG(2) << "Instruction is not to be used outside the branch\n";
}
Boundary b(Boundary::Position::kInsideBranch);
CopyIntoConditional(b_to_move, conditional, hoisted_boundaries);
if (to_be_used_outside) {
for (int i = 0; i < branch_count; ++i) {
auto computation = conditional->branch_computation(i);
auto new_op = hoisted_boundaries[b_to_move].operands()[i];
auto new_root = computation->root_instruction();
new_root->AppendOperand(new_op);
*new_root->mutable_shape()->add_tuple_shapes() = new_op->shape();
VLOG(2) << "Extending conditional root " << i << " : "
<< new_root->ToString() << "\n";
}
HloInstruction* gtr = conditional->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(op->shape(), conditional,
op_index++));
TF_RETURN_IF_ERROR(op->ReplaceAllUsesWith(gtr));
if (conditional->parent()->root_instruction() == op) {
conditional->parent()->set_root_instruction(gtr);
}
}
}
VLOG(2) << "Done copying instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
HloInstruction* new_root =
conditional->branch_computation(0)->root_instruction();
*conditional->mutable_shape() = new_root->shape();
conditional->copy_sharding(new_root);
if (use_index != -1) {
for (auto* user : conditional->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
VLOG(2) << "Resetting shape of user: " << user->ToString() << "\n";
*user->mutable_shape() =
conditional->shape().tuple_shapes(user->tuple_index());
}
}
}
VLOG(2) << "Done moving user instructions inside branches\n"
<< conditional->parent()->ToString(HloPrintOptions::Fingerprint());
return true;
}
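// Functor that moves one operand-side instruction into the branch
// computations of a conditional, rewriting the conditional's tuple operand
// and the parameter shapes of the branches along the way.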
class MoveOperandIntoBranch {
public:
MoveOperandIntoBranch() = default;
absl::Status operator()(HloInstruction* inst, HloInstruction*& user) {
VLOG(1) << "operand to move into branch: " << inst->ToString();
VLOG(2) << "MoveIntoBranches user =" << user->ToString() << "\n";
CHECK(inst->user_count() == 1 || inst->opcode() == HloOpcode::kBroadcast);
absl::InlinedVector<HloInstruction*, 4> new_operands;
std::vector<std::vector<int64_t>> matching_tuple_indices;
TF_RETURN_IF_ERROR(
ReplaceInputInUser(inst, user, new_operands, matching_tuple_indices));
TF_RETURN_IF_ERROR(
MoveInputIntoBranch(inst, user, new_operands, matching_tuple_indices));
if (inst->user_count() == 0) {
TF_RETURN_IF_ERROR(inst->parent()->RemoveInstruction(inst));
}
return absl::OkStatus();
}
private:
HloInstruction* InsertIntoBranch(HloInstruction* inst,
HloInstruction* branch_input) {
VLOG(2) << "Branch input=" << branch_input->ToString() << "\n";
auto branch_comp = branch_input->parent();
std::vector<HloInstruction*> operands(inst->operand_count());
for (int64_t i = 0; i < inst->operand_count(); ++i) {
VLOG(2) << "processing operand =" << i << "\n";
if (branch_input->shape().IsTuple()) {
int64_t j = std::find(inst->operands().begin(), inst->operands().end(),
inst->operands()[i]) -
inst->operands().begin();
VLOG(2) << "operand index = " << j << "\n";
CHECK(j < branch_input->shape().tuple_shapes_size());
if (j < i) {
operands[i] = operands[j];
} else {
CHECK(op_map_.contains(inst->operands()[i]));
int64_t index = op_map_[inst->operands()[i]];
operands[i] =
branch_comp->AddInstruction(HloInstruction::CreateGetTupleElement(
branch_input->shape().tuple_shapes(index), branch_input,
index));
}
} else {
CHECK(inst->operands()[i] == inst->operands()[0]);
operands[i] = branch_input;
}
}
return branch_comp->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), operands));
}
bool UpdateParamShape(
std::vector<std::vector<int64_t>>& matching_tuple_indices,
const Shape* param_shape, HloInstruction*& branch_param,
HloInstruction*& param_tuple) {
bool used = false;
for (int64_t matching_index = matching_tuple_indices.size() - 1;
matching_index >= 0; --matching_index) {
auto* new_tuple = CloneNestedTuples(branch_param);
CHECK_NE(new_tuple, nullptr);
VLOG(5) << "Cloned new tuple:" << new_tuple->parent()->ToString() << "\n";
std::vector<std::vector<HloInstruction*>> gte_users;
gte_users.reserve(branch_param->shape().tuple_shapes_size());
for (int64_t j = 0; j < branch_param->shape().tuple_shapes_size(); ++j) {
gte_users.push_back(std::vector<HloInstruction*>());
}
for (auto* param_user : branch_param->users()) {
if (param_user->opcode() == HloOpcode::kGetTupleElement) {
CHECK_LT(param_user->tuple_index(), gte_users.size());
gte_users[param_user->tuple_index()].push_back(param_user);
}
}
used = false;
*branch_param->mutable_shape() = *param_shape;
const Shape* new_param_shape = nullptr;
for (auto param_users : gte_users) {
if (param_users.empty()) continue;
CHECK_EQ(param_users[0]->opcode(), HloOpcode::kGetTupleElement);
auto tuple_index = param_users[0]->tuple_index();
VLOG(1) << "Processing gte users: " << param_users.size() << "\n";
VLOG(1) << "tuple_index: " << tuple_index << "\n";
VLOG(1) << "matching_tuple_indices: "
<< matching_tuple_indices[matching_index][0] << "\n";
if (matching_tuple_indices[matching_index].end() ==
std::find(matching_tuple_indices[matching_index].begin(),
matching_tuple_indices[matching_index].end(),
tuple_index)) {
continue;
}
for (HloInstruction* param_user : param_users) {
VLOG(1) << "param_user: " << param_user->ToString() << "\n";
if (new_param_shape == nullptr) {
branch_param = param_user;
if (matching_index > 0) {
param_tuple = branch_param;
}
CHECK_GT(param_shape->tuple_shapes_size(), tuple_index);
new_param_shape = ¶m_shape->tuple_shapes(tuple_index);
param_shape = new_param_shape;
VLOG(1) << "new_param_shape: " << param_shape->ToString();
*param_user->mutable_shape() = *new_param_shape;
VLOG(1) << "branch parameter: " << param_user->ToString();
used = true;
} else {
VLOG(1) << "new_param_shape=" << new_param_shape->ToString();
*param_user->mutable_shape() = *new_param_shape;
TF_CHECK_OK(param_user->ReplaceAllUsesWith(branch_param));
}
}
}
if (!used) {
break;
}
}
return used;
}
absl::Status ReplaceInputInUser(
HloInstruction* input, HloInstruction*& user,
absl::InlinedVector<HloInstruction*, 4>& new_operands,
std::vector<std::vector<int64_t>>& matching_tuple_indices) {
for (int64_t j = 0; j < input->operand_count(); ++j) {
VLOG(2) << "Push back input operand index: " << j;
auto operand = input->mutable_operand(j);
if (std::find(new_operands.begin(), new_operands.end(), operand) ==
new_operands.end()) {
new_operands.push_back(operand);
}
}
if (user->opcode() == HloOpcode::kTuple) {
for (HloInstruction *input_now = input, *user_now = user;
user_now->opcode() != HloOpcode::kConditional;
input_now = user_now, user_now = user_now->users()[0]) {
std::vector<int64_t> matching_tuple_index;
for (int64_t i = 0; i < user_now->operand_count(); ++i) {
if (user_now->operand(i) != input_now) {
continue;
}
matching_tuple_index.push_back(i);
}
CHECK(!matching_tuple_index.empty());
matching_tuple_indices.push_back(matching_tuple_index);
CHECK_EQ(user_now->user_count(), 1);
}
CHECK(!matching_tuple_indices.empty());
int64_t repl_count = 0;
for (auto opd_index : matching_tuple_indices[0]) {
HloInstruction* new_input =
(repl_count < new_operands.size())
? new_operands[repl_count++]
: input->AddInstruction(HloInstruction::CreateTuple({}));
op_map_[new_input] = opd_index;
VLOG(2) << "Mapping operand " << repl_count << " = "
<< new_input->ToString() << " to " << opd_index;
TF_RETURN_IF_ERROR(
user->ReplaceOperandWithDifferentShape(opd_index, new_input));
*user->mutable_shape()->mutable_tuple_shapes(opd_index) =
new_input->shape();
}
while (repl_count < new_operands.size()) {
HloInstruction* new_input = new_operands[repl_count++];
auto new_input_in_user = std::find(user->operands().begin(),
user->operands().end(), new_input);
int64_t opd_index = (new_input_in_user == user->operands().end())
? user->operand_count()
: new_input_in_user - user->operands().begin();
op_map_[new_input] = opd_index;
CHECK(op_map_.contains(new_input));
VLOG(2) << "Mapping operand " << new_input->ToString() << " to "
<< opd_index;
user->AppendOperand(new_input);
user->mutable_shape()->mutable_tuple_shapes()->push_back(
new_input->shape());
}
int64_t nesting_index = 1;
for (auto user_now = user->users()[0];
nesting_index < matching_tuple_indices.size() &&
user_now->opcode() != HloOpcode::kConditional;
user = user_now, user_now = user_now->users()[0], nesting_index++) {
VLOG(2) << "Replacing tuple: " << user->ToString();
CHECK(user_now->shape().IsTuple());
for (auto opd_index : matching_tuple_indices[nesting_index]) {
*user_now->mutable_shape()->mutable_tuple_shapes(opd_index) =
user->shape();
}
VLOG(2) << "Done replacing tuple:" << user->ToString();
CHECK_EQ(user_now->user_count(), 1);
}
VLOG(2) << "User: " << user->ToString() << "\n";
}
return absl::OkStatus();
}
absl::Status MoveInputIntoBranch(
HloInstruction* input, HloInstruction*& user,
absl::InlinedVector<HloInstruction*, 4>& new_operands,
std::vector<std::vector<int64_t>>& matching_tuple_indices) {
HloInstruction* cond =
(user->opcode() != HloOpcode::kConditional && user->user_count() == 1)
? user->users()[0]
: user;
if (user == cond) {
auto new_input =
input->AddInstruction(HloInstruction::CreateTuple(new_operands));
for (int64_t i = 0; i < new_operands.size(); ++i) {
op_map_[new_operands[i]] = i;
}
user = new_input;
TF_RETURN_IF_ERROR(input->ReplaceUseWithDifferentShape(cond, new_input));
}
TF_RET_CHECK(cond->opcode() == HloOpcode::kConditional)
<< "User has non-conditional users";
for (int64_t branch = 0; branch < cond->branch_count(); ++branch) {
if (cond->operand(branch + 1) != user) {
continue;
}
VLOG(2) << "Modifying conditional branch: " << branch << "\n";
auto branch_comp = cond->branch_computation(branch);
auto branch_param = branch_comp->parameter_instruction(0);
auto* param_shape = &user->shape();
VLOG(2) << "param_shape: " << param_shape->ToString() << "\n";
VLOG(2) << "branch parameter: " << branch_param->ToString() << "\n";
HloInstruction* param_tuple = branch_param;
if (matching_tuple_indices.empty()) {
VLOG(2) << "The original input is passed in as conditional parameter "
"directly.";
VLOG(5) << branch_comp->ToString() << "\n";
*branch_param->mutable_shape() = *param_shape;
if (branch_param == branch_comp->root_instruction()) {
VLOG(2) << "Cloning root user";
auto new_user =
branch_comp->AddInstruction(HloInstruction::CreateGetTupleElement(
branch_param->shape().tuple_shapes(0), branch_param, 0));
VLOG(2) << "new_user: " << new_user->ToString() << "\n";
branch_comp->set_root_instruction(new_user, /*accept_different_shape=*/true);
}
} else {
if (!UpdateParamShape(matching_tuple_indices, param_shape, branch_param,
param_tuple)) {
VLOG(2) << "instruction is not used in this branch.";
continue;
}
}
auto inserted = InsertIntoBranch(input, param_tuple);
VLOG(2) << "Inserted operands:" << inserted->ToString() << "\n";
std::vector<HloInstruction*> tuple_users = branch_param->users();
for (auto param_user : tuple_users) {
if (param_user == inserted ||
(param_user->opcode() == HloOpcode::kGetTupleElement &&
param_user != branch_comp->root_instruction())) {
continue;
}
TF_RETURN_IF_ERROR(
branch_param->ReplaceUseWithDifferentShape(param_user, inserted));
if (branch_comp->root_instruction()->opcode() ==
HloOpcode::kGetTupleElement &&
!branch_comp->root_instruction()->operand(0)->shape().IsTuple()) {
branch_comp->set_root_instruction(
branch_comp->root_instruction()->mutable_operands()[0]);
}
UpdateTupleUsers(inserted);
}
}
return absl::OkStatus();
}
void UpdateTupleUsers(HloInstruction* param_user) {
for (auto new_user : param_user->users()) {
if (new_user->opcode() == HloOpcode::kTuple) {
for (int64_t opd_index = 0; opd_index < new_user->operand_count();
++opd_index) {
if (new_user->operands()[opd_index] != param_user) {
continue;
}
*new_user->mutable_shape()->mutable_tuple_shapes(opd_index) =
param_user->shape();
UpdateTupleUsers(new_user);
VLOG(2) << "Updated tuple user: " << new_user->ToString() << "\n";
}
}
}
}
absl::flat_hash_map<const HloInstruction*, int64_t> op_map_;
};
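// Sinks the operand-side boundaries in `to_move_in` into the branches one at
// a time, tracking the chain of tuple users between each moved instruction
// and the conditional so that `user` stays valid across rewrites.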
absl::StatusOr<bool> ConditionalCodeMotion::MoveOperandInstructionsIn(
HloInstruction* conditional, std::vector<Boundary>& to_move_in) {
int64_t to_move_in_size = to_move_in.size();
CHECK_GT(to_move_in_size, 0);
VLOG(2) << "Before moving operand instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
HloInstruction* user = conditional;
int64_t user_index = 0;
MoveOperandIntoBranch move_into_branch;
for (int64_t to_move_index = 0; to_move_index < to_move_in_size;
to_move_index++) {
Boundary b_to_move = to_move_in[to_move_index];
HloInstruction* op = b_to_move.operands()[0];
CHECK_NE(op, nullptr);
if (op->user_count() == 1) {
user = op->users()[0];
user_index = user->operand_index(op);
}
if (op->opcode() == HloOpcode::kTuple) {
continue;
}
VLOG(1) << "Mapping new boundary instr: " << op->ToString() << "\n";
VLOG(1) << "current user = " << user->ToString();
std::vector<std::pair<HloInstruction*, int64_t>> users;
for (auto* user_now = user; user_now != conditional;
user_now = user_now->users()[0]) {
CHECK_EQ(user_now->user_count(), 1);
VLOG(1) << "Saving user: " << user_now->ToString() << "\n";
users.push_back(std::make_pair(
user_now->users()[0], user_now->users()[0]->operand_index(user_now)));
}
TF_RETURN_IF_ERROR(move_into_branch(op, user));
for (int64_t i = users.size() - 1; i > 0; --i) {
CHECK_NE(users[i].first, nullptr);
CHECK_NE(users[i - 1].first, nullptr);
users[i - 1].first = users[i].first->mutable_operand(users[i].second);
}
if (!users.empty()) {
user = users.front().first->mutable_operand(users.front().second);
VLOG(1) << "Updated user: " << user->ToString() << "\n";
}
}
VLOG(2) << "Done moving operand instructions inside branch: "
<< conditional->ToString(HloPrintOptions::Fingerprint()) << "\n";
return true;
}
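// Groups connected boundaries around a conditional and decides, using the
// configurable move/reuse tables (optionally mutated via the search
// configuration), which groups are beneficial to move in or out.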
class GroupConnectedBoundaries {
private:
std::vector<Boundary> connected_boundaries_, new_boundaries_;
int64_t connected_boundaries_memory_increase_ = 0;
HloInstruction* conditional_;
HloComputation* conditional_parent_;
bool is_layout_sensitive_;
absl::flat_hash_map<HloInstruction*, int>& visited_count_;
std::vector<std::vector<int64_t>>& move_config_;
std::vector<std::vector<int64_t>>& reuse_config_;
absl::Span<int64_t> search_config_vec_;
int64_t& search_config_;
int64_t search_subscript_;
absl::flat_hash_map<const int64_t*, int64_t> flipped_;
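// Optionally flips the decision stored at `loc` between 0 and `non_zero`, as
// directed by the search configuration (start index, max flip count, and
// stride); each location is flipped at most once.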
int64_t FlipMutation(int64_t* loc, const int64_t non_zero,
const std::string& msg) {
if (search_config_ == 0 || ContainsKey(flipped_, loc)) {
VLOG(2) << "Configured not to search or loc is already flipped.";
return *loc;
}
int c = ConditionalCodeMotion::flip_start(search_config_);
VLOG(2) << "flip start index = " << c << "\n";
if (c > 0) {
search_config_--;
return *loc;
}
auto flip_count = ConditionalCodeMotion::DecrementMaxFlip(&search_config_);
VLOG(2) << "max flip count = " << flip_count << "\n";
VLOG(2) << "Updating max Flipping configuration = " << search_config_
<< "\n";
if (flip_count == 0) {
VLOG(2) << "Maximum flip count has reached. ";
if (search_subscript_ + 1 < search_config_vec_.size()) {
VLOG(2) << "search_subscript_ = " << search_subscript_;
VLOG(2) << "search config vec size = " << search_config_vec_.size();
search_config_ = search_config_vec_[++search_subscript_];
} else {
return *loc;
}
}
auto flip_stride = ConditionalCodeMotion::flip_stride(search_config_);
search_config_ += flip_stride;
VLOG(2) << "flip stride = " << flip_stride << "\n";
VLOG(2) << "Updating Flipping Stride = " << search_config_ << "\n";
flipped_[loc] = *loc;
switch (*loc) {
case 0:
*loc = non_zero;
break;
default:
*loc = 0;
break;
}
VLOG(2) << "Flipping decision for: " << msg << ": from " << flipped_[loc]
<< " to " << *loc << "\n";
return *loc;
}
static std::vector<int64_t>& EnsureSearchConfig(
std::vector<int64_t>& search_config) {
if (search_config.empty()) {
search_config.push_back(0);
}
return search_config;
}
public:
explicit GroupConnectedBoundaries(
HloInstruction* conditional, bool is_layout_sensitive,
absl::flat_hash_map<HloInstruction*, int>& visited_count,
std::vector<std::vector<int64_t>>* move_config,
std::vector<std::vector<int64_t>>* reuse_config,
std::vector<int64_t>& search_config)
: conditional_(conditional),
conditional_parent_(conditional->parent()),
is_layout_sensitive_(is_layout_sensitive),
visited_count_(visited_count),
move_config_(*move_config),
reuse_config_(*reuse_config),
search_config_vec_(EnsureSearchConfig(search_config)),
search_config_(search_config_vec_.front()),
search_subscript_(0) {
VLOG(2) << "Initializing Group Connected Boundaries\n";
}
int64_t ReusesCarriedBy(HloInstruction* op, HloInstruction* user) {
std::vector<int64_t>& curconfig =
reuse_config_[static_cast<uint32_t>(op->opcode())];
int64_t config =
(search_config_ < 0)
? FlipMutation(&curconfig[static_cast<uint32_t>(user->opcode())],
-10,
absl::StrCat(HloOpcodeString(op->opcode()), "->",
HloOpcodeString(user->opcode())))
: curconfig[static_cast<uint32_t>(user->opcode())];
VLOG(2) << "ConditionalCodeMotion: Add reuses carried by instr: "
<< op->ToString() << "=>" << user->ToString() << " : " << config
<< "\n";
if (config < 0) {
int count1 = CountNonLeafOps(op->users());
int count2 = CountNonLeafOps(user->operands());
return (-config) / count1 / count2;
}
return config;
}
void clear_recently_visited() {
for (const auto& boundary : new_boundaries_) {
visited_count_.erase(boundary.operands()[0]);
}
}
bool WorthHoisting(HloInstruction* instruction, Boundary::Position pos,
int64_t index) {
VLOG(1) << "Check Worth hoisting\n";
HloOpcode opcode = instruction->opcode();
if (opcode == HloOpcode::kTuple &&
instruction == conditional_parent_->root_instruction()) {
VLOG(1) << "Do not move conditional parent.";
return false;
}
if (pos == Boundary::Position::kOutsideBranchOperand) {
if (opcode == HloOpcode::kTuple && instruction->has_sharding()) {
VLOG(1) << "Not moving operand because of sharding annotations.";
return false;
}
if (instruction->user_count() > 1) {
VLOG(1) << "Not moving operand b/c it has >1 users.";
return false;
}
if (instruction->HasSideEffect()) {
VLOG(1) << "Not moving operand b/c it has side effects.";
return false;
}
if (opcode == HloOpcode::kGetTupleElement) {
VLOG(1) << "Do not move GetTupleElement.";
return false;
}
}
if (DynCast<HloChannelInstruction>(instruction) &&
pos != Boundary::Position::kInsideBranch) {
VLOG(1) << "It is not safe to move collectives inside branches.";
return false;
}
if (opcode == HloOpcode::kParameter) {
return false;
}
if (opcode == HloOpcode::kGetTupleElement &&
pos == Boundary::Position::kOutsideBranchOperand) {
return false;
}
std::vector<int64_t>& curconfig =
move_config_[static_cast<uint32_t>(opcode)];
auto col = (curconfig.size() == 1) ? 0
: (instruction->operand_count() > 0)
? static_cast<uint32_t>(instruction->operand(0)->opcode())
: 0;
VLOG(2) << "column = " << col << "\n";
VLOG(2) << "config size = " << curconfig.size() << "\n";
VLOG(2) << "search_config = " << search_config_ << "\n";
CHECK(col < curconfig.size());
uint32_t config =
(search_config_ > 0)
? FlipMutation(&curconfig[col], 1,
absl::StrCat("Move-", HloOpcodeString(opcode)))
: curconfig[col];
VLOG(2) << "Checking instruction is worth moving: " << config << "\n";
VLOG(2) << "after checking search_config = " << search_config_ << "\n";
return (config != 0);
}
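// Sums the reuse already carried by the operands of `user` before the
// boundary is moved, resolving conditional results to the corresponding
// branch-root operands where applicable.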
int64_t ReusesBeforeBoundary(HloInstruction* user) {
int64_t reuses = 0;
for (auto op : user->operands()) {
if (!ContainsKey(visited_count_, op) && op != conditional_) {
continue;
}
if (auto tuple_gte = DynCast<HloGetTupleElementInstruction>(user)) {
if (op->opcode() == HloOpcode::kConditional) {
auto tuple = op->branch_computation(0)->root_instruction();
if (tuple->opcode() == HloOpcode::kTuple) {
auto index = tuple_gte->tuple_index();
CHECK(index < tuple->operand_count());
op = tuple->mutable_operand(index);
}
}
reuses += ReusesCarriedBy(op, user->users()[0]);
} else {
reuses += ReusesCarriedBy(op, user);
}
}
VLOG(2) << "Reuses before instruction " << user->ToString() << ":" << reuses
<< "\n";
return reuses;
}
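// Estimates the reuse made possible after the boundary is moved by following
// the (single) user chain; `tuple_idx` tracks the tuple element index when
// the value reaches the conditional through a tuple operand.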
int64_t ReusesAfterBoundary(HloInstruction* user, int64_t tuple_idx = -1) {
CHECK(user != nullptr);
if (user->opcode() == HloOpcode::kConstant) {
return 0;
}
auto all_users = user->users();
if (tuple_idx < 0 && all_users.size() > 1) {
VLOG(2) << "Having multiple users from: " << user->ToString() << "\n";
return 0;
}
if (!all_users.empty()) {
auto op = all_users[0];
int64_t reuses = 0;
if (tuple_idx >= 0) {
VLOG(2) << "Reuse for conditional operands with tuple index = "
<< tuple_idx << "\n";
VLOG(2) << "user op = " << op->ToString();
if (op->opcode() == HloOpcode::kConditional) {
int64_t reuse_count = 0;
for (int64_t i = 0; i < conditional_->branch_count(); ++i) {
VLOG(5) << "Counting use in branch " << i << "\n";
if (conditional_->operand(i + 1) != user) {
continue;
}
CHECK_EQ(conditional_->branch_computation(i)
->parameter_instructions()
.size(),
1);
auto param_i =
conditional_->branch_computation(i)->parameter_instruction(0);
if (param_i ==
conditional_->branch_computation(i)->root_instruction()) {
VLOG(5) << "parameter is root.\n";
reuse_count++;
continue;
}
if (!param_i->shape().IsTuple() && param_i->user_count() > 0) {
VLOG(5) << "parameter is not tuple and is used. \n";
reuse_count++;
continue;
}
for (auto* param_i_user : param_i->users()) {
if (param_i_user->opcode() == HloOpcode::kGetTupleElement &&
param_i_user->tuple_index() == tuple_idx) {
reuse_count++;
VLOG(5) << "Found user" << param_i_user->ToString() << "\n";
break;
}
}
}
VLOG(2) << "Reuse count for conditional:" << reuse_count << "\n";
if (reuse_count < conditional_->branch_count()) {
reuses += 10;
}
} else if (op->opcode() == HloOpcode::kTuple) {
VLOG(2) << "new tuple index = " << op->operand_index(user);
return ReusesAfterBoundary(op, op->operand_index(user));
} else {
return ReusesAfterBoundary(op, tuple_idx);
}
} else if (op ==
conditional_->branch_computation(0)->root_instruction()) {
int64_t index = op->operand_index(user);
for (auto op2 : conditional_->users()) {
if (op2->opcode() == HloOpcode::kGetTupleElement) {
auto tuple_opd = static_cast<HloGetTupleElementInstruction*>(op2);
if (index == tuple_opd->tuple_index()) {
all_users = op2->users();
if (!all_users.empty()) {
reuses += ReusesCarriedBy(user, all_users[0]);
break;
}
}
}
}
} else if (ContainsKey(visited_count_, op)) {
reuses += ReusesCarriedBy(user, op);
}
VLOG(2) << "reuses after instruction " << user->ToString() << ":"
<< reuses << "\n";
return reuses;
}
return 0;
}
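// Computes the net benefit of moving a group of connected boundaries from the
// reuse estimates before and after the move, plus a bonus for copies that can
// be folded with copies already inside the branch roots.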
int64_t BenefitForMovingBoundaries(const std::vector<Boundary>& boundaries,
bool perform_reuse_analysis = true) {
int64_t reuses_before = 0, reuses_after = 0;
if ((boundaries[0].IsInsideBranch() ||
boundaries[0].IsOutsideBranchOperand()) &&
absl::c_count_if(boundaries, [](const Boundary& b) {
return b.operands()[0]->opcode() != HloOpcode::kTuple;
}) == 0) {
return -1;
}
if (boundaries.size() == 1) {
if (boundaries[0].IsOutsideBranchUser() &&
boundaries[0].operands()[0]->opcode() ==
HloOpcode::kGetTupleElement) {
return -1;
}
}
if (!perform_reuse_analysis) {
return 1;
}
auto get_copy_folding_benefit = [&](HloInstruction* hlo) -> int64_t {
if (hlo->opcode() != HloOpcode::kCopy) {
return 0;
}
const HloGetTupleElementInstruction* gte =
DynCast<HloGetTupleElementInstruction>(hlo->operand(0));
if (gte == nullptr) {
return 0;
}
const HloInstruction* conditional = gte->operand(0);
if (conditional != conditional_) {
return 0;
}
int64_t benefit = 0;
for (auto* branch : conditional->called_computations()) {
HloInstruction* root = branch->root_instruction();
if (root->opcode() == HloOpcode::kTuple) {
const auto* tuple_operand = root->operand(gte->tuple_index());
if (tuple_operand->opcode() == HloOpcode::kCopy) {
if (Shape::Equal()(tuple_operand->operand(0)->shape(),
hlo->shape())) {
benefit += 10;
}
}
}
}
return benefit;
};
for (const Boundary& b : boundaries) {
auto op = b.operands()[0];
if (op == conditional_->branch_computation(0)->root_instruction()) {
continue;
}
VLOG(2) << "Benefit for " << op->ToString();
reuses_before += ReusesBeforeBoundary(op);
VLOG(2) << "Reuses before boundary so far: " << reuses_before << "\n";
reuses_after += ReusesAfterBoundary(
op, boundaries[0].IsOutsideBranchOperand() ? 0 : -1);
VLOG(2) << "Reuese after boundary so far : " << reuses_after << "\n";
}
int64_t copy_folding_benefit = 0;
if (boundaries[0].IsOutsideBranchUser()) {
for (const Boundary& b : boundaries) {
auto op = b.operands()[0];
copy_folding_benefit += get_copy_folding_benefit(op);
}
}
VLOG(2) << "Copy folding benefit: " << copy_folding_benefit;
if (reuses_after == 0 && reuses_before == 0 && copy_folding_benefit == 0) {
return -1;
} else if (boundaries[0].IsInsideBranch()) {
return reuses_after - reuses_before;
} else if (boundaries[0].IsOutsideBranchUser()) {
return reuses_before - reuses_after - 1 + copy_folding_benefit;
} else {
CHECK(boundaries[0].IsOutsideBranchOperand());
return reuses_after > 0;
}
}
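  // Returns the boundary reached from `b` by stepping each per-branch
  // instruction to its op_index-th operand (when inside a branch or on the
  // operand side) or to its op_index-th user (when on the user side).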
Boundary GetNextBoundary(const Boundary& b, int64_t op_index) {
Boundary b2(b.GetPosition());
for (int j = 0; j < b.operands().size(); ++j) {
HloInstruction* inst = b.operands()[j];
CHECK(inst != nullptr);
HloInstruction* op = (b.IsInsideBranch() || b.IsOutsideBranchOperand())
? inst->operands()[op_index]
: inst->users()[op_index];
CHECK(op != nullptr);
b2.mutable_operands().push_back(op);
}
return b2;
}
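  // Returns whether `next_boundary` may be moved across. An instruction with
  // multiple dependents only becomes movable once all of its dependents have
  // been visited (tracked via visited_count_), and the conditional's
  // predicate operand is never moved.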
bool IsSafeToMoveBoundary(const Boundary& next_boundary) {
VLOG(1) << "Check is safe to move boundary.\n";
int64_t next_boundary_count =
(next_boundary.IsInsideBranch() ||
next_boundary.IsOutsideBranchOperand())
? next_boundary.operands()[0]->user_count()
: CountNonLeafOps(next_boundary.operands()[0]->operands());
if (next_boundary_count <= 1) {
if (next_boundary.IsOutsideBranchOperand() &&
next_boundary.operands()[0]->users()[0] == conditional_ &&
next_boundary.operands()[0] == conditional_->operand(0)) {
return false;
}
return true;
} else {
if (!ContainsKey(visited_count_, next_boundary.operands()[0])) {
VLOG(1) << "Skip next boundary " << next_boundary.ToString() << "\n"
<< " because it has multiple dependents: "
<< next_boundary_count << "\n";
visited_count_[next_boundary.operands()[0]] = 1;
new_boundaries_.push_back(next_boundary);
} else {
auto pos = std::find(new_boundaries_.begin(), new_boundaries_.end(),
next_boundary);
if (pos != new_boundaries_.end() ||
next_boundary.operands().size() == 1) {
int count = ++visited_count_[next_boundary.operands()[0]];
if (count == next_boundary_count) {
VLOG(2) << "Recovering next boundary " << next_boundary.ToString()
<< "\n"
<< " because all of its dependents have been visited: "
<< next_boundary_count << "\n";
visited_count_.erase(next_boundary.operands()[0]);
if (pos != new_boundaries_.end()) {
new_boundaries_.erase(pos);
}
return true;
}
} else {
VLOG(1) << "Skip incompatible multi-dependent boundary: "
<< next_boundary.ToString() << ":" << next_boundary_count
<< "\n";
}
}
}
return false;
}
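  // Starting from `boundary`, collects into connected_boundaries_ the
  // connected group of boundaries that are safe and worth moving together,
  // tracking the estimated memory increase of the move in 512-byte units.
  // Multi-use broadcasts feeding the group may be replicated; boundaries that
  // cannot move are recorded in new_boundaries_.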
void AddBoundaries(const Boundary& boundary) {
auto calc_memory_size = [](const HloInstruction* hlo) -> int64_t {
if (hlo->shape().IsTuple()) {
return 0;
}
return ShapeUtil::ByteSizeOf(hlo->shape(), 1) >> 9;
};
BoundaryVisitor visitor;
visitor.AddToWorkList(boundary);
int64_t boundary_index = 0;
while (visitor.HasNextBoundary()) {
Boundary b = visitor.PopNextBoundary();
VLOG(1) << "visiting boundary " << b.ToString() << "\n";
VLOG(1) << "boundary index=" << boundary_index << "\n";
if ((b.IsOutsideBranchUser() || b.IsOutsideBranchOperand() ||
InstructionWithinBranchIdentical(b.operands(),
is_layout_sensitive_)) &&
IsSafeToMoveBoundary(b) &&
WorthHoisting(b.operands()[0], b.GetPosition(), boundary_index)) {
connected_boundaries_.push_back(b);
boundary_index++;
auto output_size = calc_memory_size(b.operands()[0]);
connected_boundaries_memory_increase_ -= output_size;
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_
<< " after subtracting output size.\n";
VLOG(1) << "boundary can be moved.";
int64_t operand_count =
(b.IsInsideBranch() || b.IsOutsideBranchOperand())
? b.operands()[0]->operand_count()
: b.operands()[0]->users().size();
for (int i = 0; i < operand_count; i++) {
Boundary next_boundary = GetNextBoundary(b, i);
VLOG(1) << "Add operand/user " << i << " to visit later\n";
visitor.AddToWorkList(next_boundary);
connected_boundaries_memory_increase_ +=
calc_memory_size(next_boundary.operands()[0]);
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_
<< " after adding shape size of operand " << i << "\n";
}
} else if (b.IsOutsideBranchOperand() &&
b.operands()[0]->opcode() == HloOpcode::kBroadcast &&
connected_boundaries_.size() > 1 &&
absl::c_find(
b.operands()[0]->users(),
connected_boundaries_[connected_boundaries_.size() - 1]
.operands()[0]) != b.operands()[0]->users().end() &&
connected_boundaries_[connected_boundaries_.size() - 1]
.operands()[0]
->opcode() != HloOpcode::kTuple) {
VLOG(1) << "Replicating multi-use broadcasts:" << b.ToString() << "\n";
connected_boundaries_.push_back(b);
auto output_size = calc_memory_size(b.operands()[0]) -
calc_memory_size(b.operands()[0]->operand(0));
connected_boundaries_memory_increase_ -= output_size;
VLOG(1) << "memory incr = " << connected_boundaries_memory_increase_;
VLOG(1) << "boundary can be moved.";
} else {
VLOG(1) << "boundary cannot be moved\n";
visited_count_[b.operands()[0]] = 1;
new_boundaries_.push_back(b);
}
}
}
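  // Computes the group of connected boundaries to move for `b`. If `b` is the
  // conditional itself, nothing is moved; instead new_boundaries_ is seeded
  // with the branch roots, the conditional's users, and its non-predicate
  // operands. Returns the boundaries to move together with the estimated
  // memory increase.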
std::pair<std::vector<Boundary>, int64_t> BoundariesToMoveInOrOut(
HloInstruction* conditional, const Boundary& b) {
HloInstruction* inst = b.operands()[0];
if (inst == conditional) {
int branch_count = inst->branch_count();
Boundary boundary_in(Boundary::Position::kInsideBranch);
for (int i = 0; i < branch_count; i++) {
HloComputation* branch_computation = inst->branch_computation(i);
HloInstruction* root_inst = branch_computation->root_instruction();
CHECK(root_inst != nullptr);
boundary_in.mutable_operands().push_back(root_inst);
}
new_boundaries_.push_back(boundary_in);
      // Visit the conditional's users as out-of-branch user boundaries.
      for (auto u : inst->users()) {
        Boundary boundary_user(Boundary::Position::kOutsideBranchUser);
        boundary_user.mutable_operands().push_back(u);
        new_boundaries_.push_back(boundary_user);
      }
      // Visit the conditional's branch operands (skipping the predicate at
      // index 0) as out-of-branch operand boundaries.
      for (int64_t opd_idx = 1; opd_idx < inst->operand_count(); opd_idx++) {
        HloInstruction* u = inst->mutable_operand(opd_idx);
        Boundary boundary_operand(Boundary::Position::kOutsideBranchOperand);
        boundary_operand.mutable_operands().push_back(u);
        new_boundaries_.push_back(boundary_operand);
      }
} else {
AddBoundaries(b);
}
return std::pair<std::vector<Boundary>, int64_t>(
connected_boundaries_, connected_boundaries_memory_increase_);
}
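  // Appends the boundaries that remained in place to `b`.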
void AddNewBoundaries(std::vector<Boundary>& b) {
b.insert(b.end(), new_boundaries_.begin(), new_boundaries_.end());
}
};
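// Considers moving the group of boundaries connected to `cur_boundary` either
// out of or into the conditional's branches. A beneficial move is vetoed when
// its average per-boundary memory increase exceeds
// memory_increase_allowance_. On success, fills `to_move` and
// `new_boundaries` and returns the chosen direction; otherwise returns
// kNoChange.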
ConditionalCodeMotion::Decision ConditionalCodeMotion::ConsiderCodeMotion(
HloInstruction* conditional, const Boundary& cur_boundary,
std::vector<Boundary>& to_move, std::vector<Boundary>& new_boundaries,
absl::flat_hash_map<HloInstruction*, int>& visited_count) {
GroupConnectedBoundaries connect(conditional, is_layout_sensitive_,
visited_count, &move_config_, &reuse_config_,
search_config_);
auto move_in_or_out =
connect.BoundariesToMoveInOrOut(conditional, cur_boundary);
if (!move_in_or_out.first.empty()) {
auto benefit = connect.BenefitForMovingBoundaries(
move_in_or_out.first, search_config_map_.empty());
VLOG(2) << "benefit of moving in or out "
<< cur_boundary.operands()[0]->ToString() << ":" << benefit << "\n";
if (benefit >= 0) {
if (move_in_or_out.second > 0 &&
move_in_or_out.second / move_in_or_out.first.size() >
memory_increase_allowance_) {
VLOG(1) << "Stop moving operands because of memory pressure: "
<< move_in_or_out.second << " / " << move_in_or_out.first.size()
<< " > " << memory_increase_allowance_ << "\n";
benefit = -1;
} else {
VLOG(1) << "Increase memory pressure by " << move_in_or_out.second
<< "\n";
memory_increase_ += move_in_or_out.second;
}
}
if (benefit >= 0) {
new_boundaries.clear();
connect.AddNewBoundaries(new_boundaries);
to_move = move_in_or_out.first;
return Decision(to_move[0].IsInsideBranch()
? Decision::Direction::kMoveOutOfBranch
: Decision::Direction::kMoveIntoBranch,
benefit);
} else {
connect.clear_recently_visited();
}
} else {
connect.AddNewBoundaries(new_boundaries);
}
return Decision(Decision::Direction::kNoChange, 0);
}
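// Runs conditional code motion over `module`: candidate conditionals are
// collected, each is analyzed to decide whether hoisting boundaries out of or
// sinking them into its branches is more beneficial, shared branch
// computations are cloned before mutation, and a DCE/tuple-simplifier
// subpipeline cleans up afterwards. Fuel ("conditional_code_motion") bounds
// the number of transformations.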
absl::StatusOr<bool> ConditionalCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Begin a new pass of conditional code motion optimization.\n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching 0.\n";
})) {
return false;
}
bool changed = false;
bool cleanup_changed = false;
{
HloPassPipeline subpipeline("before_conditional_code_motion");
subpipeline.AddPass<HloCSE>(is_layout_sensitive_);
subpipeline.AddPass<HloDCE>();
TF_ASSIGN_OR_RETURN(auto cleanup_changed_now,
subpipeline.Run(module, execution_threads));
cleanup_changed |= cleanup_changed_now;
}
std::vector<HloInstruction*> conditional_ops;
absl::flat_hash_map<HloComputation*, int> conditional_computations;
for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
for (auto* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kConditional) {
int branch_count = instr->branch_count();
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = instr->branch_computation(i);
if (ContainsKey(conditional_computations, branch_i)) {
conditional_computations[branch_i]++;
} else {
conditional_computations[branch_i] = 0;
}
}
if (instr->shape().IsTuple()) {
bool can_change_tuple_shape = true;
for (auto user : instr->users()) {
VLOG(2) << "user is : " << user->ToString() << "\n";
if (user->opcode() != HloOpcode::kGetTupleElement) {
can_change_tuple_shape = false;
}
}
if (can_change_tuple_shape) {
conditional_ops.push_back(instr);
}
} else {
conditional_ops.push_back(instr);
}
}
}
}
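  // Process the candidate conditionals one at a time; the move/reuse tables
  // are refreshed from search_config_map_ whenever an entry exists for the
  // conditional's index (or once, for the first conditional).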
int64_t conditional_index = 0;
HloCloneContext clone_context(module);
for (HloInstruction* conditional : conditional_ops) {
if (conditional_index == 0 || !search_config_map_.empty()) {
auto config_entry = search_config_map_.find(conditional_index);
if (config_entry != search_config_map_.end()) {
search_config_ = (*config_entry).second;
VLOG(2) << "config entry value extracted:" << search_config_.size();
search_config_index_ = 0;
}
VLOG(2) << "Obtaining default configuration for conditional "
<< conditional_index << "\n";
SetDefaultMoveConfig();
VLOG(2) << "Done obtaining default configuration\n";
conditional_index++;
}
int branch_count = conditional->branch_count();
bool conditional_is_shared = false;
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = conditional->branch_computation(i);
if (conditional_computations[branch_i] > 0) {
conditional_is_shared = true;
break;
}
}
std::vector<std::vector<Boundary>> to_move_out, to_move_in;
std::vector<std::vector<Boundary>> new_boundaries_for_moveout;
std::vector<std::vector<Boundary>> new_boundaries_for_movein;
absl::flat_hash_map<HloInstruction*, int> visited_count;
int benefit_move_out = 0, benefit_move_in = 0;
Decision::Direction final_d = Decision::Direction::kNoChange;
BoundaryVisitor visitor(conditional);
VLOG(2) << "Analyzing conditional:" << conditional->ToString() << "\n";
while (visitor.HasNextBoundary()) {
std::vector<Boundary> to_move, next_boundary;
Boundary boundary = visitor.PopNextBoundary();
VLOG(2) << "Analyzing boundary:" << boundary.ToString() << "\n";
auto d = ConsiderCodeMotion(conditional, boundary, to_move, next_boundary,
visited_count);
switch (d.GetDirection()) {
case Decision::Direction::kMoveOutOfBranch:
VLOG(2) << "Local Decision is move out of branch\n";
to_move_out.push_back(to_move);
new_boundaries_for_moveout.push_back(next_boundary);
benefit_move_out += d.GetBenefit();
if (benefit_move_out >= benefit_move_in) {
final_d = Decision::Direction::kMoveOutOfBranch;
VLOG(2) << "Current Decision is move out of branch ("
<< to_move_out.size() << ")\n";
} else {
VLOG(2) << "Current Decision remains move into branch\n";
}
break;
case Decision::Direction::kMoveIntoBranch:
VLOG(2) << "Decision is move into branch\n";
to_move_in.push_back(to_move);
new_boundaries_for_movein.push_back(next_boundary);
benefit_move_in += d.GetBenefit();
if (benefit_move_out >= benefit_move_in) {
VLOG(2) << "Current Decision remains move out of branch\n";
} else {
final_d = Decision::Direction::kMoveIntoBranch;
VLOG(2) << "Current Decision is move into branch ("
<< to_move_in.size() << ")\n";
}
break;
case Decision::Direction::kNoChange:
VLOG(2) << "Decision is no change\n";
for (const Boundary& b : next_boundary) {
visitor.AddToWorkList(b);
VLOG(2) << "Adding new boundary to worklist:" << b.ToString()
<< "\n";
}
break;
}
}
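    // Branch computations shared with other conditionals are cloned before
    // mutation so that the other users keep their original computations;
    // boundary entries pointing into a cloned branch are redirected to the
    // cloned instructions.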
if (final_d != Decision::Direction::kNoChange && conditional_is_shared) {
for (int i = 0; i < branch_count; ++i) {
HloComputation* branch_i = conditional->branch_computation(i);
if (conditional_computations[branch_i] > 0) {
HloComputation* clone_i =
conditional->GetModule()->AddEmbeddedComputation(
branch_i->Clone("clone", &clone_context));
conditional->set_branch_computation(i, clone_i);
conditional_computations[branch_i]--;
auto update_boundary = [&](Boundary& boundary) {
auto cloned_instr =
clone_context.FindInstruction(boundary.operands()[i]);
CHECK(cloned_instr != nullptr);
VLOG(2) << "boundary before cloning:" << boundary.operands()[i]
<< "\n";
boundary.mutable_operands()[i] = cloned_instr;
VLOG(2) << "boundary after cloning:" << boundary.operands()[i]
<< "\n";
};
if (final_d == Decision::Direction::kMoveOutOfBranch) {
            // Use a distinct loop variable here: `i` is captured by reference
            // in update_boundary above and must keep naming the branch index.
            for (int j = 0; j < to_move_out.size(); ++j) {
              std::vector<Boundary>& m = to_move_out[j];
              std::for_each(m.begin(), m.end(), update_boundary);
            }
            for (int j = 0; j < new_boundaries_for_moveout.size(); ++j) {
              std::vector<Boundary>& m = new_boundaries_for_moveout[j];
              std::for_each(m.begin(), m.end(), update_boundary);
            }
}
}
}
VLOG(2) << "Cloned branches as needed: " << conditional->ToString()
<< "\n";
}
if (final_d == Decision::Direction::kMoveOutOfBranch) {
      CHECK_EQ(to_move_out.size(), new_boundaries_for_moveout.size());
for (int i = 0; i < to_move_out.size(); ++i) {
TF_ASSIGN_OR_RETURN(bool result,
MoveInstructionOut(conditional, to_move_out[i],
new_boundaries_for_moveout[i]));
changed |= result;
}
VLOG(2) << "Done moving out of branches " << to_move_out.size()
<< " times. \n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
} else if (final_d == Decision::Direction::kMoveIntoBranch) {
      CHECK_EQ(to_move_in.size(), new_boundaries_for_movein.size());
for (int i = 0; i < to_move_in.size(); ++i) {
if (to_move_in[i].empty()) {
continue;
}
VLOG(2) << "before opt:"
<< conditional->parent()->ToString(
HloPrintOptions::Fingerprint());
if (to_move_in[i][0].IsOutsideBranchOperand()) {
VLOG(1) << "Modifying code---number of operand boundaries to move in:"
<< to_move_in[i].size() << "\n";
TF_ASSIGN_OR_RETURN(bool result, MoveOperandInstructionsIn(
conditional, to_move_in[i]));
changed |= result;
} else {
VLOG(1) << "Modifying code---number of user boundaries to move in:"
<< to_move_in[i].size() << "\n";
CHECK(to_move_in[i][0].IsOutsideBranchUser());
TF_ASSIGN_OR_RETURN(
bool result, MoveUserInstructionsIn(conditional, to_move_in[i]));
changed |= result;
}
VLOG(2) << "Before removing instructions:"
<< conditional->parent()->ToString() << "\n";
for (int64_t j = to_move_in[i].size() - 1; j >= 0; j--) {
Boundary boundary_to_move_in = to_move_in[i][j];
HloInstruction* op = boundary_to_move_in.operands()[0];
if (op->user_count() == 0 && op->parent() != nullptr) {
VLOG(2) << "Removing boundary:" << boundary_to_move_in.ToString()
<< "\n";
TF_RETURN_IF_ERROR(conditional->parent()->RemoveInstruction(op));
VLOG(2) << "Done removing boundary.\n";
}
}
VLOG(2) << "Done moving instructions inside branches\n"
<< conditional->parent()->ToString(
HloPrintOptions::Fingerprint())
<< "\n";
VLOG(2) << "Done moving into branches " << to_move_in.size()
<< " times. \n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
}
} else if (pursue_full_conditional_code_motion_ && !conditional_is_shared) {
TF_ASSIGN_OR_RETURN(
bool convert_result,
ConvertSpecialMove(conditional, is_layout_sensitive_));
if (convert_result) {
VLOG(2) << "Done special moving of convert\n";
if (!ConsumeFuel("conditional_code_motion", [&] {
return "Skipping conditional opt after allowed limit reaching "
"0.\n";
})) {
break;
}
}
changed |= convert_result;
}
}
if (changed) {
HloPassPipeline subpipeline(
"after_conditional_code_motion_after_convert_hoisting");
VLOG(2) << "starting after motion passes: DCE\n";
subpipeline.AddPass<HloDCE>();
subpipeline.AddPass<TupleSimplifier>();
subpipeline.AddPass<HloDCE>();
TF_ASSIGN_OR_RETURN(auto cleanup_changed_now, subpipeline.Run(module));
cleanup_changed |= cleanup_changed_now;
}
if (cleanup_changed) {
VLOG(2) << "subpipeline cleanup have modified code\n";
}
return changed;
}
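// Initializes the per-opcode-pair reuse and move-decision tables. The current
// search configuration entry selects the tuning mode: 0 leaves both tables at
// their defaults, a positive value tunes the transformation decision, and a
// negative value tunes the reuse model.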
void ConditionalCodeMotion::SetDefaultMoveConfig() {
VLOG(2) << "search_config_index = " << search_config_index_ << "\n";
VLOG(2) << "search_config_ size = " << search_config_.size() << "\n";
int64_t cur_search_config = (search_config_index_ < 0 ||
search_config_index_ >= search_config_.size())
? 0
: search_config_[search_config_index_];
enum class TuningOption {
kDoNotTune = 0,
kTuneTransformationDecision = 1,
kTuneReuseModel = 2,
};
TuningOption tuning_option =
(cur_search_config == 0) ? TuningOption::kDoNotTune
: (cur_search_config > 0) ? TuningOption::kTuneTransformationDecision
: TuningOption::kTuneReuseModel;
auto row = HloOpcodeCount();
auto col = row;
VLOG(2) << "Start setting default configuration\n";
reuse_config_.clear();
move_config_.clear();
reuse_config_.reserve(row);
move_config_.reserve(row);
for (int64_t opcode = 0; opcode < row; ++opcode) {
std::vector<int64_t> reuse_vec(col, 0);
for (uint32_t j = 0; j < col; ++j) {
reuse_vec[j] = ReusesCarriedBy(static_cast<HloOpcode>(opcode),
static_cast<HloOpcode>(j));
}
reuse_config_.push_back(reuse_vec);
std::vector<int64_t> move_vec;
switch (tuning_option) {
case TuningOption::kTuneTransformationDecision:
move_vec.push_back(1);
break;
case TuningOption::kTuneReuseModel:
case TuningOption::kDoNotTune:
move_vec.reserve(col);
for (uint32_t j = 0; j < col; ++j) {
move_vec.push_back(WorthHoisting(static_cast<HloOpcode>(opcode),
static_cast<HloOpcode>(j)));
}
break;
}
move_config_.push_back(move_vec);
}
}
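// Parses a search configuration string of ';'-separated entries, each of the
// form "conditional_index,flip_start,max_flip,flip_stride", into
// search_config_map_.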
void ConditionalCodeMotion::ParseSearchConfiguration(
const std::string& search_config) {
if (search_config.empty()) {
return;
}
search_config_index_ = 0;
std::vector<std::string> configs = absl::StrSplit(search_config, ';');
for (const std::string& config : configs) {
std::vector<std::string> specs = absl::StrSplit(config, ',');
CHECK_EQ(specs.size(), 4);
int64_t condition_index;
CHECK(absl::SimpleAtoi(specs[0], &condition_index));
auto& cur_config_entry = search_config_map_[condition_index];
int64_t flip_start, max_flip, flip_stride;
CHECK(absl::SimpleAtoi(specs[1], &flip_start));
CHECK(absl::SimpleAtoi(specs[2], &max_flip));
CHECK(absl::SimpleAtoi(specs[3], &flip_stride));
int64_t cur_config = MakeSearchConfig(flip_start, max_flip, flip_stride);
cur_config_entry.push_back(cur_config);
VLOG(2) << "Setting search config " << condition_index << "->" << cur_config
<< "\n";
}
}
}
}
#include "xla/service/conditional_code_motion.h"
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
namespace conditional_opt {
using ConditionalCodeMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(ConditionalCodeMotionTest, MoveSubsetTupleOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.2894, %reshape.8493)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.3604, %add)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
get-first-index.2 = f32[2,512,364]{2,1,0} get-tuple-element(conditional), index=1
ROOT result = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(get-first-index, get-first-index.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert(), op::GetTupleElement())));
}
TEST_F(ConditionalCodeMotionTest, VerifyConditionalAnalysisWithWhileTuple) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
body {
%p_body = (f32[2], bf16[2], s32[]) parameter(0)
%val = f32[2] get-tuple-element(p_body), index=0
%val2 = bf16[2] get-tuple-element(p_body), index=1
%const = s32[] constant(-1)
ROOT root = (f32[2], bf16[2], s32[]) tuple(%val, %val2, %const)
}
condition {
%p_cond = (f32[2], bf16[2], s32[]) parameter(0)
%gte = s32[] get-tuple-element(%p_cond), index=2
%const = s32[] constant(42)
ROOT result = pred[] compare(%gte, %const), direction=EQ
}
on_true {
%arg_tuple.1 = f32[2] parameter(0)
%const = s32[] constant(42)
%add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
%convert.2894 = bf16[2] convert(f32[2] %add.8493)
ROOT %tuple.1 = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
}
on_false {
%arg_tuple.1 = f32[2] parameter(0)
%const = s32[] constant(42)
%add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
%convert.2894 = bf16[2] convert(f32[2] %add.8493)
%while_init = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
ROOT while = (f32[2], bf16[2], s32[]) while(%while_init), condition=condition, body=body
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = f32[2] parameter(1)
ROOT conditional = (f32[2], bf16[2], s32[]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditionalRoot) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditional) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
TEST_F(ConditionalCodeMotionTest, ConditionalShapeNotMutable) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
ROOT result = (bf16[2,512,364]{2,1,0}, (bf16[2,512,364]{2,1,0})) tuple(get-first-index, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MoveConvertOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
  ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 1);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 1);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional())))))));
}
TEST_F(ConditionalCodeMotionTest, UserShareOperandCannotBeMoved) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.4, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
get-second-index = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-first-index, get-second-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 9);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 11);
std::optional<int> on_false_sub_idx;
std::optional<int> on_false_add_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kAdd) {
on_false_add_idx = i;
} else if (root_operand->opcode() == HloOpcode::kSubtract) {
on_false_sub_idx = i;
}
}
ASSERT_TRUE(on_false_add_idx.has_value());
ASSERT_TRUE(on_false_sub_idx.has_value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Add(
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_sub_idx),
op::Constant()),
op::GetTupleElement(op::Conditional(), *on_false_add_idx))));
}
TEST_F(ConditionalCodeMotionTest, ConditionalBoundaryAliasingBug) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.1), index=1
cos = f32[] cosine(get-tuple-element.2)
multiply.1 = f32[] multiply(get-tuple-element.1, cos)
ROOT res.1 = (f32[], f32[]) tuple(multiply.1, cos)
}
on_false {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.3 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.6 = f32[] constant(3)
multiply.2 = f32[] multiply(get-tuple-element.3, constant.6)
constant.2 = f32[] constant(0)
ROOT res.2 = (f32[], f32[]) tuple(multiply.2, constant.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
param.2 = f32[] parameter(1)
param.3 = f32[] parameter(2)
tuple = (f32[], f32[]) tuple(param.2, param.3)
conditional = (f32[], f32[])
conditional(pred.1, tuple, tuple), true_computation=on_true,
false_computation=on_false
get-tuple-element.3 = f32[] get-tuple-element(conditional), index=0
get-tuple-element.4 = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-tuple-element.3, get-tuple-element.4)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_false = conditional->branch_computation(1);
std::optional<int> on_false_gte_idx;
std::optional<int> on_false_const_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
} else if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
}
}
ASSERT_TRUE(on_false_gte_idx.has_value());
ASSERT_TRUE(on_false_const_idx.has_value());
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root->operand(0),
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::GetTupleElement(op::Conditional(), *on_false_const_idx)));
}
TEST_F(ConditionalCodeMotionTest, ConditionalRootElementChanged) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(get-tuple-element.1, constant.2)
add.3 = f32[] add(add.1, add.2)
ROOT tuple.3 = (f32[]) tuple(add.3)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.3 = f32[] constant(1)
constant.4 = f32[] constant(2)
add.4 = f32[] add(constant.4, constant.3)
add.5 = f32[] add(get-tuple-element.2, constant.4)
add.6 = f32[] add(add.4, add.5)
ROOT tuple.4 = (f32[]) tuple(add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
ROOT result = f32[] add(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
EXPECT_EQ(on_true->instruction_count(), 3);
EXPECT_THAT(on_true->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 0)));
const HloComputation* on_false = conditional->branch_computation(1);
EXPECT_EQ(on_false->instruction_count(), 4);
std::optional<int> on_false_const_idx;
std::optional<int> on_false_gte_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
} else if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
}
}
ASSERT_TRUE(on_false_const_idx.has_value());
ASSERT_TRUE(on_false_gte_idx.has_value());
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0)));
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_gte_idx),
op::GetTupleElement(op::Parameter(0), 0));
HloInstruction* root = module->entry_computation()->root_instruction();
auto get_first_index_matcher = op::Add(
op::Add(op::GetTupleElement(op::Conditional(), *on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(1.0))),
op::Add(op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0))));
EXPECT_THAT(root, op::Add(get_first_index_matcher, get_first_index_matcher));
}
TEST_F(ConditionalCodeMotionTest, ConditionalIsRootInstruction) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.2, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
ROOT conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, LayoutMisMatchCannotMovedOut) {
absl::string_view hlo_string =
R"(
HloModule LayoutMisMatchCannotMovedOut
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
%arg_tuple.1 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%all-reduce.1 = bf16[93184,4]{1,0}
all-reduce(bf16[93184,4]{1,0} %get-tuple-element.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
%convert.2894 = f32[93184,4]{1,0} convert(bf16[93184, 4]{1,0} %all-reduce.1)
ROOT %tuple.1 = (f32[93184,4]{1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%copy.1 = bf16[93184,4]{0,1} copy(bf16[93184,4]{1,0} %get-tuple-element.3)
%all-reduce.2 = bf16[93184,4]{0, 1}
all-reduce(bf16[93184,4]{0, 1} %copy.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181
%convert.3604 = f32[93184,4]{0,1} convert(bf16[93184,4]{0,1} %all-reduce.2)
ROOT %tuple.2 = (f32[93184,4]{0,1}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (bf16[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (bf16[93184,4]{1,0}) parameter(2)
conditional = (f32[93184,4]{1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = f32[93184,4]{1,0} get-tuple-element(conditional), index=0
ROOT result = (f32[93184,4]{1,0}) tuple(get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(ConditionalCodeMotionTest, MoveCrossModuleAllReduceOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.1 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.1 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.1),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.1 = (f32[3,3,128,128]) tuple(convert.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.2),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.2 = (f32[3,3,128,128]) tuple(convert.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional = (f32[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = f32[3,3,128,128]
get-tuple-element(conditional), index=0
add.1 = f32[3,3,128,128] add(f32[3,3,128,128] get-first-index, f32[3,3,128,128] get-first-index)
ROOT result = (f32[3,3,128,128]) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
  ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::AllReduce(op::GetTupleElement(op::Conditional()))),
op::Convert(
op::AllReduce(op::GetTupleElement(op::Conditional())))))));
}
TEST_F(ConditionalCodeMotionTest, DoNotMoveAllReduceIn) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.1 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.1, bf16[3,3,128,128] convolution.1)
ROOT tuple.1 = (bf16[3,3,128,128]) tuple(add.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.2 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.2, bf16[3,3,128,128] convolution.2)
ROOT tuple.2 = (bf16[3,3,128,128]) tuple(add.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional = (bf16[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = bf16[3,3,128,128] get-tuple-element(conditional), index=0
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %get-first-index),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT result = (f32[3,3,128,128]) tuple(convert.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
  ASSERT_NE(conditional, nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 6);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 6);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::Convert(op::AllReduce(
op::GetTupleElement(op::Conditional()))))));
}
TEST_F(ConditionalCodeMotionTest, MovePowOpIn) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
on_false {
arg_tuple.2 = (f32[10]) parameter(0)
get-tuple-element.2 = f32[10] get-tuple-element(arg_tuple.2), index=0
mul.1 = f32[10] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.4 = (f32[10]) tuple(mul.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
TEST_F(ConditionalCodeMotionTest, MoveInWithMultipleGTE) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
on_false {
arg_tuple.2 = (f32[10]) parameter(0)
get-tuple-element.2 = f32[10] get-tuple-element(arg_tuple.2), index=0
mul.1 = f32[10] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple.4 = (f32[10]) tuple(mul.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[10] get-tuple-element(conditional), index=0
get-first-index.2 = f32[10] get-tuple-element(conditional), index=0
pow.1 = f32[10] power(get-first-index, get-first-index.2)
ROOT tuple.3 = (f32[10], f32[10]) tuple(pow.1, get-first-index.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())));
}
TEST_F(ConditionalCodeMotionTest, MoveOutWithSharedBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 1);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 1);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, AllOf(op::Power(op::Add(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())),
op::Add(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())))));
}
TEST_F(ConditionalCodeMotionTest, MovePowInWithNonTupleRoot) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
ROOT add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
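
// Move-in still applies when one branch is an empty pass-through of its
// parameter.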
TEST_F(ConditionalCodeMotionTest, MovePowInWithEmptyBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch1 {
arg_tuple.1 = (f32[10]) parameter(0)
get-tuple-element.1 = f32[10] get-tuple-element(arg_tuple.1), index=0
add.1 = f32[10] add(get-tuple-element.1, get-tuple-element.1)
ROOT tuple.3 = (f32[10]) tuple(add.1)
}
branch2 {
ROOT arg_tuple.1 = (f32[10]) parameter(0)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[10]) parameter(1)
tuple.2 = (f32[10]) parameter(2)
conditional = (f32[10])
conditional(pred.1, tuple.1, tuple.2), true_computation=branch1,
false_computation=branch2
get-first-index = f32[10] get-tuple-element(conditional), index=0
ROOT pow.1 = f32[10] power(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 4);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
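
// Move-in also works when the branch parameter itself is a non-tuple value.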
TEST_F(ConditionalCodeMotionTest, MovePowInWithNonTupleParameter) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg.1 = f32[10] parameter(0)
ROOT add.1 = f32[10] add(arg.1, arg.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = f32[10] parameter(1)
tuple.2 = f32[10] parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 4);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 4);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::GetTupleElement(op::Conditional())));
}
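
// A layout-changing copy after the conditional is sunk into both branches;
// the conditional's result tuple is reordered, so the entry reassembles it
// from permuted get-tuple-element indices.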
TEST_F(ConditionalCodeMotionTest, MoveCopyInBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch1 {
arg_tuple.1 = (s32[], f32[10,3]{0,1}) parameter(0)
constant.1 = s32[] constant(4)
get-tuple-element.1 = s32[] get-tuple-element(arg_tuple.1), index=0
add.1 = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = f32[10,3]{0,1} get-tuple-element(arg_tuple.1), index=1
slice.1 = f32[4,3]{0,1} slice(get-tuple-element.2),
slice={[0:4:1], [0:3:1]}
constant.2 = f32[] constant(0.0)
ROOT tuple.1 = (f32[4,3]{0,1}, s32[], f32[]) tuple(slice.1, add.1, constant.2)
}
branch2 {
arg_tuple.2 = (s32[], f32[4,3]{1,0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(arg_tuple.2), index=0
copy.1 = s32[] copy(get-tuple-element.3)
get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element(arg_tuple.2), index=1
copy.2 = f32[4,3]{0,1} copy(get-tuple-element.4)
constant.2 = f32[] constant(0.0)
ROOT tuple.2 = (f32[4,3]{0,1}, s32[], f32[]) tuple(copy.2, copy.1, constant.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.3 = (s32[], f32[10,3]{0,1}) parameter(1)
tuple.4 = (s32[], f32[4,3]{1,0}) parameter(2)
conditional = (f32[4,3]{0,1}, s32[], f32[])
conditional(pred.1, tuple.3, tuple.4), true_computation=branch1,
false_computation=branch2
get-zero-index = f32[4,3]{0,1} get-tuple-element(conditional), index=0
get-first-index = s32[] get-tuple-element(conditional), index=1
get-second-index = f32[] get-tuple-element(conditional), index=2
copy.3 = f32[4,3]{1,0} copy(get-zero-index)
ROOT tuple.5 = (f32[4,3]{0,1}, s32[], f32[]) tuple(copy.3, get-first-index,
get-second-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 9);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 8);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Tuple(op::GetTupleElement(op::Conditional(), 2),
op::GetTupleElement(op::Conditional(), 0),
op::GetTupleElement(op::Conditional(), 1))));
}
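
// Sinks the entry's layout-changing copy into both branches; afterwards the
// conditional itself is the entry root.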
TEST_F(ConditionalCodeMotionTest, MoveCopy2InBranch) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%branch0 (state.2: (f32[1,3,2])) -> (f32[1,3,2]) {
%state.2 = (f32[1,3,2]{1,2,0}) parameter(0)
%get-tuple-element.32 = f32[1,3,2]{1,2,0} get-tuple-element((f32[1,3,2]{1,2,0}) %state.2), index=0
%copy.1 = f32[1,3,2]{0,2,1} copy(f32[1,3,2]{1,2,0} %get-tuple-element.32)
ROOT %tuple.13 = (f32[1,3,2]{0,2,1}) tuple(f32[1,3,2]{0,2,1} %copy.1)
}
%branch1 (state.1: (s32[], f32[8,3,2], s32[2])) -> (f32[1,3,2]) {
%state.1 = (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) parameter(0)
%get-tuple-element.17 = f32[8,3,2]{0,2,1} get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=1
%get-tuple-element.18 = s32[2]{0} get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=2
%get-tuple-element.16 = s32[] get-tuple-element((s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %state.1), index=0
%dynamic-slice.3 = s32[1]{0} dynamic-slice(s32[2]{0} %get-tuple-element.18, s32[] %get-tuple-element.16), dynamic_slice_sizes={1}
%reshape.19 = s32[] reshape(s32[1]{0} %dynamic-slice.3)
%constant.21 = s32[] constant(0)
%dynamic-slice.4 = f32[1,3,2]{0,2,1} dynamic-slice(f32[8,3,2]{0,2,1} %get-tuple-element.17, s32[] %reshape.19, s32[] %constant.21, s32[] %constant.21), dynamic_slice_sizes={1,3,2}
ROOT %tuple.9 = (f32[1,3,2]{0,2,1}) tuple(f32[1,3,2]{0,2,1} %dynamic-slice.4)
}
ENTRY %f32_8_3_2__1-1.32 (idxs.1: s32[2], single_io.2: f32[8,3,2], repeated_io_0.3: f32[1,3,2]) -> (f32[1,3,2]) {
%idxs.1 = s32[2]{0} parameter(0)
%slice.10 = s32[1]{0} slice(s32[2]{0} %idxs.1), slice={[0:1]}
%reshape.11 = s32[] reshape(s32[1]{0} %slice.10)
%constant.12 = s32[] constant(0)
%compare.13 = pred[] compare(s32[] %reshape.11, s32[] %constant.12), direction=EQ
%repeated_io_0.3 = f32[1,3,2]{1,2,0} parameter(2)
%tuple.11 = (f32[1,3,2]{1,2,0}) tuple(f32[1,3,2]{1,2,0} %repeated_io_0.3)
%constant.5 = s32[] constant(1)
%single_io.2 = f32[8,3,2]{0,2,1} parameter(1)
%tuple.15 = (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) tuple(s32[] %constant.5, f32[8,3,2]{0,2,1} %single_io.2, s32[2]{0} %idxs.1)
%conditional.28 = (f32[1,3,2]{0,2,1}) conditional(pred[] %compare.13, (f32[1,3,2]{1,2,0}) %tuple.11, (s32[], f32[8,3,2]{0,2,1}, s32[2]{0}) %tuple.15), true_computation=%branch0, false_computation=%branch1
%get-tuple-element.33 = f32[1,3,2]{0,2,1} get-tuple-element((f32[1,3,2]{0,2,1}) %conditional.28), index=0
%copy.2 = f32[1,3,2]{1,2,0} copy(f32[1,3,2]{0,2,1} %get-tuple-element.33)
ROOT %tuple.16 = (f32[1,3,2]{1,2,0}) tuple(f32[1,3,2]{1,2,0} %copy.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
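
// Branch roots that replicate a single value are deduplicated on move-out:
// the conditional returns one bf16 element and the all-reduce/convert pair
// is rebuilt in the entry computation.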
TEST_F(ConditionalCodeMotionTest, MoveReplicatedTupleEntryOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.1 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
convert.1 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.1)
all-reduce.3 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
convert.3 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.3)
ROOT tuple.1 = (f32[3,3,128,128], f32[3,3,128,128]) tuple(convert.1, convert.3)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.2),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2)
ROOT tuple.2 = (f32[3,3,128,128], f32[3,3,128,128]) tuple(convert.2, convert.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
conditional = (f32[3,3,128,128], f32[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = f32[3,3,128,128]
get-tuple-element(conditional), index=0
add.1 = f32[3,3,128,128] add(f32[3,3,128,128] get-first-index, f32[3,3,128,128] get-first-index)
ROOT result = (f32[3,3,128,128]) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::AllReduce(op::GetTupleElement(op::Conditional()))),
op::Convert(
op::AllReduce(op::GetTupleElement(op::Conditional())))))));
}
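
// No motion when the conditional's user also consumes a value defined
// outside the conditional.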
TEST_F(ConditionalCodeMotionTest, DoNotMoveWithExtraOperand) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
branch {
arg.1 = f32[10] parameter(0)
ROOT add.1 = f32[10] add(arg.1, arg.1)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = f32[10] parameter(1)
tuple.2 = f32[10] parameter(2)
conditional = f32[10]
conditional(pred.1, tuple.1, tuple.2), true_computation=branch,
false_computation=branch
ROOT pow.1 = f32[10] power(conditional, tuple.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
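
// Several independent users of the conditional's outputs (the L2Loss-style
// multiply/reduce chains) are all moved into the branches in one pass.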
TEST_F(ConditionalCodeMotionTest, MultipleIndependentMoveIns) {
absl::string_view hlo_string =
R"(
HloModule FromNMT
%add.31755 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%nmt.1 {
%wide_param.3 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.16525 = bf16[1024,4096]{1,0} get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=0
%get-tuple-element.16527 = bf16[18,64,1024]{2,1,0} get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=1
%get-tuple-element.16588 = s32[] get-tuple-element((bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) %wide_param.3), index=2
%add.3764 = s32[] add(s32[] %get-tuple-element.16588, s32[] %get-tuple-element.16588), metadata={op_type="Sub" op_name="sub"}
%reshape.9821 = s32[1]{0} reshape(s32[] %add.3764)
%reshape.9822 = s32[] reshape(s32[1]{0} %reshape.9821)
%constant.13127 = s32[] constant(0)
%dynamic-slice.1245 = bf16[1,64,1024]{2,1,0} dynamic-slice(bf16[18,64,1024]{2,1,0} %get-tuple-element.16527, s32[] %reshape.9822, s32[] %constant.13127, s32[] %constant.13127), dynamic_slice_sizes={1,64,1024}
%reshape.9825 = bf16[64,1024]{1,0} reshape(bf16[1,64,1024]{2,1,0} %dynamic-slice.1245), metadata={op_type="GatherV2" op_name="GatherV2"}
%logistic.814 = bf16[64,1024]{1,0} logistic(bf16[64,1024]{1,0} %reshape.9825), metadata={op_type="Sigmoid" op_name="Sigmoid"}
%multiply.4890 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %reshape.9825, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="Mul" op_name="mul"}
%tanh.573 = bf16[64,1024]{1,0} tanh(bf16[64,1024]{1,0} %reshape.9825), metadata={op_type="Tanh" op_name="Tanh"}
%multiply.4891 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %logistic.814, bf16[64,1024]{1,0} %tanh.573), metadata={op_type="Mul" op_name="mul_1"}
%add.3766 = bf16[64,1024]{1,0} add(bf16[64,1024]{1,0} %multiply.4890, bf16[64,1024]{1,0} %multiply.4891), metadata={op_type="AddV2" op_name="add_1"}
%multiply.4894 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %add.3766, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="Mul" op_name="gradients_1/mul_grad/Mul"}
%constant.10568 = bf16[] constant(1), metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%broadcast.7198 = bf16[64,1024]{1,0} broadcast(bf16[] %constant.10568), dimensions={}, metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%multiply.4896 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %tanh.573, bf16[64,1024]{1,0} %tanh.573), metadata={op_type="TanhGrad" op_name="gradients/Tanh_1_grad/TanhGrad"}
%constant.10571 = bf16[] constant(1), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%broadcast.7201 = bf16[64,1024]{1,0} broadcast(bf16[] %constant.10571), dimensions={}, metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%subtract.1702 = bf16[64,1024]{1,0} subtract(bf16[64,1024]{1,0} %broadcast.7201, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_grad/SigmoidGrad"}
%multiply.4907 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %tanh.573, bf16[64,1024]{1,0} %add.3766), metadata={op_type="Mul" op_name="gradients/mul_2_grad/Mul_1"}
%multiply.4908 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %multiply.4907, bf16[64,1024]{1,0} %logistic.814), metadata={op_type="SigmoidGrad" op_name="gradients/Sigmoid_2_grad/SigmoidGrad"}
%dot.781 = bf16[64,4096]{1,0} dot(bf16[64,1024]{1,0} %multiply.4908, bf16[1024,4096]{1,0} %get-tuple-element.16525), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="MatMul" op_name="MatMul"}
ROOT %tuple.3200 = (bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) tuple(bf16[64,1024]{1,0} %multiply.4894, bf16[64,4096]{1,0} %dot.781, s32[] %reshape.9822)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(1)
arg_tuple.4 = (bf16[1024,4096]{1,0}, bf16[18,64,1024]{2,1,0}, s32[]) parameter(2)
%arg.2 = s32[] parameter(3)
%conditional.3 = (bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=nmt.1, false_computation=nmt.1
%get-tuple-element.15889 = bf16[64,1024]{1,0} get-tuple-element((bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) %conditional.3), index=0, metadata={op_type="Case" op_name="switch_case/indexed_case"}
%multiply.4596 = bf16[64,1024]{1,0} multiply(bf16[64,1024]{1,0} %get-tuple-element.15889, bf16[64,1024]{1,0} %get-tuple-element.15889), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%constant.10279 = bf16[] constant(0), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%reduce.844 = bf16[] reduce(bf16[64,1024]{1,0} %multiply.4596, bf16[] %constant.10279), dimensions={0,1}, to_apply=%add.31755, metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%get-tuple-element.15890 = bf16[64,4096]{1,0} get-tuple-element((bf16[64,1024]{1,0}, bf16[64,4096]{1,0}, s32[]) %conditional.3), index=1, metadata={op_type="Case" op_name="switch_case/indexed_case"}
%multiply.4597 = bf16[64,4096]{1,0} multiply(bf16[64,4096]{1,0} %get-tuple-element.15890, bf16[64,4096]{1,0} %get-tuple-element.15890), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%constant.10280 = bf16[] constant(0), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%reduce.845 = bf16[] reduce(bf16[64,4096]{1,0} %multiply.4597, bf16[] %constant.10280), dimensions={0,1}, to_apply=%add.31755, metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
%multiply.4667 = bf16[] multiply(bf16[] %reduce.845, bf16[]{:T(128)} %reduce.844), metadata={op_type="L2Loss" op_name="global_norm/L2Loss"}
ROOT %tuple.3200 = (bf16[], s32[]) tuple(%multiply.4667, s32[] %arg.2)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional.3");
CHECK(conditional != nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 27);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 27);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, AllOf(op::Tuple(op::GetTupleElement(op::Conditional()),
op::Parameter())));
}
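
// Sweeps the single-conditional search configuration (flip_start, max_flip,
// flip_stride) and checks the expected transformation at each flip point.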
TEST_F(ConditionalCodeMotionTest, TestConfigurationFlag) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(add.1)
}
)";
for (int max_flip = 1; max_flip < 3; ++max_flip) {
for (int flip_stride = 1; flip_stride < ((max_flip > 1) ? 7 : 2);
++flip_stride) {
for (int flip_start = 0; flip_start < 7; ++flip_start) {
int64_t search_config = ConditionalCodeMotion::MakeSearchConfig(
flip_start, max_flip, flip_stride);
ConditionalCodeMotion pass(true, true, search_config);
VLOG(1) << "Testing max_flip=" << max_flip
<< "; flip_start = " << flip_start
<< "; flip_stride = " << flip_stride
<< "; search_config=" << search_config;
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
bool opt_result = pass.Run(&*module).value();
if (flip_start < 2 && max_flip > 1 && flip_stride == 1) {
CHECK_EQ(opt_result, false);
continue;
}
CHECK_EQ(opt_result, true);
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
const HloComputation* on_false = conditional->branch_computation(1);
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
[[fallthrough]];
case 1:
ASSERT_EQ(on_true->instruction_count(), 6);
ASSERT_EQ(on_false->instruction_count(), 6);
EXPECT_THAT(root, AllOf(op::Conditional()));
break;
case 2:
ASSERT_EQ(on_true->instruction_count(), 4);
ASSERT_EQ(on_false->instruction_count(), 4);
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))))));
break;
case 3:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
case 4:
case 5:
case 6:
ASSERT_EQ(on_true->instruction_count(), 2);
ASSERT_EQ(on_false->instruction_count(), 2);
EXPECT_THAT(root,
AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))))));
break;
default:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
}
}
}
}
}
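
// Same sweep as above, but driven by a semicolon-separated, per-conditional
// search-config string covering two conditionals.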
TEST_F(ConditionalCodeMotionTest, TestMultipleConfigurationFlags) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
pred.2 = pred[] parameter(3)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
conditional.2 = (bf16[2,512,364]{2,1,0}) conditional(pred.2, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index.2 = bf16[2,512,364]{2,1,0} get-tuple-element(conditional.2), index=0
add.2 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index.2, bf16[2,512,364]{2,1,0} get-first-index.2)
ROOT result = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(add.1, add.2)
}
)";
for (int max_flip = 1; max_flip < 3; ++max_flip) {
for (int flip_stride = 1; flip_stride < ((max_flip > 1) ? 7 : 2);
++flip_stride) {
for (int flip_start = 0; flip_start < 7; ++flip_start) {
std::stringstream config_stream;
config_stream << 0 << "," << flip_start << "," << max_flip << ","
<< flip_stride << ";";
config_stream << 1 << "," << flip_start << "," << max_flip << ","
<< flip_stride;
auto search_config = config_stream.str();
ConditionalCodeMotion pass(true, true, search_config);
VLOG(1) << "Testing max_flip=" << max_flip
<< "; flip_start = " << flip_start
<< "; flip_stride = " << flip_stride
<< "; search_config=" << search_config;
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
bool opt_result = pass.Run(&*module).value();
if (flip_start < 2 && max_flip > 1 && flip_stride == 1) {
CHECK_EQ(opt_result, false);
continue;
}
CHECK_EQ(opt_result, true);
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_true = conditional->branch_computation(0);
const HloComputation* on_false = conditional->branch_computation(1);
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
[[fallthrough]];
case 1:
ASSERT_EQ(on_true->instruction_count(), 6);
ASSERT_EQ(on_false->instruction_count(), 6);
EXPECT_THAT(
root, AllOf(op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional()))));
break;
case 2:
ASSERT_EQ(on_true->instruction_count(), 4);
ASSERT_EQ(on_false->instruction_count(), 4);
EXPECT_THAT(
root,
AllOf(op::Tuple(
op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))),
op::Add(
op::Convert(op::GetTupleElement(op::Conditional())),
op::Convert(op::GetTupleElement(op::Conditional()))))));
break;
case 3:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(
root, AllOf(op::Tuple(
op::Add(op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional())))),
op::Add(op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
case 4:
case 5:
case 6:
ASSERT_EQ(on_true->instruction_count(), 2);
ASSERT_EQ(on_false->instruction_count(), 2);
EXPECT_THAT(
root,
AllOf(op::Tuple(
op::Add(op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))),
op::Add(op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional())))),
op::Convert(op::Reshape(op::GetTupleElement(
op::GetTupleElement(op::Conditional()))))))));
break;
default:
ASSERT_EQ(on_true->instruction_count(), 1);
ASSERT_EQ(on_false->instruction_count(), 1);
EXPECT_THAT(root, AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(
op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(
op::Conditional())))))));
break;
}
}
}
}
}
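
// A move-out that changes the conditional's tuple shape must either drop the
// sharding or extend it consistently with the new shape.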
TEST_F(ConditionalCodeMotionTest, ShapeChangingMovePreservesSharding) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%on_true (arg_tuple.1: (f32[10])) -> (f32[10]) {
%arg_tuple.1 = (f32[10]{0}) parameter(0), sharding={{devices=[4]0,1,2,3}}
%get-tuple-element.1 = f32[10]{0} get-tuple-element((f32[10]{0}) %arg_tuple.1), index=0, sharding={devices=[4]0,1,2,3}
%add.1 = f32[10]{0} add(f32[10]{0} %get-tuple-element.1, f32[10]{0} %get-tuple-element.1), sharding={devices=[4]0,1,2,3}
ROOT %tuple.3 = (f32[10]{0}) tuple(f32[10]{0} %add.1), sharding={{devices=[4]0,1,2,3}}
}
%on_false (arg_tuple.2: (f32[10])) -> (f32[10]) {
%arg_tuple.2 = (f32[10]{0}) parameter(0), sharding={{devices=[4]0,1,2,3}}
%get-tuple-element.2 = f32[10]{0} get-tuple-element((f32[10]{0}) %arg_tuple.2), index=0, sharding={devices=[4]0,1,2,3}
%mul.1 = f32[10]{0} multiply(f32[10]{0} %get-tuple-element.2, f32[10]{0} %get-tuple-element.2), sharding={devices=[4]0,1,2,3}
ROOT %tuple.4 = (f32[10]{0}) tuple(f32[10]{0} %mul.1), sharding={{devices=[4]0,1,2,3}}
}
ENTRY %main (pred.1: pred[], tuple.1: (f32[10]), tuple.2: (f32[10])) -> (f32[10], f32[10]) {
%pred.1 = pred[] parameter(0), sharding={replicated}
%tuple.1 = (f32[10]{0}) parameter(1), sharding={{replicated}}
%tuple.2 = (f32[10]{0}) parameter(2), sharding={{devices=[4]0,1,2,3}}
%conditional = (f32[10]{0}) conditional(pred[] %pred.1, (f32[10]{0}) %tuple.1, (f32[10]{0}) %tuple.2), true_computation=%on_true, false_computation=%on_false, sharding={{devices=[4]0,1,2,3}}
%get-first-index = f32[10]{0} get-tuple-element((f32[10]{0}) %conditional), index=0, sharding={devices=[4]0,1,2,3}
%get-first-index.2 = f32[10]{0} get-tuple-element((f32[10]{0}) %conditional), index=0, sharding={devices=[4]0,1,2,3}
%pow.1 = f32[10]{0} power(f32[10]{0} %get-first-index, f32[10]{0} %get-first-index.2), sharding={devices=[4]0,1,2,3}
ROOT %tuple.0 = (f32[10]{0}, f32[10]{0}) tuple(f32[10]{0} %pow.1, f32[10]{0} %get-first-index.2), sharding={{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ConditionalCodeMotion pass(true, true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(op::Conditional()),
op::GetTupleElement(op::Conditional())));
EXPECT_EQ(root->operand(0)->operand(0), root->operand(1)->operand(0));
const HloInstruction* conditional = root->operand(0)->operand(0);
EXPECT_THAT(
conditional,
AnyOf(op::NoSharding(),
op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}")));
}
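
// Duplicated converts at the branch roots must not be hoisted; the pass
// reports no change.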
TEST_F(ConditionalCodeMotionTest, ConvertDuplicate) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert, %convert)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717)
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert, %convert)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (bf16[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
VLOG(2) << "module:\n" << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
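
// Convert/convert-back chains feeding the branch roots stay in place; the
// pass reports no change.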
TEST_F(ConditionalCodeMotionTest, NestedConvert) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
%convert.2894 = f32[2,512,364]{2,1,0} convert(bf16[2,512,364]{2,1,0} %convert)
ROOT %tuple.1 = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert.2894, %convert)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = f32[2,512,364]{2,1,0} convert(%convert)
ROOT %tuple.2 = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) tuple(%convert.3604, %convert)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
ROOT conditional = (f32[2,512,364]{2,1,0}, bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
VLOG(2) << "module:\n" << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
VLOG(2) << "module:\n" << module->ToString();
}
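
// A convert feeding a nested conditional's branch-index operand must not be
// moved; the outer conditional remains the entry root.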
TEST_F(ConditionalCodeMotionTest, NestedConditionalDisableMoveConvert) {
absl::string_view hlo_string =
R"(
HloModule xla_computation_unknown.45
%branch_0_comp.11 (parameter.12: (u32[])) -> (s8[]) {
%parameter.12 = (u32[]) parameter(0)
%get-tuple-element.13 = u32[] get-tuple-element((u32[]) %parameter.12), index=0
%convert.15 = s8[] convert(u32[] %get-tuple-element.13)
ROOT %tuple.18 = (s8[]) tuple(s8[] %convert.15)
}
%branch_0_comp__1.19 (parameter.20: (pred[])) -> (s8[]) {
%parameter.20 = (pred[]) parameter(0)
%get-tuple-element.21 = pred[] get-tuple-element((pred[]) %parameter.20), index=0
%convert.23 = s8[] convert(pred[] %get-tuple-element.21)
ROOT %tuple.24 = (s8[]) tuple(s8[] %convert.23)
}
%branch_1_comp__1.25 (parameter.26: (pred[])) -> (s8[]) {
%parameter.26 = (pred[]) parameter(0)
%get-tuple-element.27 = pred[] get-tuple-element((pred[]) %parameter.26), index=0
%convert.29 = s8[] convert(pred[] %get-tuple-element.27)
ROOT %tuple.30 = (s8[]) tuple(s8[] %convert.29)
}
%branch_1_comp.31 (parameter.32: (u32[])) -> (s8[]) {
%parameter.32 = (u32[]) parameter(0)
%get-tuple-element.33 = u32[] get-tuple-element((u32[]) %parameter.32), index=0
%convert.35 = pred[] convert(u32[] %get-tuple-element.33)
%convert.36 = s32[] convert(pred[] %convert.35)
%constant.37 = pred[] constant(true)
%tuple.38 = (pred[]) tuple(pred[] %constant.37)
ROOT %conditional.39 = (s8[]) conditional(s32[] %convert.36, (pred[]) %tuple.38, (pred[]) %tuple.38), branch_computations={%branch_0_comp__1.19, %branch_1_comp__1.25}
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation_unknown.45 (parameter.3: u8[], parameter.4: u8[], parameter.5: u32[15,14]) -> (s8[]) {
%parameter.3 = u8[] parameter(0)
%parameter.4 = u8[] parameter(1)
%compare.7 = pred[] compare(u8[] %parameter.3, u8[] %parameter.4), direction=LT
%convert.9 = s32[] convert(pred[] %compare.7)
%parameter.5 = u32[15,14]{1,0} parameter(2)
%constant.2 = u32[] constant(0)
%reduce.1 = u32[] reduce(u32[15,14]{1,0} %parameter.5, u32[] %constant.2), dimensions={1,0}, to_apply=%scalar_add_computation.1
%tuple.10 = (u32[]) tuple(u32[] %reduce.1)
ROOT %conditional.42 = (s8[]) conditional(s32[] %convert.9, (u32[]) %tuple.10, (u32[]) %tuple.10), branch_computations={%branch_0_comp.11, %branch_1_comp.31}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
}
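
// Only the pieces of the operand tuple that each branch actually uses are
// moved in; the reduce feeding the nested conditional's branch index is
// recomputed inside the branch that needs it.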
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands1) {
absl::string_view hlo_string =
R"(
HloModule xla_computation_unknown.45
%branch_0_comp.11 (parameter.12: (u32[], u32[])) -> (s8[]) {
%parameter.12 = (u32[], u32[]) parameter(0)
%get-tuple-element.13 = u32[] get-tuple-element(%parameter.12), index=1
%convert.15 = s8[] convert(u32[] %get-tuple-element.13)
ROOT %tuple.18 = (s8[]) tuple(s8[] %convert.15)
}
%branch_0_comp__1.19 (parameter.20: (pred[], s8[])) -> (s8[]) {
%parameter.20 = (pred[], s8[]) parameter(0)
%get-tuple-element.21 = pred[] get-tuple-element(%parameter.20), index=0
%convert.23 = s8[] convert(pred[] %get-tuple-element.21)
ROOT %tuple.24 = (s8[]) tuple(s8[] %convert.23)
}
%branch_1_comp__1.25 (parameter.26: (pred[], s8[])) -> (s8[]) {
%parameter.26 = (pred[], s8[]) parameter(0)
%get-tuple-element.27 = s8[] get-tuple-element(%parameter.26), index=1
ROOT %tuple.30 = (s8[]) tuple(s8[] %get-tuple-element.27)
}
%branch_1_comp.31 (parameter.32: (u32[], u32[])) -> (s8[]) {
%parameter.32 = (u32[], u32[]) parameter(0)
%get-tuple-element.33 = u32[] get-tuple-element(%parameter.32), index=0
%convert.35 = pred[] convert(%get-tuple-element.33)
%convert.36 = s32[] convert(%get-tuple-element.33)
%constant.37 = s8[] constant(1)
%add.0 = s8[] add(constant.37, constant.37)
%tuple.38 = (pred[], s8[]) tuple(pred[] %convert.35, s8[] add.0)
ROOT %conditional.39 = (s8[]) conditional(%convert.36, %tuple.38, %tuple.38), branch_computations={%branch_0_comp__1.19, %branch_1_comp__1.25}
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation_unknown.45 (parameter.3: u8[], parameter.4: u8[], parameter.5: u32[15,14]) -> (s8[]) {
%parameter.3 = u8[] parameter(0)
%parameter.4 = u8[] parameter(1)
%compare.7 = pred[] compare(u8[] %parameter.3, u8[] %parameter.4), direction=LT
%convert.9 = s32[] convert(pred[] %compare.7)
%parameter.5 = u32[15,14]{1,0} parameter(2)
%constant.2 = u32[] constant(0)
%reduce.1 = u32[] reduce(u32[15,14]{1,0} %parameter.5, u32[] %constant.2), dimensions={1,0}, to_apply=%scalar_add_computation.1
%tuple.10 = (u32[], u32[]) tuple(%reduce.1, constant.2)
ROOT %conditional.42 = (s8[]) conditional(s32[] %convert.9, %tuple.10, %tuple.10), branch_computations={%branch_0_comp.11, %branch_1_comp.31}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
VLOG(3) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 4);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
HloInstruction* conditional_39 =
root->branch_computation(1)->root_instruction();
CHECK_EQ(conditional_39->opcode(), HloOpcode::kConditional);
const HloInstruction* conditional_39_pred = conditional_39->operand(0);
EXPECT_THAT(
conditional_39_pred,
op::Convert(op::Reduce(op::GetTupleElement(), op::GetTupleElement())));
const HloInstruction* conditional_39_true =
conditional_39->branch_computation(0)->root_instruction();
EXPECT_THAT(conditional_39_true, op::Tuple(op::Convert(op::Convert(
op::GetTupleElement(op::Parameter())))));
const HloInstruction* conditional_39_false =
conditional_39->branch_computation(1)->root_instruction();
EXPECT_THAT(conditional_39_false,
op::Tuple(op::Add(op::Constant(), op::Constant())));
}
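
// The multiply/power/floor chain computed in the entry is sunk into the
// false branch that consumes it.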
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands2) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
tmp_5 = f32[1]{0} reshape(f32[] tmp_4)
ROOT tmp_6 = (f32[], f32[1]{0}) tuple(f32[] tmp_4, f32[1]{0} tmp_5)
}
%branch_false {
tmp_0 = (f32[]) parameter(0)
tmp_1 = f32[] get-tuple-element((f32[]) tmp_0), index=0
tmp_2 = f32[1]{0} reshape(f32[] tmp_1)
ROOT tmp_3 = (f32[], f32[1]{0}) tuple(f32[] tmp_1, f32[1]{0} tmp_2)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = (f32[], f32[1]{0}) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 7);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 9);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(
op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))),
op::Reshape(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))))));
}
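
// Same sinking as above when the false branch is a pass-through of its
// parameter.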
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands3) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
ROOT tmp_0 = (f32[]) parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
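
// Sinking also works through a nested tuple result shape.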
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands4) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
tmp_5 = (f32[]) tuple(tmp_4)
ROOT tmp_6 = ((f32[])) tuple(tmp_5)
}
%branch_false {
tmp_0 = (f32[]) parameter(0)
ROOT tmp_1 = ((f32[])) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.87 = (f32[]) tuple(f32[] %multiply.13463)
ROOT conditional.1 = ((f32[])) conditional(%parameter.2, %parameter.1, %tuple.87), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 7);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 9);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement()))))));
}
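
// Sinking also works when the conditional's branch operand is a bare scalar
// rather than a tuple.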
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands5) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[]{:T(256)}, f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[]{:T(256)} get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
tmp_0 = f32[] parameter(0)
ROOT tmp_1 = (f32[]) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = ((f32[], f32[])) parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %parameter.1, %multiply.13463), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
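
// With one branch's operand wrapped in nested tuples, only the scalar chain
// feeding the other branch is sunk.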
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands6) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp_0 = ((f32[], f32[])) parameter(0)
tmp_1 = (f32[], f32[]) get-tuple-element(((f32[], f32[])) tmp_0), index=0
tmp_2 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=0
tmp_3 = f32[] get-tuple-element((f32[], f32[]) tmp_1), index=1
tmp_4 = f32[] multiply(f32[] tmp_2, f32[] tmp_3)
ROOT tmp_5 = (f32[]) tuple(tmp_4)
}
%branch_false {
tmp_0 = f32[] parameter(0)
ROOT tmp_1 = (f32[]) tuple(tmp_0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %constant.13862, f32[] %power.1)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
ROOT conditional.1 = (f32[]) conditional(%parameter.2, %tuple.2, %multiply.13463), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 6);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 8);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(
conditional_false,
op::Tuple(op::Multiply(
op::Constant(),
op::Power(op::Constant(), op::Floor(op::GetTupleElement())))));
}
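
// No motion when both branches share the same operand and the true branch
// performs a collective-permute.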
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands7) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
window.58 = bf16[1,23,768]{2,1,0} parameter(0)
ROOT collective-permute.29 = bf16[1,23,768]{2,1,0} collective-permute(window.58), channel_id=100, source_target_pairs={{0,1},{1,0}}
}
%branch_false {
ROOT window.59 = bf16[1,23,768]{2,1,0} parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = bf16[1,23,768]{2,1,0} parameter(0)
%parameter.1 = bf16[1,23,768]{2,1,0} parameter(1)
%parameter.2 = pred[] parameter(2)
add.244 = bf16[1,23,768]{2,1,0} add(parameter.0, parameter.1)
ROOT conditional.1 = bf16[1,23,768]{2,1,0} conditional(%parameter.2, %add.244, %add.244), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
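
// With distinct branch operands, the add feeding the false branch is sunk
// while the collective-permute branch is left untouched.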
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands8) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
window.58 = bf16[1,23,768]{2,1,0} parameter(0)
ROOT collective-permute.29 = bf16[1,23,768]{2,1,0} collective-permute(window.58), channel_id=100, source_target_pairs={{0,1},{1,0}}
}
%branch_false {
ROOT window.59 = bf16[1,23,768]{2,1,0} parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = bf16[1,23,768]{2,1,0} parameter(0)
%parameter.1 = bf16[1,23,768]{2,1,0} parameter(1)
%parameter.2 = pred[] parameter(2)
add.244 = bf16[1,23,768]{2,1,0} add(parameter.0, parameter.1)
ROOT conditional.1 = bf16[1,23,768]{2,1,0} conditional(%parameter.2, %parameter.0, %add.244), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
VLOG(2) << module->ToString();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
EXPECT_EQ(root->branch_computation(0)->instruction_count(), 2);
EXPECT_EQ(root->branch_computation(1)->instruction_count(), 4);
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
EXPECT_THAT(conditional_false,
op::Add(op::GetTupleElement(), op::GetTupleElement()));
}
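
// Pass-through branches returning a nested tuple must keep their result
// shape after the pass.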
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands9) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
ROOT tmp = ((f32[], f32[])) parameter(0)
}
%branch_false {
ROOT tmp = ((f32[], f32[])) parameter(0)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %parameter.1, f32[] %power.1)
%multiply.13464 = f32[] multiply(f32[] %parameter.0, f32[] %multiply.13463)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
%tuple.3 = (f32[], f32[]) tuple(multiply.13463, multiply.13464)
%tuple.4 = ((f32[], f32[])) tuple(tuple.3)
ROOT conditional.1 = ((f32[], f32[])) conditional(%parameter.2, %tuple.2, %tuple.4), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
pass.Run(&*module).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
const HloInstruction* conditional_true =
root->branch_computation(0)->root_instruction();
EXPECT_EQ(conditional_false->shape().tuple_shapes_size(), 1);
EXPECT_EQ(conditional_false->shape().tuple_shapes(0).tuple_shapes_size(), 2);
EXPECT_EQ(conditional_true->shape().tuple_shapes_size(), 1);
EXPECT_EQ(conditional_true->shape().tuple_shapes(0).tuple_shapes_size(), 2);
}
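
// Branches returning a (scalar, tuple) pair must keep both elements' shapes
// after the pass.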
TEST_F(ConditionalCodeMotionTest, MovePartiallyUsedOperands10) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
%branch_true {
tmp = ((f32[], f32[])) parameter(0)
tmp1 = (f32[], f32[]) get-tuple-element(tmp), index=0
tmp2 = f32[] get-tuple-element(tmp1), index=0
tmp3 = f32[] get-tuple-element(tmp1), index=1
add = f32[] add(tmp2, tmp3)
ROOT tuple = (f32[], (f32[], f32[])) tuple(add, tmp1)
}
%branch_false {
tmp = ((f32[], f32[])) parameter(0)
tmp1 = (f32[], f32[]) get-tuple-element(tmp), index=0
tmp2 = f32[] get-tuple-element(tmp1), index=0
ROOT tuple = (f32[], (f32[], f32[])) tuple(tmp2, tmp1)
}
%scalar_add_computation.1 (scalar_lhs.1: u32[], scalar_rhs.1: u32[]) -> u32[] {
%scalar_lhs.1 = u32[] parameter(0)
%scalar_rhs.1 = u32[] parameter(1)
ROOT %add.1 = u32[] add(u32[] %scalar_lhs.1, u32[] %scalar_rhs.1)
}
ENTRY %xla_computation {
%parameter.0 = f32[] parameter(0)
%parameter.1 = f32[] parameter(1)
%parameter.2 = pred[] parameter(2)
%constant.13862 = f32[] constant(0.00025)
%constant.13863 = f32[] constant(0.97)
%add.0 = f32[] add(parameter.1, parameter.1)
%floor.145 = f32[]{:T(256)} floor(f32[]{:T(256)} %parameter.0)
%power.1 = f32[] power(f32[] %constant.13863, f32[]{:T(256)} %floor.145)
%multiply.13463 = f32[] multiply(f32[] %parameter.1, f32[] %power.1)
%multiply.13464 = f32[] multiply(f32[] %parameter.0, f32[] %multiply.13463)
%tuple.1 = (f32[], f32[]) tuple(add.0, add.0)
%tuple.2 = ((f32[], f32[])) tuple(%tuple.1)
%tuple.3 = (f32[], f32[]) tuple(multiply.13463, multiply.13464)
%tuple.4 = ((f32[], f32[])) tuple(tuple.3)
ROOT conditional.1 = (f32[], (f32[], f32[])) conditional(%parameter.2, %tuple.2, %tuple.4), true_computation=branch_true, false_computation=branch_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(/*is_layout_sensitive=*/true,
                             /*pursue_full_conditional_code_motion=*/true);
  pass.Run(module.get()).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional());
const HloInstruction* conditional_false =
root->branch_computation(1)->root_instruction();
const HloInstruction* conditional_true =
root->branch_computation(0)->root_instruction();
EXPECT_THAT(conditional_false->shape().tuple_shapes_size(), 2);
EXPECT_THAT(conditional_false->shape().tuple_shapes(1).tuple_shapes_size(),
2);
EXPECT_THAT(conditional_true->shape().tuple_shapes_size(), 2);
EXPECT_THAT(conditional_true->shape().tuple_shapes(1).tuple_shapes_size(), 2);
}
TEST_F(ConditionalCodeMotionTest, MovePartialyUsedOperands11) {
absl::string_view hlo_string =
R"(
HloModule xla_computation
region_2.494 {
Arg_.495 = (u32[], u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(Arg_.495), index=1, metadata={op_type="Less" op_name="cond_1/Less"}
bitcast-convert = s32[] bitcast-convert(get-tuple-element), metadata={op_type="Less" op_name="cond_1/Less"}
constant.172 = s32[] constant(0), metadata={op_type="Less" op_name="cond_1/Less"}
compare = pred[] compare(bitcast-convert, constant.172), direction=LT, metadata={op_type="Less" op_name="cond_1/Less"}
constant.1 = u32[] constant(0)
compare.1 = pred[] compare(get-tuple-element, constant.1), direction=EQ, metadata={op_type="Less" op_name="cond_1/Less"}
get-tuple-element.2 = u32[] get-tuple-element(Arg_.495), index=0, metadata={op_type="Less" op_name="cond_1/Less"}
constant = u32[] constant(25000), metadata={op_type="Less" op_name="cond_1/Less"}
compare.2 = pred[] compare(get-tuple-element.2, constant), direction=LT, metadata={op_type="Less" op_name="cond_1/Less"}
and = pred[] and(compare.1, compare.2), metadata={op_type="Less" op_name="cond_1/Less"}
or = pred[] or(compare, and), metadata={op_type="Less" op_name="cond_1/Less"}
ROOT tuple.1 = (pred[]) tuple(or)
}
region_3.498 {
Arg_.499 = pred[] parameter(0)
ROOT tuple.2 = (pred[]) tuple(Arg_.499)
}
ENTRY %xla_computation {
custom-call = u32[]{:T(256)} parameter(0)
bitcast-convert.31 = s32[]{:T(256)} parameter(1)
constant.202 = s32[]{:T(256)} parameter(2)
constant.21 = u32[]{:T(256)} parameter(3)
custom-call.1 = u32[]{:T(256)} parameter(4)
compare.38 = pred[]{:T(256)} compare(bitcast-convert.31, constant.202), direction=GT, metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
compare.39 = pred[]{:T(256)} compare(custom-call, constant.21), direction=EQ, metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
or.17 = pred[]{:T(256)} or(compare.38, compare.39), metadata={op_type="GreaterEqual" op_name="GreaterEqual"}
tuple.20 = (u32[]{:T(256)}, u32[]{:T(256)}) tuple(custom-call.1, custom-call), sharding={{maximal device=0}, {maximal device=0}}
ROOT conditional = (pred[]) conditional(or.17, tuple.20, or.17), true_computation=region_2.494, false_computation=region_3.498, metadata={op_type="If" op_name="cond_1"}
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(/*is_layout_sensitive=*/true,
                             /*pursue_full_conditional_code_motion=*/true);
  pass.Run(module.get()).value();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Conditional(op::Or(), op::Tuple(), op::Or()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3bb57b45-0742-480e-8a24-200b67ed49fa | cpp | tensorflow/tensorflow | hlo_element_type_converter | third_party/xla/xla/service/hlo_element_type_converter.cc | third_party/xla/xla/service/hlo_element_type_converter_test.cc | #include "xla/service/hlo_element_type_converter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
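// Wraps `hlo` in a convert to `type` unless it already produces that type.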
HloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) {
if (hlo->shape().element_type() != type) {
Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
hlo = hlo->parent()->AddInstruction(
HloInstruction::CreateConvert(shape, hlo));
}
CHECK_EQ(hlo->shape().element_type(), type);
return hlo;
}
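// Returns true if any operand of `hlo` has the given element type.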
bool HasOperandType(HloInstruction* hlo, PrimitiveType type) {
for (HloInstruction* operand : hlo->operands()) {
if (operand->shape().element_type() == type) {
return true;
}
}
return false;
}
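// Returns a copy of the (flat) tuple `shape` with every `from_type` element
// replaced by `to_type`; nested tuple elements are not expected here.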
Shape GetConvertedTupleShape(const Shape& shape, PrimitiveType from_type,
PrimitiveType to_type) {
std::vector<Shape> new_tuple_subshapes;
const int64_t n = ShapeUtil::TupleElementCount(shape);
new_tuple_subshapes.reserve(n);
for (int64_t i = 0; i < n; ++i) {
Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);
CHECK(!subshape.IsTuple());
if (subshape.element_type() == from_type) {
subshape = ShapeUtil::ChangeElementType(subshape, to_type);
}
new_tuple_subshapes.push_back(subshape);
}
return ShapeUtil::MakeTupleShape(new_tuple_subshapes);
}
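// Unpacks the tuple produced by `hlo`, converts each element whose type
// differs from the corresponding leaf of `to_shape`, and repacks the result.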
HloInstruction* ConvertTupleElements(HloInstruction* hlo,
const Shape& to_shape) {
const Shape& shape = hlo->shape();
HloComputation* computation = hlo->parent();
std::vector<HloInstruction*> tuple_elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& ele_shape = ShapeUtil::GetTupleElementShape(shape, i);
HloInstruction* element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(ele_shape, hlo, i));
const Shape& to_ele_shape = ShapeUtil::GetTupleElementShape(to_shape, i);
CHECK(!ele_shape.IsTuple());
if (ele_shape.element_type() != to_ele_shape.element_type()) {
element = computation->AddInstruction(
HloInstruction::CreateConvert(to_ele_shape, element));
}
tuple_elements.push_back(element);
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
}
HloElementTypeConverter::HloElementTypeConverter(
PrimitiveType eliminate_type, PrimitiveType replace_with_type)
: eliminate_type_(eliminate_type), replace_with_type_(replace_with_type) {}
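// Clones each instruction that produces or consumes eliminate_type_ with
// converted operands, converting the result back afterwards.  Pass-through
// ops (tuple, convert, get-tuple-element, ...), custom calls, and ops that
// carry called computations are intentionally left alone.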
absl::StatusOr<bool> HloElementTypeConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
3, "HloElementTypeConverter::Run(), before:\n" + module->ToString());
if (eliminate_type_ == replace_with_type_) {
return false;
}
HloCloneContext context(module);
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
for (auto* hlo : computation->MakeInstructionPostOrder()) {
const auto opcode = hlo->opcode();
if (opcode == HloOpcode::kParameter || opcode == HloOpcode::kConstant ||
opcode == HloOpcode::kTuple || opcode == HloOpcode::kConvert ||
opcode == HloOpcode::kBitcastConvert ||
opcode == HloOpcode::kGetTupleElement ||
opcode == HloOpcode::kInfeed || opcode == HloOpcode::kOutfeed) {
continue;
}
if (opcode == HloOpcode::kCustomCall) {
continue;
}
if (opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall ||
opcode == HloOpcode::kAllReduce ||
opcode == HloOpcode::kReduceScatter ||
opcode == HloOpcode::kAllReduceStart ||
opcode == HloOpcode::kFusion || opcode == HloOpcode::kMap ||
opcode == HloOpcode::kReduce || opcode == HloOpcode::kReduceWindow ||
opcode == HloOpcode::kScatter ||
opcode == HloOpcode::kSelectAndScatter ||
opcode == HloOpcode::kSort || opcode == HloOpcode::kConditional) {
continue;
}
TF_RET_CHECK(hlo->called_computations().empty()) << hlo->ToString();
bool nullary = hlo->operands().empty();
bool wrong_element_type = hlo->shape().element_type() == eliminate_type_;
bool should_eliminate_type = (nullary && wrong_element_type) ||
HasOperandType(hlo, eliminate_type_);
if (!should_eliminate_type) {
TF_RET_CHECK(hlo->shape().element_type() != eliminate_type_);
continue;
}
std::vector<HloInstruction*> new_operands;
const auto& operands = hlo->operands();
new_operands.reserve(operands.size());
for (HloInstruction* operand : operands) {
if (operand->shape().element_type() == eliminate_type_) {
operand = ToElementType(operand, replace_with_type_);
}
new_operands.push_back(operand);
}
HloInstruction* new_hlo;
if (hlo->shape().element_type() == eliminate_type_) {
Shape shape =
ShapeUtil::ChangeElementType(hlo->shape(), replace_with_type_);
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(shape, new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
new_hlo = ToElementType(new_hlo, eliminate_type_);
} else if (hlo->shape().IsTuple()) {
Shape old_shape = hlo->shape();
Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_,
replace_with_type_);
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(new_shape, new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
new_hlo = ConvertTupleElements(new_hlo, old_shape);
} else {
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
}
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_hlo));
TF_RETURN_IF_ERROR(hlo->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation->RemoveInstruction(hlo));
changed = true;
}
}
XLA_VLOG_LINES(
2, "HloElementTypeConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/hlo_element_type_converter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Not;
using ::testing::ResultOf;
using HloElementTypeConverterTest = HloTestBase;
TEST_F(HloElementTypeConverterTest, CustomCallsNotConverted) {
const std::string& hlo_string = R"(
HloModule custom_call
ENTRY CustomCall {
constant = bf16[1]{0} constant({12345})
ROOT custom-call = bf16[1,2,3]{0,2,1} custom-call(constant),
custom_call_target="foo"
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_FALSE(converted);
}
TEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) {
const std::string& hlo_string = R"(
HloModule InfeedOutfeed
ENTRY RoundTrip16MiBR1.v2 {
token0 = token[] after-all()
infeed = (bf16[4]{0}, token[]) infeed(token0)
ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0
outfeed = token[] outfeed(infeed.data, token0)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_FALSE(converted);
}
TEST_F(HloElementTypeConverterTest, OperationsInNestedTuplesConverted) {
const std::string& hlo_string = R"(
HloModule NestedTuples
ENTRY NestedTuples.v5 {
constant.2 = f32[2]{0} constant({1, 2})
constant.3 = bf16[2]{0} constant({42, 42})
add = bf16[2]{0} add(constant.2, constant.3)
tuple = (f32[2]{0}, bf16[2]{0}) tuple(constant.2, add)
constant.5 = bf16[2]{0} constant({22, 44})
ROOT tuple.1 = ((f32[2]{0}, bf16[2]{0}), bf16[2]{0}) tuple(tuple, constant.5)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
const HloInstruction* bf16_op =
module->entry_computation()->root_instruction()->operand(0)->operand(1);
EXPECT_THAT(bf16_op, op::Convert(op::Add(op::Constant(), op::Convert())));
}
TEST_F(HloElementTypeConverterTest, BatchNormGradBF16Converted) {
const std::string& hlo_string = R"(
HloModule BatchNormGrad
ENTRY BatchNormGrad.v6 {
constant.4 = bf16[2,2,2,1]{3,2,1,0} constant({ {
{ {0}, {0} }, { {0}, {0} } }, { { {0},
{0} }, { {0}, {0} } } })
constant.5 = bf16[2]{0} constant({1, 1})
constant.6 = bf16[2]{0} constant({0, 0})
constant.7 = bf16[2]{0} constant({1, 1})
constant.8 = bf16[2,2,2,1]{3,2,1,0} constant({ {
{ {1}, {2} }, { {3}, {4} } }, { {
{5}, {6} }, { {7}, {8} } } })
ROOT batch-norm-grad = (bf16[2,2,2,1]{3,2,1,0}, bf16[2]{0}, bf16[2]{0})
batch-norm-grad(constant.4, constant.5, constant.6, constant.7,
constant.8), epsilon=0, feature_index=2
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
const HloInstruction* tuple_instr =
module->entry_computation()->root_instruction();
::testing::Matcher<const ::xla::HloInstruction*> batch_norm =
op::BatchNormGrad();
EXPECT_THAT(tuple_instr,
op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)),
op::Convert(op::GetTupleElement(batch_norm, 1)),
op::Convert(op::GetTupleElement(batch_norm, 2))));
}
TEST_F(HloElementTypeConverterTest, RngIsRemoved) {
const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
constant.3 = bf16[] constant(0)
constant.4 = bf16[] constant(1)
ROOT rng = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
HloPredicate is_bf16_rng = [](const HloInstruction* inst) {
return inst->shape().element_type() == BF16 &&
inst->opcode() == HloOpcode::kRng;
};
EXPECT_THAT(module->entry_computation()->instructions(),
Not(Contains(ResultOf(is_bf16_rng, Eq(true)))));
}
TEST_F(HloElementTypeConverterTest, RngCtrlDep) {
const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
constant.3 = bf16[] constant(0)
constant.4 = bf16[] constant(1)
rng0 = bf16[1,2000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
ROOT rng1 = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), control-predecessors={%rng0}, distribution=rng_uniform
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
  HloInstruction* rng0 = nullptr;
  HloInstruction* rng1 = nullptr;
for (auto* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kRng) {
const Shape& shape = inst->shape();
ASSERT_EQ(shape.dimensions_size(), 3);
ASSERT_TRUE(shape.dimensions(1) == 2000 || shape.dimensions(1) == 1000);
if (shape.dimensions(1) == 2000) {
rng0 = inst;
} else {
rng1 = inst;
}
}
}
EXPECT_THAT(rng0->control_successors(), ElementsAre(rng1));
EXPECT_THAT(rng1->control_predecessors(), ElementsAre(rng0));
}
TEST_F(HloElementTypeConverterTest, BitcastConvertIsUnmodified) {
const std::string& hlo_string = R"(
HloModule test
ENTRY test {
p = bf16[] parameter(0)
ROOT c = u16[] bitcast-convert(p)
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, RunHloPass(&converter, module.get()));
EXPECT_FALSE(converted);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a6eab818-9798-47e4-abf3-ff1e9416308e | cpp | tensorflow/tensorflow | stable_sort_expander | third_party/xla/xla/service/stable_sort_expander.cc | third_party/xla/xla/service/stable_sort_expander_test.cc | #include "xla/service/stable_sort_expander.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
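// Returns the operand index of an S32 iota along the sort dimension, or -1
// if the sort has no operand that can be reused as a stable tie-breaker.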
int64_t StableSortExpander::IotaOperandIndexForStableSort(
const HloSortInstruction& sort) {
for (const HloInstruction* operand : sort.operands()) {
if (operand->opcode() == HloOpcode::kIota &&
Cast<HloIotaInstruction>(operand)->iota_dimension() ==
sort.sort_dimension() &&
operand->shape().element_type() == S32) {
return sort.operand_index(operand);
}
}
return -1;
}
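// Turns a stable sort into one an unstable implementation can honor: an S32
// iota operand records each element's original position (an existing
// suitable iota operand is reused), and the comparator is extended to detect
// ties by comparing against a parameter-swapped clone of itself, falling
// back to the recorded position when the keys compare equal both ways.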
absl::StatusOr<HloInstruction*> StableSortExpander::ExpandInstruction(
HloInstruction* instruction) {
auto* sort = Cast<HloSortInstruction>(instruction);
HloComputation* computation = sort->parent();
HloInstruction* expanded_sort = nullptr;
int64_t iota_index = IotaOperandIndexForStableSort(*sort);
if (iota_index == -1) {
Shape iota_shape = sort->operand(0)->shape();
if (iota_shape.dimensions(sort->sort_dimension()) >
std::numeric_limits<int32_t>::max()) {
return Unimplemented(
"Stable sorting of more than 2^31-1 elements is not implemented");
}
iota_shape.set_element_type(S32);
auto iota = computation->AddInstruction(
HloInstruction::CreateIota(iota_shape, sort->sort_dimension()));
auto comparator = sort->to_apply();
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
std::vector<std::unique_ptr<HloInstruction>> extra_parameters;
std::vector<HloInstruction*> extra_parameter_ptrs;
Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".lhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2 + 1, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".rhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
sort->set_to_apply(sort->GetModule()->AddEmbeddedComputation(
comparator->CloneWithReplacements(&replacements,
extra_parameter_ptrs)));
std::vector<HloInstruction*> new_operands(sort->operands().begin(),
sort->operands().end());
new_operands.push_back(iota);
std::vector<Shape> new_shapes = sort->operand_count() == 1
? std::vector<Shape>{sort->shape()}
: sort->shape().tuple_shapes();
new_shapes.push_back(iota_shape);
Shape new_sort_shape = ShapeUtil::MakeTupleShape(new_shapes);
HloInstruction* new_sort = computation->AddInstruction(
sort->CloneWithNewOperands(new_sort_shape, new_operands));
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
tuple_elements.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
sort->operand(i)->shape(), new_sort, i)));
}
expanded_sort = tuple_elements[0];
if (tuple_elements.size() > 1) {
expanded_sort = computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
sort = Cast<HloSortInstruction>(new_sort);
iota_index = sort->operand_count() - 1;
}
auto comparator = sort->to_apply();
std::vector<HloInstruction*> instructions_postorder =
comparator->MakeInstructionPostOrder();
absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements;
auto replace = [&](HloInstruction* instr) {
auto it = replacements.find(instr);
if (it == replacements.end()) {
return instr;
}
return it->second;
};
HloInstruction* old_root = comparator->root_instruction();
for (int64_t i = 0; i < comparator->num_parameters(); ++i) {
replacements[comparator->parameter_instruction(i)] =
comparator->parameter_instruction(i ^ 1);
}
HloInstruction* cloned_root = nullptr;
for (HloInstruction* inst : instructions_postorder) {
if (inst->operand_count() == 0) {
continue;
}
std::vector<HloInstruction*> new_operands;
new_operands.reserve(inst->operand_count());
for (HloInstruction* operand : inst->operands()) {
new_operands.push_back(replace(operand));
}
auto new_instruction =
inst->CloneWithNewOperands(inst->shape(), new_operands);
replacements[inst] = new_instruction.get();
if (inst == old_root) {
cloned_root = new_instruction.get();
}
comparator->AddInstruction(std::move(new_instruction));
}
CHECK_NE(cloned_root, nullptr);
Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
HloInstruction* same =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, old_root, cloned_root, ComparisonDirection::kEq));
HloInstruction* tie_breaker =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, comparator->parameter_instruction(2 * iota_index),
comparator->parameter_instruction(2 * iota_index + 1),
ComparisonDirection::kLt));
HloInstruction* new_root =
comparator->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kSelect, same, tie_breaker,
old_root));
comparator->set_root_instruction(new_root);
return expanded_sort;
}
bool StableSortExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kSort &&
Cast<HloSortInstruction>(instruction)->is_stable();
}
} | #include "xla/service/stable_sort_expander.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using StableSortExpanderTest = HloTestBase;
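// Recursively checks that two comparator subtrees are identical except that
// every parameter on one side maps to its partner (lhs <-> rhs) on the other.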
bool IsSameComputationExceptParams(const HloInstruction* a,
const HloInstruction* b) {
if (a->opcode() != b->opcode() || a->operand_count() != b->operand_count()) {
return false;
}
if (a->opcode() == HloOpcode::kParameter) {
return a->parameter_number() == (b->parameter_number() ^ 1);
}
if (a->operand_count() == 0) {
return a == b;
}
for (int64_t i = 0; i < a->operand_count(); ++i) {
if (!IsSameComputationExceptParams(a->operand(i), b->operand(i))) {
return false;
}
}
return true;
}
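// Asserts that the expanded comparator has the tie-breaking form
//   select(cmp == swapped_cmp, param(2i) < param(2i+1), cmp)
// where i is the operand index of the iota key.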
void CheckComputationHasTieBreaker(const HloInstruction* root,
int64_t iota_parameter) {
ASSERT_EQ(root->opcode(), HloOpcode::kSelect);
ASSERT_EQ(root->operand(0)->opcode(), HloOpcode::kCompare);
ASSERT_EQ(root->operand(0)->comparison_direction(), ComparisonDirection::kEq);
EXPECT_THAT(root->operand(1),
GmockMatch(m::Lt(m::Parameter(iota_parameter * 2),
m::Parameter(iota_parameter * 2 + 1))));
EXPECT_EQ(root->operand(2), root->operand(0)->operand(0));
EXPECT_TRUE(IsSameComputationExceptParams(root->operand(0)->operand(0),
root->operand(0)->operand(1)));
}
TEST_F(StableSortExpanderTest, StabilizeSortReuseIotaOperand) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest,
StabilizeSortReuseIotaOperandComplicatedComparison) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
max = u32[] constant(2147483647)
zero = s32[] constant(0)
lhs.signed = s32[] bitcast-convert(p.0.lhs)
lhs.unsigned = u32[] bitcast-convert(p.0.lhs)
lhs.flipped = u32[] subtract(max, lhs.unsigned)
lhs.flipped.signed = s32[] bitcast-convert(lhs.flipped)
lhs.is_negative = pred[] compare(lhs.flipped.signed, zero), direction=LT
lhs.converted = s32[] select(lhs.is_negative, lhs.flipped.signed, lhs.signed)
rhs.signed = s32[] bitcast-convert(p.0.rhs)
rhs.unsigned = u32[] bitcast-convert(p.0.rhs)
rhs.flipped = u32[] subtract(max, rhs.unsigned)
rhs.flipped.signed = s32[] bitcast-convert(rhs.flipped)
rhs.is_negative = pred[] compare(rhs.flipped.signed, zero), direction=LT
rhs.converted = s32[] select(rhs.is_negative, rhs.flipped.signed, rhs.signed)
ROOT lt = pred[] compare(lhs.converted, rhs.converted), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortAddIotaOperandAndChangeRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
ROOT sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, GmockMatch(m::Tuple(
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 0),
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 1))));
  CheckComputationHasTieBreaker(
      root->operand(0)->operand(0)->to_apply()->root_instruction(),
      /*iota_parameter=*/2);
}
TEST_F(StableSortExpanderTest, HonorIsStableFlag) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=false
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_FALSE(stabilizer.Run(module.get()).value());
}
TEST_F(StableSortExpanderTest,
StabilizeSortDontReuseIotaOperandWrongDimension) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=0
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
  CheckComputationHasTieBreaker(
      root->operand(0)->to_apply()->root_instruction(),
      /*iota_parameter=*/2);
}
TEST_F(StableSortExpanderTest, StabilizeSortDontReuseIotaOperandWrongType) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = f32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, f32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
  CheckComputationHasTieBreaker(
      root->operand(0)->to_apply()->root_instruction(),
      /*iota_parameter=*/2);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
ROOT sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1NoRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
ROOT neg = s32[64,8732]{1,0} negate(sort)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Negate(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0))));
  CheckComputationHasTieBreaker(
      root->operand(0)->operand(0)->to_apply()->root_instruction(),
      /*iota_parameter=*/1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stable_sort_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stable_sort_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
62682d29-cb01-430f-b4b6-1d1f6b377046 | cpp | tensorflow/tensorflow | float_normalization | third_party/xla/xla/service/float_normalization.cc | third_party/xla/xla/service/float_normalization_test.cc | #include "xla/service/float_normalization.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
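// Visitor that inserts converts around instructions whose low-precision
// (for example BF16) operands or results are not supported natively,
// according to the FloatSupport object describing the backend.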
class FloatNormalizationVisitor : public DfsHloVisitorWithDefault {
public:
explicit FloatNormalizationVisitor(const FloatSupport* float_support,
FloatNormalization* float_normalization)
: computation_(nullptr),
float_support_(float_support),
float_normalization_(float_normalization) {}
bool changed() const { return changed_; }
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status Preprocess(HloInstruction* hlo) override;
private:
absl::Status HandleInstruction(HloInstruction* hlo);
absl::Status HandleMultipleOutputs(HloInstruction* hlo);
absl::StatusOr<HloInstruction*> ConvertType(HloInstruction* hlo,
PrimitiveType from,
PrimitiveType to,
HloComputation* computation);
absl::Status InsertConvertAfterOutput(HloInstruction* hlo, PrimitiveType from,
PrimitiveType to,
HloComputation* computation);
absl::Status ChangeOutputTypeThenInsertConvertBack(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation);
absl::Status InsertConvertBeforeOperand(HloInstruction* hlo,
int64_t operand_idx,
PrimitiveType from, PrimitiveType to,
HloComputation* computation);
absl::Status ConvertCalledComputations(
HloInstruction* hlo,
absl::Span<HloComputation* const> low_precision_called_comps);
PrimitiveType LowPrecisionType() const {
return float_support_->LowPrecisionType();
}
PrimitiveType HighPrecisionType() const {
return float_support_->HighPrecisionType();
}
HloComputation* computation_;
const FloatSupport* float_support_;
FloatNormalization* float_normalization_;
bool changed_ = false;
};
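// Counts the subshapes of `shape` whose element type is `type`.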
int64_t CountSubshapesWithMatchingType(const Shape& shape, PrimitiveType type) {
int64_t count = 0;
ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() == type) {
++count;
}
});
return count;
}
int64_t ShapeLeafCount(const Shape& shape) {
int64_t count = 0;
ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (ShapeUtil::IsLeafIndex(shape, index)) {
++count;
}
});
return count;
}
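// Deep-copies `hlo`, converting every `from`-typed leaf to `to`.  Converting
// a high-precision convert back down simply reuses its low-precision input.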
absl::StatusOr<HloInstruction*> FloatNormalizationVisitor::ConvertType(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
if (CountSubshapesWithMatchingType(hlo->shape(), from) == 0) {
return hlo;
}
if (hlo->opcode() == HloOpcode::kConvert &&
hlo->operand(0)->shape().element_type() == to &&
to == LowPrecisionType() && from == HighPrecisionType()) {
return hlo->mutable_operand(0);
}
TF_ASSIGN_OR_RETURN(
auto new_hlo,
computation->DeepCopyInstructionWithCustomCopier(
hlo, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const auto& original_subshape =
ShapeUtil::GetSubshape(hlo->shape(), leaf_index);
if (original_subshape.element_type() != from) {
return leaf;
}
auto new_subshape =
ShapeUtil::ChangeElementType(original_subshape, to);
float_normalization_->UpdateLayout(&new_subshape);
return computation->AddInstruction(
HloInstruction::CreateConvert(new_subshape, leaf));
}));
return new_hlo;
}
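// Converts the value produced by `hlo` from `from` to `to` and points all of
// its users (and the computation root, if `hlo` is the root) at the result.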
absl::Status FloatNormalizationVisitor::InsertConvertAfterOutput(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
bool is_root = computation->root_instruction() == hlo;
std::vector<HloInstruction*> materialized_users = hlo->users();
TF_ASSIGN_OR_RETURN(auto new_hlo, ConvertType(hlo, from, to, computation));
if (new_hlo == hlo) {
return absl::OkStatus();
}
for (auto* user : materialized_users) {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWithDifferentShape(user, new_hlo));
}
if (is_root) {
computation->set_root_instruction(new_hlo, true);
}
changed_ = true;
return absl::OkStatus();
}
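// Changes `hlo`'s result type from `from` to `to` in place, then converts
// back to the original type for existing users.  Users that were themselves
// converts up to `to` are rewired to consume `hlo` directly, and the entry
// computation's input/output aliasing is preserved when the root changes.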
absl::Status FloatNormalizationVisitor::ChangeOutputTypeThenInsertConvertBack(
HloInstruction* hlo, PrimitiveType from, PrimitiveType to,
HloComputation* computation) {
auto original_shape = hlo->shape();
if (CountSubshapesWithMatchingType(original_shape, from) == 0) {
return absl::OkStatus();
}
bool is_root = computation->root_instruction() == hlo;
std::optional<HloInputOutputAliasConfig> alias_config;
HloModule* module = computation->parent();
if (is_root && module->has_entry_computation() &&
module->entry_computation() == computation) {
alias_config = module->input_output_alias_config();
}
ShapeUtil::ForEachMutableSubshape(
hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {
if (subshape->element_type() == from) {
subshape->set_element_type(to);
}
});
float_normalization_->UpdateLayout(hlo->mutable_shape());
std::vector<HloInstruction*> materialized_users = hlo->users();
TF_ASSIGN_OR_RETURN(
auto new_hlo,
computation->DeepCopyInstructionWithCustomCopier(
hlo, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const auto& original_subshape =
ShapeUtil::GetSubshape(original_shape, leaf_index);
if (original_subshape.element_type() ==
leaf->shape().element_type()) {
return leaf;
}
return computation->AddInstruction(
HloInstruction::CreateConvert(original_subshape, leaf));
}));
std::vector<HloInstruction*> conversions_to_simplify;
for (auto* user : materialized_users) {
if (user->opcode() == HloOpcode::kConvert &&
user->shape().element_type() == to && to == HighPrecisionType() &&
from == LowPrecisionType()) {
conversions_to_simplify.emplace_back(user);
} else {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWithDifferentShape(user, new_hlo));
}
}
for (auto* convert : conversions_to_simplify) {
TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(hlo));
}
if (is_root) {
computation->set_root_instruction(new_hlo, true);
if (alias_config.has_value()) {
module->set_input_output_alias_config(*alias_config);
}
}
changed_ = true;
return absl::OkStatus();
}
absl::Status FloatNormalizationVisitor::InsertConvertBeforeOperand(
HloInstruction* hlo, int64_t operand_idx, PrimitiveType from,
PrimitiveType to, HloComputation* computation) {
auto operand = hlo->mutable_operand(operand_idx);
TF_ASSIGN_OR_RETURN(auto new_operand,
ConvertType(operand, from, to, computation));
if (new_operand == operand) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWithDifferentShape(operand_idx, new_operand));
changed_ = true;
return absl::OkStatus();
}
absl::Status FloatNormalizationVisitor::ConvertCalledComputations(
HloInstruction* hlo,
absl::Span<HloComputation* const> low_precision_called_comps) {
absl::flat_hash_map<HloComputation*, HloComputation*> cloned_computations;
for (auto& comp : low_precision_called_comps) {
auto cloned = comp->parent()->AddEmbeddedComputation(comp->Clone());
cloned_computations[comp] = cloned;
changed_ = true;
}
hlo->ReplaceCalledComputations([&](HloComputation* comp) {
auto it = cloned_computations.find(comp);
if (it != cloned_computations.end()) {
return it->second;
}
return comp;
});
for (auto& comp_pair : cloned_computations) {
auto comp = comp_pair.second;
TF_RETURN_IF_ERROR(InsertConvertAfterOutput(comp->root_instruction(),
LowPrecisionType(),
HighPrecisionType(), comp));
for (auto* param : comp->parameter_instructions()) {
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
param, LowPrecisionType(), HighPrecisionType(), comp));
}
}
return absl::OkStatus();
}
bool ShouldAvoidNormalizingComputationsForInstruction(HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kReduceScatter;
}
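// Handles variadic ops (sort, all-reduce, reduce-scatter) whose result is a
// tuple: operands and tuple outputs are converted element by element, and
// called computations that touch the low-precision type are cloned and
// rewritten.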
absl::Status FloatNormalizationVisitor::HandleMultipleOutputs(
HloInstruction* hlo) {
std::vector<PrimitiveType> operand_types(hlo->operand_count());
std::vector<PrimitiveType> output_types(hlo->operand_count());
int64_t high_prec_count = 0;
int64_t low_prec_count = 0;
bool has_unsupported_low_prec_operand = false;
bool has_unsupported_low_prec_output = false;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
CHECK(hlo->operand(i)->shape().IsArray());
CHECK(ShapeUtil::GetSubshape(hlo->shape(), {i}).IsArray());
operand_types[i] = hlo->operand(i)->shape().element_type();
output_types[i] = ShapeUtil::GetSubshape(hlo->shape(), {i}).element_type();
if (operand_types[i] == HighPrecisionType()) {
high_prec_count += 1;
} else if (operand_types[i] == LowPrecisionType()) {
low_prec_count += 1;
if (!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
has_unsupported_low_prec_operand = true;
}
}
if (output_types[i] == HighPrecisionType()) {
high_prec_count += 1;
} else if (output_types[i] == LowPrecisionType()) {
low_prec_count += 1;
if (!float_support_->SupportsLowPrecisionOutput(*hlo)) {
has_unsupported_low_prec_output = true;
}
}
}
if (low_prec_count == 0) {
return absl::OkStatus();
}
auto should_convert_operand = [&](int64_t i) {
if (operand_types[i] != LowPrecisionType()) {
return false;
}
if (!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
return true;
}
if (float_support_->SupportsMixedPrecisions(*hlo)) {
return false;
}
return has_unsupported_low_prec_operand ||
has_unsupported_low_prec_output || high_prec_count > 0;
};
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
if (should_convert_operand(i)) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
high_prec_count += 1;
low_prec_count -= 1;
}
}
if (!has_unsupported_low_prec_output &&
(float_support_->SupportsMixedPrecisions(*hlo) || high_prec_count == 0 ||
low_prec_count == 0)) {
return absl::OkStatus();
}
std::vector<HloComputation*> low_precision_called_comps;
for (auto* comp : hlo->called_computations()) {
if (ShouldAvoidNormalizingComputationsForInstruction(hlo)) {
continue;
}
bool comp_has_low_precision = false;
if (comp->root_instruction()->shape().element_type() ==
HighPrecisionType()) {
high_prec_count += 1;
} else if (comp->root_instruction()->shape().element_type() ==
LowPrecisionType()) {
low_prec_count += 1;
comp_has_low_precision = true;
}
for (auto* param : comp->parameter_instructions()) {
if (param->shape().element_type() == HighPrecisionType()) {
high_prec_count += 1;
} else if (param->shape().element_type() == LowPrecisionType()) {
low_prec_count += 1;
comp_has_low_precision = true;
}
}
if (comp_has_low_precision) {
low_precision_called_comps.push_back(comp);
}
}
std::vector<HloInstruction*> materialized_users = hlo->users();
std::vector<HloInstruction*> output_elements(hlo->operand_count());
auto original_shape = hlo->shape();
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});
if (output_types[i] != LowPrecisionType()) {
output_elements[i] = computation_->AddInstruction(
HloInstruction::CreateGetTupleElement(*subshape, hlo, i));
continue;
}
subshape->set_element_type(HighPrecisionType());
float_normalization_->UpdateLayout(subshape);
auto gte = computation_->AddInstruction(
HloInstruction::CreateGetTupleElement(*subshape, hlo, i));
auto shape = ShapeUtil::ChangeElementType(*subshape, LowPrecisionType());
float_normalization_->UpdateLayout(&shape);
output_elements[i] =
computation_->AddInstruction(HloInstruction::CreateConvert(shape, gte));
}
auto tuple = computation_->AddInstruction(
HloInstruction::CreateTuple(output_elements));
*tuple->mutable_shape() = hlo->shape();
for (auto* user : materialized_users) {
TF_RETURN_IF_ERROR(hlo->ReplaceUseWith(user, tuple));
}
bool is_root = computation_->root_instruction() == hlo;
if (is_root) {
computation_->set_root_instruction(tuple);
}
*tuple->mutable_shape() = original_shape;
return ConvertCalledComputations(hlo, low_precision_called_comps);
}
absl::Status FloatNormalizationVisitor::HandleInstruction(HloInstruction* hlo) {
int high_prec_count = 0;
int low_prec_count = 0;
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
high_prec_count += CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
HighPrecisionType());
low_prec_count += CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
LowPrecisionType());
}
high_prec_count +=
CountSubshapesWithMatchingType(hlo->shape(), HighPrecisionType());
low_prec_count +=
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType());
std::vector<HloComputation*> low_precision_called_comps;
for (auto* comp : hlo->called_computations()) {
if (ShouldAvoidNormalizingComputationsForInstruction(hlo)) {
continue;
}
bool comp_has_low_precision = false;
high_prec_count += CountSubshapesWithMatchingType(
comp->root_instruction()->shape(), HighPrecisionType());
int64_t low_prec_count_comp_root = CountSubshapesWithMatchingType(
comp->root_instruction()->shape(), LowPrecisionType());
if (low_prec_count_comp_root > 0) {
low_prec_count += low_prec_count_comp_root;
comp_has_low_precision = true;
}
for (auto* param : comp->parameter_instructions()) {
high_prec_count +=
CountSubshapesWithMatchingType(param->shape(), HighPrecisionType());
int64_t low_prec_count_comp_param =
CountSubshapesWithMatchingType(param->shape(), LowPrecisionType());
if (low_prec_count_comp_param > 0) {
low_prec_count += low_prec_count_comp_param;
comp_has_low_precision = true;
}
}
if (comp_has_low_precision) {
low_precision_called_comps.push_back(comp);
}
}
for (int i = 0; i < hlo->operand_count(); ++i) {
int64_t low_prec_count_in_operand = CountSubshapesWithMatchingType(
hlo->operand(i)->shape(), LowPrecisionType());
if (low_prec_count_in_operand > 0 &&
!float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
low_prec_count -= low_prec_count_in_operand;
high_prec_count += low_prec_count_in_operand;
}
}
if (!float_support_->SupportsLowPrecisionOutput(*hlo)) {
int64_t low_prec_count_in_hlo =
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType());
if (low_prec_count_in_hlo > 0) {
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
hlo, LowPrecisionType(), HighPrecisionType(), computation_));
low_prec_count -= low_prec_count_in_hlo;
high_prec_count += low_prec_count_in_hlo;
}
}
if (float_support_->SupportsMixedPrecisions(*hlo) || low_prec_count == 0 ||
high_prec_count == 0) {
return absl::OkStatus();
}
if (hlo->called_computations().empty() &&
CountSubshapesWithMatchingType(hlo->shape(), LowPrecisionType()) ==
ShapeLeafCount(hlo->shape())) {
bool can_use_low_prec = true;
for (int i = 0; i < hlo->operand_count(); ++i) {
if (CountSubshapesWithMatchingType(hlo->operand(i)->shape(),
LowPrecisionType()) ==
ShapeLeafCount(hlo->operand(i)->shape())) {
continue;
}
if ((float_support_->EffectiveOperandPrecisionIsLowPrecision(*hlo, i) ||
float_support_->EffectiveOperandPrecisionIsOutputPrecision(*hlo,
i)) &&
float_support_->SupportsLowPrecisionOperand(*hlo, i)) {
continue;
}
can_use_low_prec = false;
break;
}
if (can_use_low_prec) {
for (int i = 0; i < hlo->operand_count(); ++i) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, HighPrecisionType(), LowPrecisionType(), computation_));
}
return absl::OkStatus();
}
}
TF_RETURN_IF_ERROR(ChangeOutputTypeThenInsertConvertBack(
hlo, LowPrecisionType(), HighPrecisionType(), computation_));
for (int i = 0; i < hlo->operand_count(); ++i) {
TF_RETURN_IF_ERROR(InsertConvertBeforeOperand(
hlo, i, LowPrecisionType(), HighPrecisionType(), computation_));
}
return ConvertCalledComputations(hlo, low_precision_called_comps);
}
absl::Status FloatNormalizationVisitor::DefaultAction(HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kDomain ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kFusion ||
hlo->opcode() == HloOpcode::kConvert ||
hlo->opcode() == HloOpcode::kCall ||
hlo->opcode() == HloOpcode::kCustomCall ||
hlo->opcode() == HloOpcode::kWhile ||
hlo->opcode() == HloOpcode::kConditional ||
hlo->opcode() == HloOpcode::kBitcastConvert ||
hlo->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
if ((hlo->opcode() == HloOpcode::kSort ||
hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kReduceScatter) &&
hlo->shape().IsTuple()) {
return HandleMultipleOutputs(hlo);
}
return HandleInstruction(hlo);
}
absl::Status FloatNormalizationVisitor::Preprocess(HloInstruction* hlo) {
computation_ = hlo->parent();
return absl::OkStatus();
}
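// Computations reached through all-reduce or reduce-scatter must keep their
// signatures.  When such a computation is shared with a normalizing caller,
// it is cloned and the clone handed to the non-normalizing callers, leaving
// the original free to be rewritten.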
absl::flat_hash_set<HloComputation*>
CloneComputationsForNonNormalizingInstructions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
absl::flat_hash_set<HloComputation*> computations_to_skip;
for (const CallGraphNode& node : call_graph->nodes()) {
bool has_normalizing_users = false;
bool has_users_to_skip_normalization = false;
for (const CallSite& site : node.caller_callsites()) {
if (ShouldAvoidNormalizingComputationsForInstruction(
site.instruction())) {
has_users_to_skip_normalization = true;
} else {
has_normalizing_users = true;
}
}
if (!has_users_to_skip_normalization) {
continue;
}
if (!has_normalizing_users) {
computations_to_skip.insert(node.computation());
continue;
}
HloComputation* clone = module->DeepCloneComputation(node.computation());
for (const CallSite& site : node.caller_callsites()) {
if (ShouldAvoidNormalizingComputationsForInstruction(
site.instruction())) {
site.instruction()->ReplaceCalledComputations(
[&](HloComputation* called) {
return called == node.computation() ? clone : called;
});
}
}
computations_to_skip.insert(clone);
}
return computations_to_skip;
}
}
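// Runs the visitor over every computation (skipping clones reserved for
// non-normalizing callers) and, if anything changed, cleans up with tuple
// simplification and dead code elimination.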
absl::StatusOr<bool> FloatNormalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "FloatNormalization::Run() for " +
primitive_util::LowercasePrimitiveTypeName(
float_support_->LowPrecisionType()) +
", before:\n" + module->ToString());
auto computations_to_visit =
module->MakeComputationPostOrder(execution_threads);
auto computations_to_skip =
CloneComputationsForNonNormalizingInstructions(module, execution_threads);
FloatNormalizationVisitor visitor(float_support_, this);
for (auto* comp : computations_to_visit) {
if (computations_to_skip.contains(comp)) continue;
TF_RETURN_IF_ERROR(comp->Accept(&visitor));
}
XLA_VLOG_LINES(2, "FloatNormalization::Run() for " +
primitive_util::LowercasePrimitiveTypeName(
float_support_->LowPrecisionType()) +
", after:\n" + module->ToString());
if (visitor.changed()) {
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(tuple_simplifier.Run(module).status());
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module).status());
}
return visitor.changed();
}
} | #include "xla/service/float_normalization.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
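// FloatSupport stub for the tests: low-precision operands and outputs are
// allowed only for a fixed set of opcodes, and mixed precision only for
// add, tuple and get-tuple-element.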
class TestFloatSupport : public FloatSupport {
public:
explicit TestFloatSupport(PrimitiveType low_precision_type,
PrimitiveType high_precision_type)
: FloatSupport(low_precision_type, high_precision_type) {}
~TestFloatSupport() override = default;
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll) {
return true;
}
if (hlo.opcode() == HloOpcode::kDot) {
return operand_index == 0;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kDot || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll) {
return true;
}
return false;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement) {
return true;
}
return false;
}
};
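// Variant that only supports low precision on data movement and collective
// ops, never on compute.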
class TestFloatNoComputeSupport : public FloatSupport {
public:
explicit TestFloatNoComputeSupport(PrimitiveType low_precision_type,
PrimitiveType high_precision_type)
: FloatSupport(low_precision_type, high_precision_type) {}
~TestFloatNoComputeSupport() override = default;
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kReduceScatter) {
return true;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllToAll ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kReduceScatter) {
return true;
}
return false;
}
};
class FloatNormalizationTest : public HloTestBase {
protected:
FloatNormalizationTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool Normalize(HloModule* module, PrimitiveType low_precision_type = BF16,
PrimitiveType high_precision_type = F32) {
TestFloatSupport float_support(low_precision_type, high_precision_type);
FloatNormalization normalization(&float_support);
absl::StatusOr<bool> result = normalization.Run(module);
EXPECT_IS_OK(result.status());
    HloVerifier verifier(/*layout_sensitive=*/false,
                         /*allow_mixed_precision=*/true);
EXPECT_IS_OK(verifier.Run(module).status());
return result.value();
}
};
class FloatNormalizationF8Test
: public FloatNormalizationTest,
public ::testing::WithParamInterface<PrimitiveType> {};
INSTANTIATE_TEST_SUITE_P(FloatNormalizationF8Suite, FloatNormalizationF8Test,
::testing::Values(F8E3M4, F8E4M3, F8E5M2));
TEST_F(FloatNormalizationTest, NoopIfSupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, add0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveIfUnsupportedBF16) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, a, b));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, mul0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), mul1);
EXPECT_EQ(mul0->shape().element_type(), F32);
EXPECT_EQ(mul1->shape().element_type(), F32);
EXPECT_EQ(mul1->operand(0)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, ResolveUnsupportedMixedPrecisionSubtraction) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* sub0 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kSubtract, a, b));
HloInstruction* sub1 = builder.AddInstruction(
HloInstruction::CreateBinary(bf16_shape, HloOpcode::kSubtract, sub0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), sub1);
EXPECT_EQ(sub0->shape().element_type(), F32);
EXPECT_EQ(sub1->shape().element_type(), F32);
EXPECT_EQ(sub1->operand(0)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, ResolveUnsupportedMixedPrecisionReduce) {
Shape f32_input_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape f32_output_shape = ShapeUtil::MakeShape(F32, {4});
Shape bf16_scalar_shape = ShapeUtil::MakeShape(BF16, {});
auto reduce_comp_builder = HloComputation::Builder("reduce_comp");
auto reduce_comp_param0 = reduce_comp_builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_scalar_shape, "param0"));
auto reduce_comp_param1 = reduce_comp_builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_scalar_shape, "param1"));
reduce_comp_builder.AddInstruction(
HloInstruction::CreateBinary(bf16_scalar_shape, HloOpcode::kAdd,
reduce_comp_param0, reduce_comp_param1));
auto module = CreateNewVerifiedModule();
auto reduce_computation =
module->AddEmbeddedComputation(reduce_comp_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_input_shape, "a"));
HloInstruction* init = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_scalar_shape, "init"));
HloInstruction* reduce = builder.AddInstruction(HloInstruction::CreateReduce(
f32_output_shape, input, init, {0}, reduce_computation));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), reduce);
EXPECT_EQ(reduce->called_computations().size(), 1);
EXPECT_EQ(reduce->called_computations()[0]->num_parameters(), 2);
EXPECT_EQ(reduce->called_computations()[0]
->parameter_instruction(0)
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]
->parameter_instruction(1)
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]
->root_instruction()
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->shape().element_type(), F32);
EXPECT_EQ(reduce->operand(0), input);
EXPECT_EQ(input->shape().element_type(), F32);
EXPECT_EQ(reduce->operand(1)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(reduce->operand(1)->shape().element_type(), F32);
}
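// The bf16 element of a mixed-precision tuple all-reduce is widened to f32
// inside the op; the consuming get-tuple-element converts back to bf16.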
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({f32_shape, bf16_shape}), {a, b}, reduction,
      CollectiveDeviceList(),
      /*constrain_layout=*/false,
      /*channel_id=*/std::nullopt,
      /*use_global_device_ids=*/false));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape, crs, 1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(1)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllToAllToBF16) {
auto module = CreateNewVerifiedModule(TestName(), 2);
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
std::vector<ReplicaGroup> replica_groups(1);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(1);
HloInstruction* a2a = builder.AddInstruction(HloInstruction::CreateAllToAll(
ShapeUtil::MakeTupleShape({bf16_shape, bf16_shape}), {a, a},
      CollectiveDeviceList(replica_groups), /*constrain_layout=*/false,
      /*channel_id=*/std::nullopt));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction(), a2a);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {0}).element_type(), BF16);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {1}).element_type(), BF16);
EXPECT_EQ(a2a->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(a2a->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(a2a->operand(1)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(a2a->operand(1)->shape().element_type(), BF16);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleAllToAllToF32) {
auto module = CreateNewVerifiedModule(TestName(), 2);
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
std::vector<ReplicaGroup> replica_groups(1);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(1);
HloInstruction* a2a = builder.AddInstruction(HloInstruction::CreateAllToAll(
ShapeUtil::MakeTupleShape({bf16_shape, f32_shape}), {a, a},
      CollectiveDeviceList(replica_groups), /*constrain_layout=*/false,
      /*channel_id=*/std::nullopt));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {0}).element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(a2a->shape(), {1}).element_type(), F32);
EXPECT_EQ(a2a->operand(0)->opcode(), HloOpcode::kParameter);
EXPECT_EQ(a2a->operand(0)->shape().element_type(), F32);
EXPECT_EQ(a2a->operand(1)->opcode(), HloOpcode::kParameter);
EXPECT_EQ(a2a->operand(1)->shape().element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleSort) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {1024});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {1024});
  Shape s32_shape = ShapeUtil::MakeShape(S32, {1024});
HloInstruction* key = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "key"));
HloInstruction* value = builder.AddInstruction(
HloInstruction::CreateParameter(1, s32_shape, "value"));
TF_ASSERT_OK_AND_ASSIGN(
auto* sort,
MakeSortHlo(ShapeUtil::MakeTupleShape({bf16_shape, s32_shape}),
                  {key, value}, /*dimension_to_sort=*/0, /*is_stable=*/false,
                  &builder, module.get()));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape, sort, 0));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(sort->operand(0)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(sort->shape(), {0}).element_type(), F32);
}
TEST_F(FloatNormalizationTest, ResolveMixedPrecisionTupleSortRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {1024});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {1024});
HloInstruction* key = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "key"));
HloInstruction* value = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "value"));
TF_ASSERT_OK_AND_ASSIGN(
auto* sort,
MakeSortHlo(ShapeUtil::MakeTupleShape({bf16_shape, f32_shape}),
                  {key, value}, /*dimension_to_sort=*/0, /*is_stable=*/false,
                  &builder, module.get()));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(sort->operand(0)->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(sort->shape(), {0}).element_type(), F32);
EXPECT_NE(computation->root_instruction(), sort);
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(sort->to_apply()->parameter_instruction(1)->shape().element_type(),
F32);
auto users = sort->to_apply()->parameter_instruction(1)->users();
for (auto user : users) {
EXPECT_NE(user->opcode(), HloOpcode::kConvert);
}
}
TEST_F(FloatNormalizationTest, DoNotAddUnsupportedMixedPrecision) {
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
HloInstruction* dot = builder.AddInstruction(
HloInstruction::CreateDot(bf16_shape, a, b, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(dot->shape().element_type(), F32);
EXPECT_EQ(dot->operand(0)->shape().element_type(), F32);
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_EQ(dot->operand(1)->shape().element_type(), F32);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConvert);
}
TEST_F(FloatNormalizationTest, DoNotChangeBitcastConvert) {
auto builder = HloComputation::Builder(TestName());
Shape u16_shape = ShapeUtil::MakeShape(U16, {4, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, u16_shape, "a"));
builder.AddInstruction(HloInstruction::CreateBitcastConvert(bf16_shape, a));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
auto root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kBitcastConvert);
EXPECT_EQ(root->shape().element_type(), BF16);
EXPECT_EQ(root->operand(0)->shape().element_type(), U16);
}
TEST_P(FloatNormalizationF8Test, ResolveIfUnsupportedF8) {
PrimitiveType f8_type = GetParam();
auto builder = HloComputation::Builder(TestName());
Shape f16_shape = ShapeUtil::MakeShape(F16, {2, 4});
Shape f8_shape = ShapeUtil::MakeShape(f8_type, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f16_shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, f8_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f16_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(f8_shape, HloOpcode::kMultiply, a, b));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(f8_shape, HloOpcode::kMultiply, mul0, c));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get(), f8_type, F16));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kConvert);
EXPECT_EQ(computation->root_instruction()->operand(0), mul1);
EXPECT_EQ(mul0->shape().element_type(), F16);
EXPECT_EQ(mul1->shape().element_type(), F16);
EXPECT_EQ(mul1->operand(0)->opcode(), HloOpcode::kConvert);
}
class FloatNormalizationNoComputeSupportTest : public FloatNormalizationTest {
protected:
bool Normalize(HloModule* module, PrimitiveType low_precision_type = BF16,
PrimitiveType high_precision_type = F32) {
TestFloatNoComputeSupport float_support(low_precision_type,
high_precision_type);
FloatNormalization normalization(&float_support);
absl::StatusOr<bool> result = normalization.Run(module);
EXPECT_IS_OK(result.status());
    HloVerifier verifier(/*layout_sensitive=*/false,
                         /*allow_mixed_precision=*/true);
EXPECT_IS_OK(verifier.Run(module).status());
return result.value();
}
};
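// A bf16 multi-output all-reduce (and its bf16 to_apply computation) must be
// left untouched when the backend supports bf16 for collectives.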
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyMultiOutputAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
Shape bf16_shape_b = ShapeUtil::MakeShape(BF16, {16, 16});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape_b, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({bf16_shape_a, bf16_shape_b}), {a, b},
reduction,
      CollectiveDeviceList(),
      /*constrain_layout=*/false,
      /*channel_id=*/std::nullopt,
      /*use_global_device_ids=*/false));
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(bf16_shape_b, crs, 1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(1)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), BF16);
}
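// An all-reduce that may stay in bf16 and a reduce that must be widened share
// one to_apply computation; normalization has to clone it rather than mutate
// the shared copy.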
TEST_F(FloatNormalizationNoComputeSupportTest,
NormalizationClonesSharedApplyAllReduceAndReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
Shape bf16_shape_b = ShapeUtil::MakeShape(BF16, {2, 4, 2});
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape_b, "b"));
Shape bf16_scalar_shape = ShapeUtil::MakeShape(BF16, {});
HloInstruction* init = builder.AddInstruction(
HloInstruction::CreateParameter(2, bf16_scalar_shape, "init"));
HloInstruction* all_reduce = builder.AddInstruction(
HloInstruction::CreateAllReduce(bf16_shape_a, {a}, reduction,
                                      CollectiveDeviceList(),
                                      /*constrain_layout=*/false,
                                      /*channel_id=*/std::nullopt,
                                      /*use_global_device_ids=*/false));
HloInstruction* reduce = builder.AddInstruction(
HloInstruction::CreateReduce(bf16_shape_a, b, init, {2}, reduction));
builder.AddInstruction(HloInstruction::CreateBinary(
bf16_shape_a, HloOpcode::kAdd, all_reduce, reduce));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(all_reduce->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(all_reduce->to_apply()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(all_reduce->to_apply()->root_instruction()->shape().element_type(),
BF16);
EXPECT_EQ(reduce->called_computations().size(), 1);
EXPECT_EQ(reduce->called_computations()[0]
->root_instruction()
->shape()
.element_type(),
F32);
EXPECT_EQ(reduce->called_computations()[0]->root_instruction()->opcode(),
HloOpcode::kConvert);
EXPECT_EQ(reduce->shape().element_type(), F32);
}
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyAllReduce) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* crs = builder.AddInstruction(
HloInstruction::CreateAllReduce(bf16_shape_a, {a}, reduction,
                                      CollectiveDeviceList(),
                                      /*constrain_layout=*/false,
                                      /*channel_id=*/std::nullopt,
                                      /*use_global_device_ids=*/false));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
}
TEST_F(FloatNormalizationNoComputeSupportTest,
NoNormalizationForToApplyReduceScatter) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("sum");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(BF16, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(BF16, {}), HloOpcode::kAdd, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape_a = ShapeUtil::MakeShape(BF16, {2, 4});
Shape bf16_shape_scattered = ShapeUtil::MakeShape(BF16, {1, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape_a, "a"));
HloInstruction* crs =
builder.AddInstruction(HloInstruction::CreateReduceScatter(
bf16_shape_scattered, {a}, reduction,
          CollectiveDeviceList(),
          /*constrain_layout=*/false,
          /*channel_id=*/std::nullopt,
          /*use_global_device_ids=*/false, /*scatter_dimension=*/0));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(Normalize(module.get()));
EXPECT_EQ(computation->root_instruction()->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0)->shape().element_type(), BF16);
EXPECT_EQ(crs->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
}
TEST_F(FloatNormalizationTest, ConvertBeforeTuple) {
auto builder = HloComputation::Builder(TestName());
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
  HloInstruction* mul = builder.AddInstruction(
      HloInstruction::CreateBinary(bf16_shape, HloOpcode::kMultiply, a, b));
  HloInstruction* convert =
      builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, mul));
  builder.AddInstruction(HloInstruction::CreateVariadic(
      ShapeUtil::MakeTupleShape({f32_shape, bf16_shape}), HloOpcode::kTuple,
      {convert, mul}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(Normalize(module.get(), BF16));
EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kTuple);
EXPECT_EQ(computation->root_instruction()->operand(0)->shape().element_type(),
F32);
EXPECT_EQ(
computation->root_instruction()->shape().tuple_shapes(0).element_type(),
F32);
}
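// The pass must not disturb the entry computation's input/output aliasing.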
TEST_F(FloatNormalizationTest, KeepEntryInputOutputAlias) {
const std::string hlo_text = R"(
HloModule m,
input_output_alias={ {}: (1, {}, must-alias) },
entry_computation_layout={(bf16[1,64], bf16[1,64])->bf16[1,64]}
ENTRY e {
arg_0 = bf16[1,64] parameter(0)
output_param = bf16[1,64] parameter(1)
constant = bf16[] constant(2)
broadcast = bf16[1,64] broadcast(constant), dimensions={}
ROOT multiply = bf16[1,64] multiply(arg_0, broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
EXPECT_TRUE(Normalize(module.get(), BF16));
HloInputOutputAliasConfig& alias_config = module->input_output_alias_config();
ASSERT_FALSE(alias_config.ParameterHasAlias(0, {}));
ASSERT_TRUE(alias_config.ParameterHasAlias(1, {}));
ASSERT_TRUE(alias_config.OutputHasAlias({}));
EXPECT_EQ(alias_config.GetAliasedParameter({})->parameter_number, 1);
EXPECT_EQ(alias_config.GetAliasedParameter({})->kind,
HloInputOutputAliasConfig::AliasKind::kMustAlias);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/float_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/float_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6100ee2e-3b26-4512-b2b6-4fe46170fb8c | cpp | tensorflow/tensorflow | layout_normalization | third_party/xla/xla/service/layout_normalization.cc | third_party/xla/xla/service/layout_normalization_test.cc | #include "xla/service/layout_normalization.h"
#include <algorithm>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
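// Layout normalization visitor. Establishes the global postcondition that
// every instruction produces a shape with a descending (default) layout,
// inserting bitcasts to translate between original and normalized shapes.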
class LayoutNormalizationVisitor : public DfsHloRewriteVisitor {
public:
explicit LayoutNormalizationVisitor(
const CustomCallTransformer& custom_call_transformer = nullptr)
: custom_call_transformer_(custom_call_transformer) {}
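  // To handle a constant, rewrite the literal's layout in place and bitcast
  // the result back to the original shape.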
absl::Status HandleConstant(HloInstruction* hlo) override {
Literal& literal = *Cast<HloConstantInstruction>(hlo)->mutable_literal();
if (literal.shape().IsTuple()) {
return absl::OkStatus();
}
const Shape& shape = hlo->shape();
Shape normalized_shape = Normalize(shape);
*literal.mutable_shape_do_not_use() = normalized_shape;
literal.mutable_shape_do_not_use()
->mutable_layout()
->set_element_size_in_bits(0);
HloInstruction* bc_to_orig = MakeBitcastHlo(hlo, shape);
*hlo->mutable_shape() = normalized_shape;
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWithDifferentShape(bc_to_orig));
MarkAsChanged();
return absl::OkStatus();
}
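  // Slice is layout-preserving: permute the start/limit/stride attributes
  // into the normalized dimension order.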
absl::Status HandleSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
const Shape& s = hlo->shape();
const Shape& operand_shape = operand->shape();
TF_RET_CHECK(s.layout() == operand_shape.layout());
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
GetNormalizedInput(operand));
Shape normalized = Normalize(operand_shape);
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(hlo->shape().layout());
auto normalize_slice_attr = [&](absl::Span<int64_t const> input) {
return Permute(input, layout_as_permutation);
};
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_slice,
MakeSliceHlo(normalized_input,
normalize_slice_attr(hlo->slice_starts()),
normalize_slice_attr(hlo->slice_limits()),
normalize_slice_attr(hlo->slice_strides()),
&hlo->metadata()));
*normalized_slice->mutable_shape()->mutable_layout() =
normalized_input->shape().layout();
SetVisited(*normalized_slice);
HloInstruction* bc_to_orig = MakeBitcastHlo(normalized_slice, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
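  // Default action: round-trip the result through a bitcast to the
  // normalized shape and back, so every user sees a bitcast from a
  // normalized layout.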
absl::Status DefaultAction(HloInstruction* hlo) override {
if (!hlo->user_count()) {
return absl::OkStatus();
}
auto users = hlo->users();
auto shape = hlo->shape();
if (shape.IsTuple() || shape.IsToken()) {
return absl::OkStatus();
}
auto normalized_shape = Normalize(shape);
auto bc_to_normalized = MakeBitcastHlo(hlo, normalized_shape);
SetVisited(*bc_to_normalized);
auto bc_to_orig = MakeBitcastHlo(bc_to_normalized, shape);
TF_RETURN_IF_ERROR(hlo->ReplaceUsesWith(users, bc_to_orig));
MarkAsChanged();
return absl::OkStatus();
}
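  // Concatenate: map the concatenation dimension through the inverse of the
  // layout permutation.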
absl::Status HandleConcatenate(HloInstruction* hlo) override {
const Shape& s = hlo->shape();
int64_t orig_concat_dim = hlo->dimensions(0);
std::vector<HloInstruction*> normalized_inputs;
for (HloInstruction* operand : hlo->mutable_operands()) {
TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
normalized_inputs.push_back(normalized_input);
}
auto normalized_shape = Normalize(s);
auto layout_as_permutation = ToTransposeDimensions(s.layout());
int64_t normalized_concat_dim =
InversePermutation(layout_as_permutation)[orig_concat_dim];
auto normalized_concat =
hlo->AddInstruction(HloInstruction::CreateConcatenate(
normalized_shape, normalized_inputs, normalized_concat_dim));
SetVisited(*normalized_concat);
auto bc_to_orig = MakeBitcastHlo(normalized_concat, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
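  // Reduce-window is layout-preserving; only the window dimensions need to
  // be permuted.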
absl::Status HandleReduceWindow(HloInstruction* hlo) override {
if (hlo->shape().IsTuple()) {
return absl::OkStatus();
}
HloInstruction* operand = hlo->mutable_operand(0);
TF_RET_CHECK(hlo->shape().layout() == operand->shape().layout());
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
GetNormalizedInput(operand));
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(hlo->shape().layout());
std::vector<WindowDimension> window_dimensions;
for (const WindowDimension& d : hlo->window().dimensions()) {
window_dimensions.push_back(d);
}
window_dimensions = Permute(window_dimensions, layout_as_permutation);
Window new_window;
for (const WindowDimension& d : window_dimensions) {
*new_window.add_dimensions() = d;
}
TF_ASSIGN_OR_RETURN(
HloInstruction * rw,
MakeReduceWindowHlo(normalized_input, hlo->mutable_operand(1),
new_window, hlo->called_computations()[0],
&hlo->metadata()));
SetVisited(*rw);
HloInstruction* bc_to_orig = MakeBitcastHlo(rw, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
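  // Broadcast: remap the broadcast dimensions from the operand's layout
  // order into the normalized output order.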
absl::Status HandleBroadcast(HloInstruction* hlo) override {
VLOG(3) << "Input broadcast: " << hlo->ToString();
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
auto normalized_shape = Normalize(s);
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(operand->shape().layout());
std::vector<int64_t> orig_output_layout_as_permutation =
ToTransposeDimensions(s.layout());
std::vector<int64_t> br_dimensions;
if (!hlo->dimensions().empty()) {
br_dimensions.reserve(hlo->dimensions().size());
auto inverse_perm = InversePermutation(orig_output_layout_as_permutation);
for (int64_t dim :
ComposePermutations(hlo->dimensions(), layout_as_permutation)) {
br_dimensions.push_back(inverse_perm[dim]);
}
}
auto normalized_broadcast = MakeBroadcastHlo(
normalized_input, br_dimensions, normalized_shape, &hlo->metadata());
SetVisited(*normalized_broadcast);
VLOG(3) << "Generated broadcast: " << normalized_broadcast->ToString();
auto bc_to_orig = MakeBitcastHlo(normalized_broadcast, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
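  // Iota: remap the iota dimension into the normalized output order.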
absl::Status HandleIota(HloInstruction* hlo) override {
VLOG(3) << "Input iota: " << hlo->ToString();
auto s = hlo->shape();
auto normalized_shape = Normalize(s);
std::vector<int64_t> orig_output_layout_as_permutation =
ToTransposeDimensions(s.layout());
int64_t new_iota_dimension = InversePermutation(
orig_output_layout_as_permutation)[hlo->dimensions()[0]];
auto normalized_iota = hlo->AddInstruction(
HloInstruction::CreateIota(normalized_shape, new_iota_dimension));
SetVisited(*normalized_iota);
VLOG(3) << "Generated iota: " << normalized_iota->ToString();
auto bc_to_orig = MakeBitcastHlo(normalized_iota, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
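  // Bitcast-convert is layout-preserving only when it does not change the
  // rank.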
absl::Status HandleBitcastConvert(HloInstruction* hlo) override {
if (hlo->shape().rank() == hlo->operand(0)->shape().rank()) {
return HandleElementwiseUnary(hlo);
}
return DefaultAction(hlo);
}
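  // Pushes the bitcast below an elementwise unary op: the op is re-created
  // on the normalized input and the result is bitcast back.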
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
auto operand_shape = operand->shape();
TF_RET_CHECK(
Layout::Equal().IgnoreElementSize()(s.layout(), operand_shape.layout()))
<< "Unexpected non-layout preserving elementwise unary: "
<< hlo->ToString();
TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
PrimitiveType to_element_type = s.element_type();
HloInstruction* new_unary;
if (hlo->opcode() == HloOpcode::kConvert) {
new_unary =
MakeConvertToHlo(normalized_input, to_element_type, &hlo->metadata());
} else if (hlo->opcode() == HloOpcode::kReducePrecision) {
new_unary =
MakeReducePrecisionHlo(normalized_input, hlo->exponent_bits(),
hlo->mantissa_bits(), &hlo->metadata());
} else if (hlo->opcode() == HloOpcode::kBitcastConvert) {
new_unary = MakeBitcastConvertToHlo(normalized_input, to_element_type,
&hlo->metadata());
} else {
TF_ASSIGN_OR_RETURN(
new_unary,
MakeUnaryHlo(hlo->opcode(), normalized_input, &hlo->metadata()));
}
if (normalized_input != new_unary) {
SetVisited(*new_unary);
}
auto bc_to_orig = MakeBitcastHlo(new_unary, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
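  // Same as the unary case: both operands are normalized, the op is
  // re-created, and the result is bitcast back.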
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
auto s = hlo->shape();
auto a = hlo->mutable_operand(0);
auto b = hlo->mutable_operand(1);
auto layout_equal = Layout::Equal();
if (hlo->opcode() == HloOpcode::kCompare) {
layout_equal.IgnoreElementSize();
}
TF_RET_CHECK(layout_equal(a->shape().layout(), s.layout()));
TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(a));
TF_ASSIGN_OR_RETURN(auto b0, GetNormalizedInput(b));
HloInstruction* new_binary;
if (hlo->opcode() == HloOpcode::kCompare) {
TF_ASSIGN_OR_RETURN(new_binary,
MakeCompareHlo(hlo->comparison_direction(), a0, b0,
&hlo->metadata()));
} else {
TF_ASSIGN_OR_RETURN(
new_binary, MakeBinaryHlo(hlo->opcode(), a0, b0, &hlo->metadata()));
}
SetVisited(*new_binary);
auto bc_to_orig = MakeBitcastHlo(new_binary, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
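  // ReshapeDecomposer guarantees the reshape is a bitcast, so it can be
  // re-created directly on the normalized shape.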
absl::Status HandleReshape(HloInstruction* hlo) override {
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
TF_RET_CHECK(ShapeUtil::ReshapeIsBitcast(s, operand->shape()));
TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
auto normalized_reshape_s = Normalize(s);
TF_ASSIGN_OR_RETURN(auto new_reshape,
MakeReshapeHlo(normalized_reshape_s, a0));
SetVisited(*new_reshape);
auto bc_to_orig = MakeBitcastHlo(new_reshape, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
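  // Scatter: permute the dimension numbers of operands, updates and indices
  // separately, since each may carry a different layout. A -1 sentinel marks
  // inserted window dims while the permuted window dimensions are rebuilt.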
absl::Status HandleScatter(HloInstruction* hlo) override {
auto* scatter = Cast<HloScatterInstruction>(hlo);
std::vector<HloInstruction*> normalized_operands;
normalized_operands.reserve(scatter->scatter_operand_count());
Shape operand_shape = scatter->scatter_operands().front()->shape();
for (HloInstruction* operand : scatter->scatter_operands()) {
if (operand->shape().layout() != operand_shape.layout()) {
return FailedPrecondition(
"All scatter operands must have the same layout");
}
TF_ASSIGN_OR_RETURN(auto normalized_operand, GetNormalizedInput(operand));
normalized_operands.push_back(normalized_operand);
}
std::vector<HloInstruction*> normalized_updates;
normalized_updates.reserve(scatter->scatter_operand_count());
Shape update_shape = scatter->scatter_updates().front()->shape();
for (HloInstruction* operand : scatter->scatter_updates()) {
if (operand->shape().layout() != update_shape.layout()) {
return FailedPrecondition(
"All scatter updates must have the same layout");
}
TF_ASSIGN_OR_RETURN(auto normalized_update, GetNormalizedInput(operand));
normalized_updates.push_back(normalized_update);
}
const auto& dims = scatter->scatter_dimension_numbers();
if (scatter->scatter_updates().front()->shape().rank() -
dims.update_window_dims_size() >
1) {
return FailedPrecondition(
"There should be just a single scatter dimension. Make sure to run "
"ScatterSimplifier before LayoutNormalization");
}
TF_ASSIGN_OR_RETURN(auto normalized_indices,
GetNormalizedInput(scatter->scatter_indices()));
auto indices_permutation = InversePermutation(
ToTransposeDimensions(scatter->scatter_indices()->shape().layout()));
auto layout_permutation =
ToTransposeDimensions(scatter->scatter_operands()[0]->shape().layout());
auto operand_permutation = InversePermutation(layout_permutation);
auto update_permutation = InversePermutation(
ToTransposeDimensions(scatter->scatter_updates()[0]->shape().layout()));
ScatterDimensionNumbers normalized_dims;
normalized_dims.set_index_vector_dim(
indices_permutation[dims.index_vector_dim()]);
for (int64_t dim : dims.scatter_dims_to_operand_dims()) {
normalized_dims.add_scatter_dims_to_operand_dims(
operand_permutation[dim]);
}
std::vector<int64_t> normalized_update_window_dims;
normalized_update_window_dims.reserve(dims.update_window_dims_size());
for (int64_t dim : dims.update_window_dims()) {
normalized_update_window_dims.push_back(update_permutation[dim]);
}
std::vector<int64_t> window_dimensions(operand_permutation.size());
for (int64_t i = 0, j = 0, k = 0; i < window_dimensions.size(); ++i) {
if (j < dims.inserted_window_dims_size() &&
dims.inserted_window_dims(j) == i) {
window_dimensions[i] = -1;
++j;
} else {
window_dimensions[i] = normalized_update_window_dims[k];
++k;
}
}
std::vector<int64_t> permuted_window_dimensions =
ComposePermutations(window_dimensions, layout_permutation);
for (int64_t i = 0; i < permuted_window_dimensions.size(); ++i) {
if (permuted_window_dimensions[i] == -1) {
normalized_dims.add_inserted_window_dims(i);
} else {
normalized_dims.add_update_window_dims(permuted_window_dimensions[i]);
}
}
auto normalized_shape = normalized_operands.front()->shape();
if (scatter->shape().IsTuple()) {
std::vector<Shape> tuple_shapes;
tuple_shapes.reserve(normalized_operands.size());
for (HloInstruction* operand : normalized_operands) {
tuple_shapes.push_back(operand->shape());
}
normalized_shape = ShapeUtil::MakeTupleShape(tuple_shapes);
}
auto normalized_scatter = hlo->AddInstruction(HloInstruction::CreateScatter(
normalized_shape, normalized_operands, normalized_indices,
normalized_updates, scatter->to_apply(), normalized_dims,
scatter->indices_are_sorted(), scatter->unique_indices()));
SetVisited(*normalized_scatter);
auto bc_to_orig = MakeBitcastHlo(normalized_scatter, scatter->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(scatter, bc_to_orig));
return absl::OkStatus();
}
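  // Transpose: a bitcasting transpose is folded into a single bitcast;
  // otherwise the permutation is rewritten relative to the normalized
  // layouts.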
absl::Status HandleTranspose(HloInstruction* hlo) override {
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
auto operand_s = operand->shape();
TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
auto normalized_shape = Normalize(s);
VLOG(3) << "Input transpose: " << hlo->ToString();
if (!ShapeUtil::TransposeIsBitcast(s, operand_s, hlo->dimensions())) {
auto l0_perm =
InversePermutation(ToTransposeDimensions(operand_s.layout()));
auto l_perm = ToTransposeDimensions(s.layout());
auto t = ComposePermutations(l0_perm, hlo->dimensions());
auto dimensions = ComposePermutations(t, l_perm);
auto normalized_transpose = hlo->AddInstruction(
HloInstruction::CreateTranspose(normalized_shape, a0, dimensions));
SetVisited(*normalized_transpose);
VLOG(3) << "Generated normalized physical transpose: "
<< normalized_transpose->ToString();
auto bc_to_orig = MakeBitcastHlo(normalized_transpose, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
} else {
auto bc_to_orig = MakeBitcastHlo(a0, s, &hlo->metadata());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
}
return absl::OkStatus();
}
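  // A copy that only changes the physical layout becomes a logical transpose
  // on normalized shapes.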
absl::Status HandleCopy(HloInstruction* hlo) override {
VLOG(3) << "Processing copy: " << hlo->ToString();
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
auto s_normalized = Normalize(s);
auto l0_perm =
InversePermutation(ToTransposeDimensions(operand->shape().layout()));
auto l_perm = ToTransposeDimensions(s.layout());
auto dimensions = ComposePermutations(l0_perm, l_perm);
auto t = hlo->AddInstruction(
HloInstruction::CreateTranspose(s_normalized, a0, dimensions));
SetVisited(*t);
auto bc_to_orig = MakeBitcastHlo(t, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
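  // Reverse: map the reversed dimensions through the inverse layout
  // permutation.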
absl::Status HandleReverse(HloInstruction* hlo) override {
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(hlo->shape().layout());
std::vector<int64_t> new_dimensions;
new_dimensions.reserve(hlo->dimensions().size());
auto inverse_perm = InversePermutation(layout_as_permutation);
for (int64_t dim : hlo->dimensions()) {
new_dimensions.push_back(inverse_perm[dim]);
}
absl::c_sort(new_dimensions);
auto normalized_reverse = hlo->AddInstruction(
HloInstruction::CreateReverse(a0->shape(), a0, new_dimensions));
SetVisited(*normalized_reverse);
auto bc_to_orig = MakeBitcastHlo(normalized_reverse, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
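  // Pad is layout-preserving; only the padding config needs permuting.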
absl::Status HandlePad(HloInstruction* hlo) override {
auto s = hlo->shape();
auto operand = hlo->mutable_operand(0);
auto padded_by = hlo->mutable_operand(1);
auto padded_config = hlo->padding_config();
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
GetNormalizedInput(operand));
auto s_normalized = Normalize(s);
auto layout_as_permutation = ToTransposeDimensions(s.layout());
PaddingConfig new_padding;
new_padding.mutable_dimensions()->Reserve(s_normalized.dimensions_size());
for (int dim = 0; dim < s_normalized.dimensions_size(); dim++) {
new_padding.add_dimensions();
}
auto inverse_perm = InversePermutation(layout_as_permutation);
for (int dim = 0; dim < s.dimensions_size(); dim++) {
int tr_dim = static_cast<int>(inverse_perm[dim]);
*new_padding.mutable_dimensions(tr_dim) = padded_config.dimensions(dim);
}
auto padded_normalized = hlo->AddInstruction(HloInstruction::CreatePad(
s_normalized, normalized_input, padded_by, new_padding));
SetVisited(*padded_normalized);
auto bc_to_orig = MakeBitcastHlo(padded_normalized, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
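  // Custom calls are handled by the user-provided transformer when present;
  // otherwise fall back to the default bitcast round-trip.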
absl::Status HandleCustomCall(HloInstruction* hlo) override {
if (custom_call_transformer_) {
TF_ASSIGN_OR_RETURN(
std::optional<HloInstruction*> transformed_custom_call,
custom_call_transformer_(Cast<HloCustomCallInstruction>(hlo)));
if (transformed_custom_call) {
SetVisited(*(*transformed_custom_call)->operand(0));
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, *transformed_custom_call));
return absl::OkStatus();
}
}
return DefaultAction(hlo);
}
absl::Status HandleSelect(HloInstruction* hlo) override {
return HandleTernary(hlo);
}
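  // Dynamic-slice is layout-preserving: permute the start indices and the
  // slice sizes.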
absl::Status HandleDynamicSlice(HloInstruction* hlo) override {
const Shape& s = hlo->shape();
HloInstruction* operand = hlo->mutable_operand(0);
const Shape& operand_shape = operand->shape();
TF_RET_CHECK(s.layout() == operand_shape.layout());
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
GetNormalizedInput(operand));
Shape normalized = Normalize(operand_shape);
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(hlo->shape().layout());
std::vector<HloInstruction*> new_start_indices =
GetNewStartIdxs(hlo, 1, layout_as_permutation);
auto normalize_slice_attr = [&](absl::Span<int64_t const> input) {
return Permute(input, layout_as_permutation);
};
TF_ASSIGN_OR_RETURN(
HloInstruction * normalized_dynamic_slice,
MakeDynamicSliceHlo(normalized_input, new_start_indices,
normalize_slice_attr(hlo->dynamic_slice_sizes()),
&hlo->metadata()));
*normalized_dynamic_slice->mutable_shape()->mutable_layout() =
normalized_input->shape().layout();
SetVisited(*normalized_dynamic_slice);
HloInstruction* bc_to_orig = MakeBitcastHlo(normalized_dynamic_slice, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
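  // Dynamic-update-slice: normalize operand and update, and permute the
  // start indices accordingly.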
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override {
const Shape& s = hlo->shape();
HloInstruction* operand = hlo->mutable_operand(0);
HloInstruction* update = hlo->mutable_operand(1);
const Shape& operand_shape = operand->shape();
TF_RET_CHECK(s.layout() == operand_shape.layout());
std::vector<int64_t> layout_as_permutation =
ToTransposeDimensions(hlo->shape().layout());
TF_ASSIGN_OR_RETURN(HloInstruction * new_operand,
GetNormalizedInput(operand));
TF_ASSIGN_OR_RETURN(HloInstruction * new_update,
GetNormalizedInput(update));
std::vector<HloInstruction*> new_start_indices =
GetNewStartIdxs(hlo, 2, layout_as_permutation);
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dus,
MakeDynamicUpdateSliceHlo(new_operand, new_update, new_start_indices,
&hlo->metadata()));
*new_dus->mutable_shape()->mutable_layout() = new_operand->shape().layout();
SetVisited(*new_dus);
HloInstruction* bc_to_orig = MakeBitcastHlo(new_dus, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
absl::Status HandleClamp(HloInstruction* hlo) override {
return HandleTernary(hlo);
}
private:
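  // Common path for clamp and select: normalize all three operands, infer
  // the new shape, and bitcast the result back.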
absl::Status HandleTernary(HloInstruction* hlo) {
Shape s = hlo->shape();
HloOpcode opcode = hlo->opcode();
TF_RET_CHECK(opcode == HloOpcode::kClamp || opcode == HloOpcode::kSelect);
HloInstruction* arg0 = hlo->mutable_operand(0);
HloInstruction* arg1 = hlo->mutable_operand(1);
HloInstruction* arg2 = hlo->mutable_operand(2);
if (opcode == HloOpcode::kClamp) {
TF_RET_CHECK(arg1->shape().layout() == s.layout());
} else if (opcode == HloOpcode::kSelect) {
TF_RET_CHECK(arg1->shape().layout() == s.layout());
TF_RET_CHECK(arg2->shape().layout() == s.layout());
} else {
TF_RET_CHECK(false);
}
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_arg0,
GetNormalizedInput(arg0));
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_arg1,
GetNormalizedInput(arg1));
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_arg2,
GetNormalizedInput(arg2));
TF_ASSIGN_OR_RETURN(Shape new_shape, ShapeInference::InferTernaryOpShape(
opcode, normalized_arg0,
normalized_arg1, normalized_arg2));
HloInstruction* normalized = hlo->parent()->AddInstruction(
HloInstruction::CreateTernary(new_shape, opcode, normalized_arg0,
normalized_arg1, normalized_arg2));
hlo->SetupDerivedInstruction(normalized);
SetVisited(*normalized);
HloInstruction* bc_to_orig = MakeBitcastHlo(normalized, s);
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
return absl::OkStatus();
}
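  // Collects the scalar start-index operands (beginning at param_offset) and
  // permutes them into normalized order.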
std::vector<HloInstruction*> GetNewStartIdxs(
HloInstruction* hlo, int param_offset,
      const std::vector<int64_t>& layout_as_permutation) {
std::vector<HloInstruction*> start_indices;
for (int i = param_offset; i < hlo->operand_count(); i++) {
start_indices.push_back(hlo->mutable_operand(i));
}
std::vector<HloInstruction*> permuted_start_indices =
Permute(start_indices, layout_as_permutation);
return permuted_start_indices;
}
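  // Converts a layout into the transpose dimensions needed to reach it from
  // the identity layout: the reversed minor-to-major order.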
std::vector<int64_t> ToTransposeDimensions(const Layout& l) {
std::vector<int64_t> out(l.minor_to_major().begin(),
l.minor_to_major().end());
absl::c_reverse(out);
return out;
}
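  // Unwraps the bitcast guaranteed by the local invariant, checking that its
  // operand already has the default descending layout.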
absl::StatusOr<HloInstruction*> GetNormalizedInput(HloInstruction* hlo) {
TF_RET_CHECK(hlo->opcode() == HloOpcode::kBitcast)
<< "Unexpected HLO input: " << hlo->ToString();
auto input = hlo->mutable_operand(0);
auto input_shape = input->shape();
TF_RET_CHECK(Layout::Equal().IgnoreElementSize()(
input_shape.layout(),
LayoutUtil::GetDefaultLayoutForShape(input_shape)));
return input;
}
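  // Returns a shape with a descending layout but the same physical layout.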
Shape Normalize(const Shape& s) {
return ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(s);
}
CustomCallTransformer custom_call_transformer_;
};
}  // namespace
absl::StatusOr<bool> LayoutNormalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return LayoutNormalizationVisitor{custom_call_transformer_}.RunOnModule(
module, execution_threads);
}
} | #include "xla/service/layout_normalization.h"
#include <functional>
#include <optional>
#include <utility>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
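// Runs LayoutNormalization on `hlo` and FileChecks the rewritten module
// against `expected`.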
class LayoutNormalizationTest : public HloTestBase {
public:
void CheckLayoutNormalization(
absl::string_view hlo, std::optional<absl::string_view> expected,
std::function<void(HloModule*)> after_pass_checks = nullptr) {
RunAndFilecheckHloRewrite(hlo, LayoutNormalization{}, expected,
after_pass_checks);
}
};
TEST_F(LayoutNormalizationTest, TestDefault) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,4]{0,1} parameter(0)
ROOT o = f32[5,4]{0,1} abs(p)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestUnary) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,4]{0,1} parameter(0)
a = f32[5,4]{0,1} abs(p)
ROOT b = f32[5,4]{0,1} sqrt(a)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestUnaryDegenerateDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,1,4,1]{0,1,2,3} parameter(0)
ROOT o = f32[5,1,4,1]{0,1,2,3} abs(p)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestBinary) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[5,4]{0,1} parameter(0)
b = f32[5,4]{0,1} parameter(1)
c = add(a, b)
ROOT out = sqrt(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Reshape) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[5,4]{0,1} parameter(0)
ROOT b = f32[5,2,2]{0,2,1} reshape(a)
})";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Transpose) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[5,4]{1,0} parameter(0)
t = f32[4,5]{0,1} transpose(a), dimensions={1,0}
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PhysicalTranspose) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f64[3,4,5]{0,1,2} parameter(0)
t = f32[5,4,3]{2,0,1} transpose(p), dimensions={2,1,0}
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PhysicalTransposeDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,4,5,1]{0,1,2,3} parameter(0)
t = f32[5,1,4,3]{3,2,0,1} transpose(p), dimensions={2,3,1,0}
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Copy) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,4,5]{0,1,2} parameter(0)
t = f32[3,4,5]{2,1,0} copy(p)
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, CopyDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,1,4,1,5]{0,1,2,3,4} parameter(0)
t = f32[3,1,4,1,5]{4,3,2,1,0} copy(p)
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Broadcast) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,5]{0,1} parameter(0)
b = f32[4,3,2,5]{0,1,2,3} broadcast(a), dimensions={0,3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastOperandLayoutNotInverseOfItself) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,3,5]{0,2,1} parameter(0)
b = f32[4,3,2,5]{0,1,2,3} broadcast(a), dimensions={0,1,3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastCustomOutputLayout) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3]{1,0} parameter(0)
b = f32[2,4,3]{1,2,0} broadcast(a), dimensions={0,2}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastUnsortedDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3]{1,0} parameter(0)
b = f32[3,4,2]{2,1,0} broadcast(a), dimensions={2,0}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastCustomOutputLayoutWithDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[9]{0} parameter(0)
b = f32[2,1,4,9]{2,0,1,3} broadcast(a), dimensions={3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastWithDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,4,5]{0,1,2} parameter(0)
b = f32[1,4,3,1,2,5,1]{0,1,2,3,4,5,6} broadcast(a), dimensions={0,1,5}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, IotaCustomOutputLayout) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,4,3]{1,2,0} iota(), iota_dimension=2
ROOT out = abs(a)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Concatenate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,5]{0,1} parameter(0)
b = f32[4,5]{0,1} parameter(1)
c = f32[8,5]{0,1} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,4,5]{0,1,2} parameter(0)
b = f32[1,4,5]{0,1,2} parameter(1)
c = f32[2,4,5]{0,1,2} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOneDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5]{0,1} parameter(0)
b = f32[2,5]{0,1} parameter(1)
c = f32[3,5]{0,1} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOneDegenerateDimOfMany) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5,1,4]{0,1,3,2} parameter(0)
b = f32[1,5,1,4]{0,1,3,2} parameter(1)
c = f32[2,5,1,4]{0,1,3,2} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOtherDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5]{0,1} parameter(0)
b = f32[1,5]{0,1} parameter(1)
c = f32[1,10]{0,1} concatenate(a, b), dimensions={1}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Reverse) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3,5]{0,2,1} parameter(0)
b = f32[2,3,5]{0,2,1} reverse(a), dimensions={0,1}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ReverseDegenerateDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5]{0,2,1} parameter(0)
b = f32[1,3,5]{1,2,0} reverse(a), dimensions={0,1}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Pad) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5,7]{0,2,1,3} parameter(0)
z = f32[] constant(0)
b = f32[1,13,15,7]{0,2,1,3} pad(a, z), padding=0_0x5_5x5_5x0_0
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PadDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5]{0,2,1} parameter(0)
z = f32[] constant(0)
b = f32[11,13,15]{0,2,1} pad(a, z), padding=5_5x5_5x5_5
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PadOtherDimDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5,1]{0,2,1,3} parameter(0)
z = f32[] constant(0)
b = f32[11,13,7,1]{0,2,1,3} pad(a, z), padding=5_5x5_5x1_1x0_0
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ReduceWindow) {
const char* hlo = R"(
HloModule R2Window
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2Window {
operand = f32[256,384]{0,1} parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[256,384]{0,1} reduce-window(operand, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Constant) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,4]{0,1} parameter(0)
c = f32[5,4]{0,1} constant({...})
ROOT o = f32[5,4]{0,1} add(p, c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConstantAvoidRevisitOfUser) {
const char* hlo = R"(
HloModule module
ENTRY main {
c = f32[5,4]{0,1} constant({...})
s = f32[5,4]{0,1} sine(c)
t = f32[5,4]{0,1} tanh(s)
ROOT o = f32[5,4]{0,1} add(s, t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Slice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{1,3,2,0} parameter(0)
ROOT converted = f32[1,4,6,6]{1,3,2,0} slice(input), slice={[0:1],[0:4],[0:6],[0:6]}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Select) {
const char* hlo = R"(
HloModule module
ENTRY main {
lhs = f32[1,17,9,9]{1,3,2,0} parameter(0)
rhs = f32[1,17,9,9]{1,3,2,0} parameter(1)
p = pred[1,17,9,9]{1,3,2,0} parameter(2)
ROOT out = f32[1,17,9,9]{1,3,2,0} select(p, lhs, rhs), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, SelectScalarPredicate) {
const char* hlo = R"(
HloModule module
ENTRY main {
lhs = f32[1,17,9,9]{1,3,2,0} parameter(0)
rhs = f32[1,17,9,9]{1,3,2,0} parameter(1)
p = pred[] parameter(2)
ROOT out = f32[1,17,9,9]{1,3,2,0} select(p, lhs, rhs), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicSlice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[3,4,32]{1,0,2} parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT out = f32[1,4,32]{1,0,2} dynamic-slice(input, p1, p2, p3), dynamic_slice_sizes={1,4,32}, metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicSliceHasDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,4,32]{1,0,2} parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT out = f32[1,4,32]{1,0,2} dynamic-slice(input, p1, p2, p3), dynamic_slice_sizes={1,4,32}, metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
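
// dynamic-update-slice with a degenerate dimension in the operand and
// update shapes; a non-degenerate variant follows.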
TEST_F(LayoutNormalizationTest, DynamicUpdateSlice) {
const char* hlo = R"(
HloModule m
ENTRY main {
to_update = f32[3,1,32]{1,0,2} parameter(0)
updates = f32[1,1,32]{1,0,2} parameter(1)
p0 = s32[] parameter(2)
p1 = s32[] parameter(3)
p2 = s32[] parameter(4)
ROOT out = f32[3,1,32]{1,0,2} dynamic-update-slice(to_update, updates, p0, p1, p2), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicUpdateSliceNonDeg) {
const char* hlo = R"(
HloModule m
ENTRY main {
to_update = f32[5,3,32]{1,0,2} parameter(0)
updates = f32[1,1,32]{1,0,2} parameter(1)
p0 = s32[] parameter(2)
p1 = s32[] parameter(3)
p2 = s32[] parameter(4)
ROOT out = f32[5,3,32]{1,0,2} dynamic-update-slice(to_update, updates, p0, p1, p2), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
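
// clamp with tensor bounds; the scalar-bound variants below need no layout
// handling for the bounds themselves, since scalars carry no layout.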
TEST_F(LayoutNormalizationTest, Clamp) {
const char* hlo = R"(
HloModule m
ENTRY main {
lb = f32[64,1,32]{1,0,2} parameter(0)
in = f32[64,1,32]{1,0,2} parameter(1)
ub = f32[64,1,32]{1,0,2} parameter(2)
ROOT out = f32[64,1,32]{1,0,2} clamp(f32[64,1,32]{1,0,2} lb, f32[64,1,32]{1,0,2} in, f32[64,1,32]{1,0,2} ub), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ClampScalarBounds) {
const char* hlo = R"(
HloModule m
ENTRY main {
lb = f32[] parameter(0)
in = f32[64,1,32]{1,0,2} parameter(1)
ub = f32[] parameter(2)
ROOT out = f32[64,1,32]{1,0,2} clamp(f32[] lb, f32[64,1,32]{1,0,2} in, f32[] ub), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ClampScalarLb) {
const char* hlo = R"(
HloModule m
ENTRY main {
lb = f32[] parameter(0)
in = f32[64,1,32]{1,0,2} parameter(1)
ub = f32[64,1,32]{1,0,2} parameter(2)
ROOT out = f32[64,1,32]{1,0,2} clamp(f32[] lb, f32[64,1,32]{1,0,2} in, f32[64,1,32]{1,0,2} ub), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
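
// bitcast-convert that changes the element width also changes the rank
// (u32[4,2] <-> u64[4]), adding or removing the minor-most dimension.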
TEST_F(LayoutNormalizationTest, BitcastConvertToBiggerType) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = u32[4,2]{0,1} parameter(0)
ROOT out = u64[4]{0} bitcast-convert(u32[4,2]{0,1} p0), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BitcastConvertToSmallerType) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = u64[4]{0} parameter(0)
ROOT out = u32[4,2]{0,1} bitcast-convert(u64[4]{0} p0), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
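
// The scatter tests below run ScatterSimplifier before normalization, so
// the pass only sees scatters in canonical form.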
TEST_F(LayoutNormalizationTest, Scatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
ROOT maximum.13 = s16[] maximum(Arg_0.11, Arg_1.12)
}
ENTRY main.17 {
p0 = s16[3,2,2,14,16]{0,1,4,3,2} parameter(0)
p1 = s32[2,11]{0,1} parameter(1)
p2 = s16[11,3,5]{2,0,1} parameter(2)
ROOT scatter = s16[3,2,2,14,16]{0,1,4,3,2} scatter(p0, p1, p2), update_window_dims={1,2}, inserted_window_dims={1,2,3}, scatter_dims_to_operand_dims={4,0}, index_vector_dim=0, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
TEST_F(LayoutNormalizationTest, SimplifiedScatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
ROOT maximum.13 = s16[] maximum(Arg_0.11, Arg_1.12)
}
ENTRY main.17 {
p0 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(0)
p1 = s32[528,2]{1,0} parameter(1)
p2 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(2)
ROOT scatter = s16[16,3,2,2,14]{0,4,3,2,1} scatter(p0, p1, p2), update_window_dims={1,2,3,4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
TEST_F(LayoutNormalizationTest, VariadicScatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
Arg_2.13 = s16[] parameter(2)
Arg_3.14 = s16[] parameter(3)
maximum.15 = s16[] maximum(Arg_0.11, Arg_1.12)
maximum.16 = s16[] maximum(Arg_2.13, Arg_3.14)
ROOT res = (s16[], s16[]) tuple(maximum.15, maximum.16)
}
ENTRY main.17 {
p0 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(0)
p1 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(1)
p2 = s32[528,2]{1,0} parameter(2)
p3 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(3)
p4 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(4)
ROOT scatter = (s16[16,3,2,2,14]{0,4,3,2,1}, s16[16,3,2,2,14]{0,4,3,2,1}) scatter(p0, p1, p2, p3, p4), update_window_dims={1,2,3,4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
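
// s4 operands use a sub-byte element-size annotation (E(4)) in the layout,
// which normalization is expected to preserve.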
TEST_F(LayoutNormalizationTest, CompareInt4) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = s4[10]{0:E(4)} parameter(0)
b = s4[10]{0:E(4)} parameter(1)
ROOT out = compare(a, b), direction=EQ
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/layout_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/layout_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |