ID
int64 0
2.65k
| Language
stringclasses 1
value | Repository Name
stringclasses 21
values | File Name
stringlengths 2
48
| File Path in Repository
stringlengths 10
111
⌀ | File Path for Unit Test
stringlengths 16
116
⌀ | Code
stringlengths 66
1.91M
| Unit Test - (Ground Truth)
stringlengths 40
32.1k
⌀ |
---|---|---|---|---|---|---|---|
1,900 | cpp | tensorflow/tensorflow | flatten_call_graph | third_party/xla/xla/service/flatten_call_graph.cc | third_party/xla/xla/service/flatten_call_graph_test.cc | #ifndef XLA_SERVICE_FLATTEN_CALL_GRAPH_H_
#define XLA_SERVICE_FLATTEN_CALL_GRAPH_H_
#include "absl/status/statusor.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that flattens the call graph so every computation has at most one
// control-flow caller, by cloning computations that are called from multiple
// control-flow callsites.
class FlattenCallGraph : public HloModulePass {
public:
absl::string_view name() const override { return "flatten-call-graph"; }
using HloPassInterface::Run;
// Duplicates multiply-called computations in `module`; restricted to the
// given execution threads. Returns whether the module changed.
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/flatten_call_graph.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/call_graph.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
// Replaces `computation` with `new_computation` in the callee list of
// `instruction`. Only while, call, and conditional callers are supported;
// any other opcode is a fatal error.
void ReplaceCalledComputation(HloInstruction* instruction,
HloComputation* computation,
HloComputation* new_computation) {
switch (instruction->opcode()) {
case HloOpcode::kWhile: {
// `computation` must be either the while condition or the while body.
if (computation == instruction->while_condition()) {
instruction->set_while_condition(new_computation);
} else {
CHECK_EQ(computation, instruction->while_body());
instruction->set_while_body(new_computation);
}
break;
}
case HloOpcode::kCall: {
CHECK_EQ(instruction->to_apply(), computation);
instruction->set_to_apply(new_computation);
break;
}
case HloOpcode::kConditional: {
// Replace only the FIRST branch that points at `computation`; callers
// invoke this once per callsite, so branches shared between multiple
// callsites are rewritten one at a time. The CHECK only fires when no
// earlier branch matched, enforcing that at least one branch matches.
for (int b = 0; b < instruction->branch_count(); ++b) {
if (b == instruction->branch_count() - 1) {
CHECK_EQ(computation, instruction->branch_computation(b));
}
if (computation == instruction->branch_computation(b)) {
instruction->set_branch_computation(b, new_computation);
break;
}
}
break;
}
default:
LOG(FATAL) << "unexpected opcode: " << instruction->opcode();
}
}
// Flattens `node`: for control-flow callers of node.computation() beyond the
// first, replaces the callee with a fresh clone and then recursively clones
// the clone's own control-flow callees, so each computation ends up with at
// most one control-flow caller.
absl::Status FlattenNode(const CallGraphNode& node) {
HloComputation* computation = node.computation();
HloModule* module = computation->parent();
for (int i = 0; i < node.caller_callsites().size(); ++i) {
// NOTE(review): copied by value, presumably to guard against the entry
// being invalidated while the module is mutated below -- confirm.
CallSite call_site = node.caller_callsites()[i];
// Embedded-context callers (e.g. map-like callers) are not flattened.
if (call_site.context() == CallContext::kEmbedded) {
continue;
}
CHECK_EQ(call_site.context(), CallContext::kControlFlow);
// Keep the first control-flow callsite pointing at the original
// computation -- unless the computation is used in both contexts
// (kBoth), in which case every control-flow caller gets a clone.
if (node.context() != CallContext::kBoth && i == 0) {
continue;
}
// Async computations are skipped entirely.
if (computation->IsAsyncComputation()) {
continue;
}
HloComputation* clone =
module->AddEmbeddedComputation(computation->Clone());
ReplaceCalledComputation(call_site.instruction(), computation, clone);
// Clone the transitive control-flow callees of the clone as well, so the
// cloned subtree shares no computation with the original.
std::vector<HloComputation*> worklist;
worklist.push_back(clone);
while (!worklist.empty()) {
auto current = worklist.back();
worklist.pop_back();
for (auto* instruction : current->instructions()) {
if (GetInstructionCallContext(instruction->opcode()) !=
CallContext::kControlFlow) {
continue;
}
for (auto callee : instruction->called_computations()) {
HloComputation* callee_clone =
module->AddEmbeddedComputation(callee->Clone());
ReplaceCalledComputation(instruction, callee, callee_clone);
worklist.push_back(callee_clone);
}
}
}
}
return absl::OkStatus();
}
// Annotates each computation called from `node`'s callsites with a
// back-pointer to the caller instruction (fusion, custom-call, collective,
// while, or conditional).
absl::Status AnnotateNode(const CallGraphNode& node) {
for (const auto& site : node.callsites()) {
HloInstruction* caller = site.instruction();
switch (caller->opcode()) {
case HloOpcode::kFusion:
for (HloComputation* callee : caller->called_computations()) {
callee->SetFusionInstruction(caller);
}
break;
case HloOpcode::kCustomCall:
for (HloComputation* callee : caller->called_computations()) {
callee->SetCustomCallInstruction(caller);
}
break;
case HloOpcode::kWhile:
// Only the body gets a back-pointer; the condition is not annotated.
caller->while_body()->SetWhileCallInstruction(caller);
break;
case HloOpcode::kConditional:
for (HloComputation* branch : caller->branch_computations()) {
branch->SetConditionalCallInstruction(caller);
}
break;
default:
// Collectives span many opcodes, so they are matched by predicate
// rather than by case label. Other opcodes are left unannotated.
if (hlo_query::IsCollectiveCommunicationOp(caller->opcode())) {
for (HloComputation* callee : caller->called_computations()) {
callee->SetCollectiveCallInstruction(caller);
}
}
break;
}
}
return absl::OkStatus();
}
}
absl::StatusOr<bool> FlattenCallGraph::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(3, "Before flatten call graph:\n" + module->ToString());
// Pass 1: clone computations until each has a single control-flow caller.
// Scoped so the (soon stale) call graph is destroyed before rebuilding.
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(FlattenNode));
}
// Pass 2: rebuild the call graph (flattening changed it) and record on each
// called computation a back-pointer to its now-unique caller instruction.
{
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(module, execution_threads);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(AnnotateNode));
}
XLA_VLOG_LINES(3, "After flatten call graph:\n" + module->ToString());
// Conservatively reports the module as changed.
return true;
}
} | #include "xla/service/flatten_call_graph.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/literal.h"
#include "xla/service/call_graph.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
// Test fixture providing small computation builders used to assemble call
// graphs with shared callees.
class FlattenCallGraphTest : public HloTestBase {
protected:
// Builds a scalar computation: negate(param0).
std::unique_ptr<HloComputation> MakeScalarComputation() {
HloComputation::Builder builder(TestName() + ".ScalarComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(kScalarShape, HloOpcode::kNegate, param0));
return builder.Build();
}
// Builds a computation containing `callsites` chained map ops, each mapping
// with `map_computation` (embedded-context callsites).
std::unique_ptr<HloComputation> MakeMappingComputation(
HloComputation* map_computation, int64_t callsites) {
HloComputation::Builder builder(TestName() + ".MappingComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateMap(
kScalarShape, {last_value}, map_computation));
}
return builder.Build();
}
// Builds a computation containing `callsites` chained kCall ops, each
// calling `callee_computation` (control-flow callsites).
std::unique_ptr<HloComputation> MakeCallingComputation(
HloComputation* callee_computation, int64_t callsites,
const std::string& suffix = ".CallingComputation") {
HloComputation::Builder builder(TestName() + suffix);
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* last_value = param0;
for (int64_t i = 0; i < callsites; ++i) {
last_value = builder.AddInstruction(HloInstruction::CreateCall(
kScalarShape, {last_value}, callee_computation));
}
return builder.Build();
}
// Builds a predicate computation: param0 > 0, usable as a while condition.
std::unique_ptr<HloComputation> MakeConditionComputation() {
HloComputation::Builder builder(TestName() + ".ConditionComputation");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
zero, ComparisonDirection::kGt));
return builder.Build();
}
// Runs the FlattenCallGraph pass over `module`.
absl::StatusOr<bool> RunFlattenCallGraph(HloModule* module) {
FlattenCallGraph flatten;
TF_ASSIGN_OR_RETURN(bool result, flatten.Run(module));
return result;
}
const Shape kScalarShape = ShapeUtil::MakeShape(F32, {});
};
// Builds a graph where c_computation is shared by a call and (transitively)
// two while loops, then verifies flattening leaves it with one caller.
TEST_F(FlattenCallGraphTest, ComplexGraph) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation =
module->AddEmbeddedComputation(MakeConditionComputation());
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeMappingComputation(c_computation, 1));
HloComputation* a_computation;
{
HloComputation::Builder builder(TestName() + ".a");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, b_computation, call));
a_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, kScalarShape, "param0"));
builder.AddInstruction(HloInstruction::CreateWhile(
kScalarShape, cond_computation, a_computation, param0));
entry_computation = module->AddEntryComputation(builder.Build());
}
{
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> flat_call_graph = CallGraph::Build(module.get());
// After flattening every computation has exactly one caller callsite.
const CallGraphNode& c_node = flat_call_graph->GetNode(c_computation);
EXPECT_EQ(1, c_node.caller_callsites().size());
}
}
// Uses one computation as BOTH the while condition and the while body, and
// checks that flattening splits the two callsites apart.
TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) {
auto module = CreateNewVerifiedModule();
HloComputation* cond_computation;
{
HloComputation::Builder builder(TestName() + ".cond");
HloInstruction* param0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(PRED, {}), "param0"));
HloInstruction* false_constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), param0, false_constant,
ComparisonDirection::kEq));
cond_computation = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* entry_computation;
{
HloComputation::Builder builder(TestName() + ".entry");
HloInstruction* false_constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
// Same computation used for condition and body on purpose.
builder.AddInstruction(HloInstruction::CreateWhile(
ShapeUtil::MakeShape(PRED, {}), cond_computation, cond_computation,
false_constant));
entry_computation = module->AddEntryComputation(builder.Build());
}
{
// Before flattening: two callsites share the computation.
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
EXPECT_EQ(2, cond_node.caller_callsites().size());
}
{
// After flattening: the original computation has a single callsite.
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
EXPECT_EQ(1, cond_node.caller_callsites().size());
}
}
// Entry calls B twice, B calls C twice: flattening must clone until each
// computation has one caller (2 entry + 2 B + 4 C - duplicates = 7 total).
TEST_F(FlattenCallGraphTest, FlattenCalls) {
auto module = CreateNewVerifiedModule();
HloComputation* c_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation* b_computation = module->AddEmbeddedComputation(
MakeCallingComputation(c_computation, 2, ".B"));
module->AddEntryComputation(
MakeCallingComputation(b_computation, 2, ".Entry"));
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(7, module->computation_count());
const CallGraphNode& c_node = call_graph->GetNode(c_computation);
EXPECT_EQ(1, c_node.caller_callsites().size());
const CallGraphNode& b_node = call_graph->GetNode(b_computation);
EXPECT_EQ(1, b_node.caller_callsites().size());
}
// A conditional whose two branches share one computation: flattening clones
// a branch so the shared computation keeps a single caller callsite.
TEST_F(FlattenCallGraphTest, FlattenCallsInConditional) {
auto module = CreateNewVerifiedModule();
HloComputation* sub_computation =
module->AddEmbeddedComputation(MakeScalarComputation());
HloComputation::Builder builder(TestName());
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.0f)));
// Both true and false branches use sub_computation.
builder.AddInstruction(HloInstruction::CreateConditional(
kScalarShape, pred, constant1, sub_computation, constant2,
sub_computation));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, module->computation_count());
TF_ASSERT_OK_AND_ASSIGN(bool result, RunFlattenCallGraph(module.get()));
EXPECT_TRUE(result);
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
// One branch was cloned, growing the module to 3 computations.
EXPECT_EQ(3, module->computation_count());
const CallGraphNode& sub_node = call_graph->GetNode(sub_computation);
EXPECT_EQ(1, sub_node.caller_callsites().size());
}
}
} |
1,901 | cpp | tensorflow/tensorflow | simplify_fp_conversions | third_party/xla/xla/service/simplify_fp_conversions.cc | third_party/xla/xla/service/gpu/tests/simplify_fp_conversions_test.cc | #ifndef XLA_SERVICE_SIMPLIFY_FP_CONVERSIONS_H_
#define XLA_SERVICE_SIMPLIFY_FP_CONVERSIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that removes redundant chains of floating-point convert ops
// (e.g. f32 -> bf16 -> f32), collapsing each chain to at most one convert.
class SimplifyFPConversions : public HloModulePass {
public:
explicit SimplifyFPConversions() = default;
absl::string_view name() const override { return "simplify-fp-conversions"; }
using HloPassInterface::Run;
// Simplifies convert chains in every computation of `module` belonging to
// `execution_threads`. Returns whether anything changed.
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/simplify_fp_conversions.h"
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Collapses every chain of two or more consecutive floating-point kConvert
// ops in `computation` into at most one convert; a chain whose endpoints
// already have the same element type is removed entirely. Returns whether
// the computation changed.
absl::StatusOr<bool> RunOnComputation(HloComputation& computation) {
bool changed = false;
for (HloInstruction* instr : computation.MakeInstructionPostOrder()) {
// Walk upward through consecutive fp converts, counting the chain.
size_t chain_len = 0;
HloInstruction* source = instr;
while (source->opcode() == HloOpcode::kConvert &&
primitive_util::IsFloatingPointType(
source->shape().element_type())) {
source = source->mutable_operand(0);
++chain_len;
}
// A single convert (or none) is already minimal.
if (chain_len < 2) continue;
if (source->shape().element_type() == instr->shape().element_type()) {
// End-to-end no-op: drop the whole chain.
TF_RETURN_IF_ERROR(
instr->parent()->ReplaceInstruction(instr, source));
} else {
// Replace the chain with one direct convert to the final type.
TF_RETURN_IF_ERROR(instr->parent()->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConvert(instr->shape(), source)));
}
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> SimplifyFPConversions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, absl::StrFormat("SimplifyFPConversions::Run() with before:\n%s",
module->ToString()));
bool changed = false;
// Visit computations in post order and simplify each independently.
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(*computation));
changed |= comp_changed;
}
XLA_VLOG_LINES(2,
absl::StrFormat("SimplifyFPConversions::Run() with after:\n%s",
module->ToString()));
return changed;
}
} | #include <string_view>
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
namespace {
// GPU-side fixture: toggles xla_allow_excess_precision (which gates the
// SimplifyFPConversions pass in the pipeline) via debug options.
class SimplifyFPConversionsTest : public HloTestBase {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_allow_excess_precision(
enable_simplify_all_fp_conversions_);
return debug_options;
}
// True when the device can multiply bf16 natively (CUDA Hopper or newer),
// in which case no f32 round-trip converts are emitted at all.
bool SupportsMultiplyBF16() {
const auto& device_description =
backend().default_stream_executor()->GetDeviceDescription();
const auto& cc = device_description.gpu_compute_capability();
return std::holds_alternative<se::CudaComputeCapability>(cc) &&
std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper();
}
// Must be called before the module is compiled to take effect.
void SetEnableSimplifyFpConversions(bool enable_simplify_all_fp_conversions) {
enable_simplify_all_fp_conversions_ = enable_simplify_all_fp_conversions;
}
// bf16 module whose rsqrt/multiply ops trigger f32 up/down converts on
// devices without native bf16 multiply support.
static constexpr std::string_view kHloText = R"(
HloModule module
ENTRY main {
param0 = bf16[1536]{0} parameter(0)
param1 = bf16[4,1536]{1,0} parameter(1)
s = bf16[1536]{0} rsqrt(param0)
b = bf16[4,1536]{1,0} broadcast(s), dimensions={1}
ROOT d = bf16[4,1536]{1,0} multiply(b, param1)
}
)";
private:
bool enable_simplify_all_fp_conversions_ = false;
};
// With the pass enabled, the optimized HLO should match the expected pattern
// for both Hopper (native bf16) and pre-Hopper devices.
// NOTE(review): the filecheck pattern strings appear empty here -- presumably
// stripped by the data export; verify against the original test.
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsGetCleanedUp) {
SetEnableSimplifyFpConversions(true);
if (SupportsMultiplyBF16()) {
MatchOptimizedHlo(kHloText, R"(
)");
} else {
MatchOptimizedHlo(kHloText, R"(
)");
}
}
// With the pass disabled, the redundant converts should remain in the
// optimized HLO (skipped on Hopper, where they are never emitted).
// NOTE(review): the filecheck pattern string appears empty here -- presumably
// stripped by the data export; verify against the original test.
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsArePresentInTest) {
if (SupportsMultiplyBF16()) {
GTEST_SKIP() << "No double convert is expected on Hopper";
}
SetEnableSimplifyFpConversions(false);
MatchOptimizedHlo(kHloText, R"(
)");
}
}
}
} |
1,902 | cpp | tensorflow/tensorflow | async_collective_creator | third_party/xla/xla/service/async_collective_creator.cc | third_party/xla/xla/service/async_collective_creator_test.cc | #ifndef XLA_SERVICE_ASYNC_COLLECTIVE_CREATOR_H_
#define XLA_SERVICE_ASYNC_COLLECTIVE_CREATOR_H_
#include <functional>
#include <utility>
#include <vector>
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites synchronous collective ops (all-reduce, all-gather,
// collective-broadcast, collective-permute, all-to-all, reduce-scatter) into
// asynchronous start/done instruction pairs, per op-specific predicates.
class AsyncCollectiveCreator : public HloModulePass {
public:
// Returns extra context shapes to attach to an async op's start shape.
using ContextShapeQuery =
std::function<std::vector<Shape>(const HloInstruction *)>;
// Per-opcode predicates selecting which collectives to convert; all default
// to "never convert".
struct CollectiveCreatorConfig {
HloPredicate convert_all_reduce = HloPredicateFalse;
HloPredicate convert_all_gather = HloPredicateFalse;
HloPredicate convert_collective_broadcast = HloPredicateFalse;
HloPredicate convert_collective_permute = HloPredicateFalse;
HloPredicate convert_all_to_all = HloPredicateFalse;
HloPredicate convert_reduce_scatter = HloPredicateFalse;
ContextShapeQuery get_context_shapes = [](const HloInstruction *) {
return std::vector<Shape>{};
};
};
explicit AsyncCollectiveCreator(CollectiveCreatorConfig creator_config)
: config_(std::move(creator_config)) {}
absl::string_view name() const override { return "async-collective-creator"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) override;
// Returns the collectives in `computation` selected by the config.
std::vector<HloInstruction *> MatchCollectives(HloComputation *computation);
// Rewrites `supported_collectives` in-place into start/done pairs,
// updating the schedule if the computation is scheduled.
absl::StatusOr<bool> ReplaceCollectives(
HloComputation *computation,
std::vector<HloInstruction *> &supported_collectives);
const CollectiveCreatorConfig *config() const { return &config_; }
private:
CollectiveCreatorConfig config_;
};
}
#endif
#include "xla/service/async_collective_creator.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// The start/done instruction pair produced when a synchronous collective is
// rewritten into its asynchronous form.
struct ReplacedAsync {
HloInstruction* start;
HloInstruction* done;
};
// Builds an (all-reduce-start, all-reduce-done) pair mirroring the given
// synchronous all-reduce. The original instruction is left in place; the
// caller is responsible for replacing it with `done`.
absl::StatusOr<ReplacedAsync> CreateAsyncAllReduce(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ar = Cast<HloAllReduceInstruction>(instruction);
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllReduceStart(
ar->shape(), ar->operands(), ar->to_apply(), ar->device_list(),
ar->constrain_layout(), ar->channel_id(),
ar->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ar->shape(), HloOpcode::kAllReduceDone, start));
return ReplacedAsync{start, done};
}
// Builds an (all-gather-start, all-gather-done) pair mirroring the given
// synchronous all-gather. The start's shape is a 2-tuple of (operand
// shape(s), result shape); the done recovers the original result shape.
absl::StatusOr<ReplacedAsync> CreateAsyncAllGather(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ag = Cast<HloAllGatherInstruction>(instruction);
std::vector<const Shape*> operand_shapes;
operand_shapes.reserve(ag->operand_count());
for (const HloInstruction* op : ag->operands()) {
operand_shapes.push_back(&op->shape());
}
// Multi-operand all-gathers nest their operand shapes in a sub-tuple.
Shape shape = ShapeUtil::MakeTupleShape(
{ag->operand_count() > 1
? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)
: *operand_shapes[0],
ag->shape()});
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllGatherStart(
shape, ag->operands(), ag->all_gather_dimension(), ag->device_list(),
ag->constrain_layout(), ag->channel_id(),
ag->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ag->shape(), HloOpcode::kAllGatherDone, start));
return ReplacedAsync{start, done};
}
// Builds a (collective-permute-start, collective-permute-done) pair for the
// given synchronous collective-permute. `context_shapes` are appended to the
// inferred start shape. Handles both the 1-operand form and the 4-operand
// in-place form.
absl::StatusOr<ReplacedAsync> CreateAsyncCollectivePermute(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
auto* cp = Cast<HloCollectivePermuteInstruction>(instruction);
HloInstruction* start;
HloInstruction* operand = cp->mutable_operand(0);
if (cp->operand_count() == 1) {
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(
{&operand->shape()}, context_shapes)
.value(),
operand, cp->source_target_pairs(), cp->channel_id()));
} else {
// In-place variant; the four operands are presumably (input, output,
// input start indices, output start indices) -- confirm with the
// collective-permute HLO semantics.
CHECK_EQ(cp->operand_count(), 4);
std::vector<const Shape*> operand_shapes;
absl::c_transform(
cp->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(operand_shapes,
context_shapes)
.value(),
operand, cp->mutable_operand(1), cp->mutable_operand(2),
cp->mutable_operand(3), cp->source_target_pairs(),
cp->dynamic_slice_sizes_list(), cp->channel_id()));
// Propagate the disjoint read/write regions attribute, if present.
if (HasDisjointReadWriteRegionsAttr(cp)) {
SetDisjointReadWriteRegionsAttr(start);
}
}
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
cp->shape(), HloOpcode::kCollectivePermuteDone, start));
return ReplacedAsync{start, done};
}
// Wraps `instruction` in generic async-start/async-done instructions via
// HloComputation::CreateAsyncInstructions (used for opcodes without a
// dedicated start/done pair, e.g. all-to-all and reduce-scatter).
absl::StatusOr<ReplacedAsync> CreateAsyncStartDone(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(instruction, context_shapes,
HloInstruction::kMainExecutionThread,
false));
HloInstruction* start = done->mutable_operand(0);
return ReplacedAsync{start, done};
}
}
// Collects the collective instructions in `computation` that the per-opcode
// predicates in `config_` select for sync-to-async conversion.
std::vector<HloInstruction*> AsyncCollectiveCreator::MatchCollectives(
HloComputation* computation) {
std::vector<HloInstruction*> matches;
for (HloInstruction* hlo : computation->instructions()) {
bool selected = false;
switch (hlo->opcode()) {
case HloOpcode::kAllReduce:
selected = config_.convert_all_reduce(hlo);
break;
case HloOpcode::kAllGather:
selected = config_.convert_all_gather(hlo);
break;
case HloOpcode::kCollectiveBroadcast:
selected = config_.convert_collective_broadcast(hlo);
break;
case HloOpcode::kCollectivePermute:
selected = config_.convert_collective_permute(hlo);
break;
case HloOpcode::kAllToAll:
selected = config_.convert_all_to_all(hlo);
break;
case HloOpcode::kReduceScatter:
selected = config_.convert_reduce_scatter(hlo);
break;
default:
// Not a convertible collective opcode.
break;
}
if (selected) {
matches.push_back(hlo);
}
}
return matches;
}
// Rewrites each instruction in `supported_collectives` into a start/done
// pair (preserving metadata, backend config, and control dependencies) and,
// if `computation` is scheduled, splices the pair into the schedule in place
// of the original instruction.
absl::StatusOr<bool> AsyncCollectiveCreator::ReplaceCollectives(
HloComputation* computation,
std::vector<HloInstruction*>& supported_collectives) {
bool changed = false;
HloModule* module = computation->parent();
absl::flat_hash_map<HloInstruction*, ReplacedAsync> replaced_pairs;
const bool should_update_schedule =
module->has_schedule() &&
module->schedule().is_computation_scheduled(computation);
for (HloInstruction* instruction : supported_collectives) {
absl::StatusOr<ReplacedAsync> async_pair;
// Dispatch to the opcode-specific async builder.
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
async_pair = CreateAsyncAllReduce(instruction);
break;
case HloOpcode::kAllGather:
async_pair = CreateAsyncAllGather(instruction);
break;
case HloOpcode::kCollectivePermute:
async_pair = CreateAsyncCollectivePermute(
instruction, config_.get_context_shapes(instruction));
break;
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kAllToAll:
case HloOpcode::kReduceScatter:
// These use the generic async-start/async-done wrapper.
async_pair = CreateAsyncStartDone(
instruction, config_.get_context_shapes(instruction));
break;
default:
return Internal("Unexpected opcode %s",
HloOpcodeString(instruction->opcode()));
}
TF_RETURN_IF_ERROR(async_pair.status());
// Metadata and backend config live on the start op.
async_pair->start->set_metadata(instruction->metadata());
async_pair->start->CopyBackendConfigFrom(instruction);
if (should_update_schedule) {
replaced_pairs[instruction] = *async_pair;
}
// Move control deps off the original before replacing it: predecessors
// constrain the start, successors the done.
TF_RETURN_IF_ERROR(
instruction->CopyAllControlDepsTo(async_pair->start, async_pair->done));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
computation->ReplaceInstruction(instruction, async_pair->done),
"replacing ", instruction->ToShortString());
changed = true;
}
if (should_update_schedule) {
// Rebuild the instruction sequence, substituting start+done (adjacent,
// same position) for each replaced collective.
std::vector<HloInstruction*> new_sequence;
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
new_sequence.reserve(sequence.size() + replaced_pairs.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_pairs.find(instr);
if (it != replaced_pairs.end()) {
new_sequence.push_back(it->second.start);
new_sequence.push_back(it->second.done);
continue;
}
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
}
return changed;
}
absl::StatusOr<bool> AsyncCollectiveCreator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
int64_t collectives_replaced = 0;
// Fusion computations are excluded: collectives are not rewritten inside
// fusions.
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::vector<HloInstruction*> supported_collectives =
MatchCollectives(computation);
if (supported_collectives.empty()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool comp_changed,
ReplaceCollectives(computation, supported_collectives));
collectives_replaced += supported_collectives.size();
changed |= comp_changed;
}
VLOG(1) << "Replaced " << collectives_replaced
<< " sync collectives with async versions.";
return changed;
}
} | #include "xla/service/async_collective_creator.h"
#include <string>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::NotNull;
using ::testing::SizeIs;
using AsyncAllReduceCreatorTest = HloTestBase;
// A single all-reduce becomes an all-reduce-start/all-reduce-done pair
// (parameter + start + done = 3 instructions).
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8] parameter(0)
ROOT ar = f32[8] all-reduce(p0), to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllReduceDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllReduceStart);
}
// A single all-gather becomes an all-gather-start/all-gather-done pair.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllGather) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ROOT ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllGatherDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllGatherStart);
}
// A single one-operand collective-permute becomes a start/done pair.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
// The 4-operand in-place collective-permute variant (with slice_sizes) is
// also split into a start/done pair.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleInPlaceCollectivePermute) {
std::string hlo_string = std::string(R"(
HloModule module
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="SomeCustomCall"
ROOT %collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, f32[4,4,128]{2,1,0:T(4,128)} %custom-call, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}
}
)");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 7);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
// Verifies that splitting works on a module with a schedule
// (is_scheduled=true) and that the schedule is updated in place.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermuteScheduled) {
  constexpr absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  const int64_t original_instr_sequence_size =
      hlo_module->schedule().sequence(hlo_module->entry_computation()).size();
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_collective_permute = HloPredicateTrue;
  TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
  HloComputation* computation = hlo_module->entry_computation();
  ASSERT_THAT(computation, NotNull());
  ASSERT_EQ(computation->instruction_count(), 3);
  const HloInstruction* done = computation->root_instruction();
  EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
  ASSERT_THAT(done->operands(), SizeIs(1));
  const HloInstruction* start = done->operand(0);
  EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
  // One op was replaced by a start/done pair, so the schedule grows by one.
  EXPECT_EQ(
      hlo_module->schedule().sequence(hlo_module->entry_computation()).size(),
      original_instr_sequence_size + 1);
}
// Verifies collective-broadcast is wrapped into a generic async-start /
// async-done pair (no dedicated start/done opcode exists for it), with the
// original op preserved as the async-wrapped instruction.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectiveBroadcast) {
  constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT cb = f32[8,16] collective-broadcast(p0), replica_groups={{7,0,1,2,3,4,5,6}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_collective_broadcast = HloPredicateTrue;
  TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
  HloComputation* computation = hlo_module->entry_computation();
  ASSERT_THAT(computation, NotNull());
  ASSERT_EQ(computation->instruction_count(), 3);
  const HloInstruction* done = computation->root_instruction();
  EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
  ASSERT_THAT(done->operands(), SizeIs(1));
  const HloInstruction* start = done->operand(0);
  EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
  ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
  EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kCollectiveBroadcast);
}
// Verifies all-to-all is wrapped into an async-start/async-done pair.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllToAll) {
  constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[8,16] all-to-all(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_all_to_all = HloPredicateTrue;
  TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
  // NOTE(review): level-0 VLOG of the whole module looks like leftover
  // debugging output — consider removing or raising the level.
  XLA_VLOG_LINES(0, hlo_module->ToString());
  HloComputation* computation = hlo_module->entry_computation();
  ASSERT_THAT(computation, NotNull());
  ASSERT_EQ(computation->instruction_count(), 3);
  const HloInstruction* done = computation->root_instruction();
  EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
  ASSERT_THAT(done->operands(), SizeIs(1));
  const HloInstruction* start = done->operand(0);
  EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
  ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
  EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kAllToAll);
}
// Verifies reduce-scatter (with its to_apply reduction computation) is
// wrapped into an async-start/async-done pair.
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleReduceScatter) {
  constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[1,16] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_reduce_scatter = HloPredicateTrue;
  TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
  // NOTE(review): level-0 VLOG of the whole module looks like leftover
  // debugging output — consider removing or raising the level.
  XLA_VLOG_LINES(0, hlo_module->ToString());
  HloComputation* computation = hlo_module->entry_computation();
  ASSERT_THAT(computation, NotNull());
  ASSERT_EQ(computation->instruction_count(), 3);
  const HloInstruction* done = computation->root_instruction();
  EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
  ASSERT_THAT(done->operands(), SizeIs(1));
  const HloInstruction* start = done->operand(0);
  EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
  ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
  EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kReduceScatter);
}
// Verifies that control dependencies survive the split: control
// predecessors of the collective move to the start op, and control
// successors move to the done op.
TEST_F(AsyncAllReduceCreatorTest, ControlPredecessor) {
  constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, control-predecessors={p0}
p1 = f32[1] parameter(1), control-predecessors={ag}
ROOT sum = add(ag, ag)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_all_gather = HloPredicateTrue;
  TF_ASSERT_OK(
      RunHloPass(AsyncCollectiveCreator(config), hlo_module.get()).status());
  SCOPED_TRACE(hlo_module->ToString());
  HloInstruction* start;
  HloInstruction* done;
  ASSERT_THAT(
      hlo_module->entry_computation()->root_instruction(),
      GmockMatch(m::Add(m::Op(),
                        m::Op(&done)
                            .WithOpcode(HloOpcode::kAllGatherDone)
                            .WithOperand(0, m::Op(&start).WithOpcode(
                                                HloOpcode::kAllGatherStart)))));
  // p0 -> ag became p0 -> start.
  EXPECT_EQ(start->control_successors().size(), 0);
  ASSERT_EQ(start->control_predecessors().size(), 1);
  EXPECT_THAT(start->control_predecessors()[0], GmockMatch(m::Parameter(0)));
  // ag -> p1 became done -> p1.
  EXPECT_EQ(done->control_predecessors().size(), 0);
  ASSERT_EQ(done->control_successors().size(), 1);
  EXPECT_THAT(done->control_successors()[0], GmockMatch(m::Parameter(1)));
}
}
} |
1,903 | cpp | tensorflow/tensorflow | convert_operand_folding | third_party/xla/xla/service/convert_operand_folding.cc | third_party/xla/xla/service/convert_operand_folding_test.cc | #ifndef XLA_SERVICE_CONVERT_OPERAND_FOLDING_H_
#define XLA_SERVICE_CONVERT_OPERAND_FOLDING_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
// Pass that folds value-preserving (upcasting) converts into the operands of
// dot and convolution instructions, including converts hidden behind
// formatting ops (reshape/slice/transpose/etc.), so the mixed-precision op
// consumes the narrower type directly.
class ConvertOperandFolding : public OpExpanderPass {
 public:
  absl::string_view name() const override { return "convert_operand_folding"; }

 protected:
  // True for dot/convolution instructions that have at least one operand
  // which is (a formatting chain over) an upcast convert.
  bool InstructionMatchesPattern(HloInstruction* instruction) override;

  // Rewrites the matched instruction's operands in place. Returns nullptr
  // (no replacement instruction is created).
  absl::StatusOr<HloInstruction*> ExpandInstruction(
      HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/convert_operand_folding.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
bool IsUpcastConvert(const HloInstruction* hlo) {
if (!hlo->shape().IsArray()) {
return false;
}
switch (hlo->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose: {
return IsUpcastConvert(hlo->operand(0));
}
case HloOpcode::kReduce: {
if (ShapeUtil::ElementsIn(hlo->shape()) ==
ShapeUtil::ElementsIn(hlo->operand(0)->shape())) {
return IsUpcastConvert(hlo->operand(0));
}
return false;
}
case HloOpcode::kConvert:
return primitive_util::CastPreservesValues(
hlo->operand(0)->shape().element_type(), hlo->shape().element_type());
default:
return false;
}
}
// Rebuilds the formatting chain rooted at `hlo` so that it operates on the
// pre-convert (narrower) element type, and returns the root of the rebuilt
// chain. Returns nullptr for opcodes outside the recognized set.
// Note: kBroadcast is handled here even though IsUpcastConvert does not
// traverse it.
HloInstruction* EffectiveOperand(HloInstruction* hlo) {
  switch (hlo->opcode()) {
    case HloOpcode::kBroadcast:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kGather:
    case HloOpcode::kReshape:
    case HloOpcode::kSlice:
    case HloOpcode::kTranspose: {
      // Recursively rebuild the producer, then clone this op with the
      // narrower element type and point it at the rebuilt producer.
      HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
      HloInstruction* clone = hlo->AddInstruction(hlo->Clone());
      *(clone->mutable_shape()) = ShapeUtil::ChangeElementType(
          clone->shape(), operand->shape().element_type());
      // The operand shape intentionally differs (narrower type), so the
      // shape-checked ReplaceOperandWith cannot be used here.
      clone->ReplaceOperandWithDifferentShape(0, operand).IgnoreError();
      return clone;
    }
    case HloOpcode::kReduce: {
      // Only element-count-preserving reduces reach here (see
      // IsUpcastConvert), so the reduce can be replaced by a reshape of the
      // rebuilt producer.
      HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
      return hlo->AddInstruction(HloInstruction::CreateReshape(
          ShapeUtil::ChangeElementType(hlo->shape(),
                                       operand->shape().element_type()),
          operand));
    }
    case HloOpcode::kConvert:
      // Base case: strip the convert and expose its input.
      return hlo->mutable_operand(0);
    default:
      return nullptr;
  }
}
}
// A match is a dot or convolution with at least one operand that is (a
// formatting chain over) a value-preserving upcast convert.
bool ConvertOperandFolding::InstructionMatchesPattern(
    HloInstruction* instruction) {
  const bool is_dot_or_conv =
      instruction->opcode() == HloOpcode::kDot ||
      instruction->opcode() == HloOpcode::kConvolution;
  if (!is_dot_or_conv) {
    return false;
  }
  bool has_upcast_operand = false;
  for (HloInstruction* operand : instruction->operands()) {
    if (IsUpcastConvert(operand)) {
      has_upcast_operand = true;
      break;
    }
  }
  return has_upcast_operand;
}
// Replaces each upcast-convert operand of the matched dot/convolution with
// its effective (pre-convert, narrower-typed) operand chain.
absl::StatusOr<HloInstruction*> ConvertOperandFolding::ExpandInstruction(
    HloInstruction* instruction) {
  for (int i = 0; i < instruction->operand_count(); ++i) {
    auto* operand = instruction->mutable_operand(i);
    if (IsUpcastConvert(operand)) {
      // The replacement has a narrower element type, hence the
      // different-shape variant of operand replacement.
      TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
          i, EffectiveOperand(operand)));
    }
  }
  // NOTE(review): presumably nullptr signals to OpExpanderPass that the
  // instruction was mutated in place and no replacement is needed — confirm
  // against op_expander_pass.h.
  return nullptr;
}
} | #include "xla/service/convert_operand_folding.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ConvertOperandFoldingTest = HloTestBase;
// Integral widening converts (s8->s16, s16->s16 identity-width excluded by
// the pass itself) on both dot operands are folded away.
TEST_F(ConvertOperandFoldingTest, IntegralUpcastConvertFolded) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s16[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  // The dot now consumes the narrow parameters directly but keeps its own
  // (wide) result type.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("s16[2,2]{1,0}")));
}
// Floating-point widening converts (f16/bf16 -> f32) are folded away.
TEST_F(ConvertOperandFoldingTest, FloatingUpcastConvertFolded) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f16[2,3]{1,0} parameter(0)
p1 = bf16[3,2]{0,1} parameter(1)
c0 = f32[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("f32[2,2]{1,0}")));
}
// Integral-to-floating converts that preserve all values (s8->f16, s16->f32)
// are also folded.
TEST_F(ConvertOperandFoldingTest, IntegralToFloatingConvertFolded) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = f16[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("f32[2,2]{1,0}")));
}
// Narrowing (lossy) converts must not be folded — the pass only removes
// value-preserving upcasts.
TEST_F(ConvertOperandFoldingTest, DowncastConvertNotFolded) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s32[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_FALSE(folded);
  // Both converts remain in place.
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(
          op::Dot(
              AllOf(op::Convert(op::Parameter(0)), op::Shape("s16[2,3]{1,0}")),
              AllOf(op::Convert(op::Parameter(1)), op::Shape("s8[3,2]{0,1}"))),
          op::Shape("s16[2,2]{1,0}")));
}
// When only one operand is an upcast, only that operand's convert is folded;
// the other (narrowing) convert stays.
TEST_F(ConvertOperandFoldingTest, OneOperandFolded) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(op::Dot(op::Parameter(0), AllOf(op::Convert(op::Parameter(1)),
                                            op::Shape("s8[3,2]{0,1}"))),
            op::Shape("s16[2,2]{1,0}")));
}
// Converts hidden behind chains of formatting ops (reshape/transpose/slice)
// and element-count-preserving reduces are folded: the whole chain is
// rebuilt over the narrow type.
TEST_F(ConvertOperandFoldingTest, FoldedWithFormatting) {
  absl::string_view module_string = R"(
HloModule module
sum {
a = s16[] parameter(0)
b = s16[] parameter(1)
ROOT r = add(a,b)
}
ENTRY main {
p0 = s8[3,10] parameter(0)
c0 = s16[3,10] convert(p0)
r0 = s16[3,2,5] reshape(c0)
t0 = s16[2,5,3] transpose(r0), dimensions={1,2,0}
s0 = s16[2,1,3] slice(t0), slice={[0:2], [2:3], [0:3]}
rs0 = s16[2,3] reshape(s0)
p1 = s8[3,1,2] parameter(1)
c1 = s16[3,1,2] convert(p1)
r1 = s16[1,3,2] transpose(c1), dimensions={1,0,2}
z = s16[] constant(0)
rr1 = s16[3,2] reduce(r1,z), dimensions={0}, to_apply=sum
ROOT dot = s16[2,2] dot(rs0, rr1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  // Converts are gone; the size-1-dim reduce became a reshape.
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Dot(
          op::Reshape(op::Slice(op::Transpose(op::Reshape(op::Parameter(0))))),
          op::Reshape(op::Transpose(op::Parameter(1)))));
}
// Converts behind dynamic-slice and gather are also folded.
TEST_F(ConvertOperandFoldingTest, FoldedWithDSAndGather) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[100,3] parameter(0)
c0 = s16[100,3] convert(p0)
ids = s32[20] parameter(2)
g = s16[20,3] gather(c0, ids), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3}
t = s16[3,20] transpose(g), dimensions={1,0}
p1 = s8[25,3] parameter(1)
c1 = s16[25,3] convert(p1)
z = s32[] constant(0)
s = s32[] parameter(3)
ds = s16[20,3] dynamic-slice(c1, s, z), dynamic_slice_sizes={20,3}
ROOT dot = s16[3,3] dot(t, ds), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Dot(op::Transpose(op::Gather(op::Parameter(0), op::Parameter(2))),
              op::DynamicSlice(op::Parameter(1), op::Parameter(3),
                               op::Constant())));
}
}
} |
1,904 | cpp | tensorflow/tensorflow | convert_mover | third_party/xla/xla/service/convert_mover.cc | third_party/xla/xla/service/convert_mover_test.cc | #ifndef XLA_SERVICE_CONVERT_MOVER_H_
#define XLA_SERVICE_CONVERT_MOVER_H_
#include <functional>
#include <utility>
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that moves `convert` ops across formatting instructions
// (concatenate/pad/reshape/slice/transpose): precision-increasing converts
// are sunk below such ops and precision-decreasing converts are hoisted
// above them, so the formatting work happens in the narrower type.
class ConvertMover : public HloModulePass {
 public:
  ConvertMover() = default;
  absl::string_view name() const override { return "convert-mover"; }
  using HloPassInterface::Run;
  // Runs the rewrite over all non-fusion computations in `module`; returns
  // true iff anything changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/convert_mover.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
// Returns true iff `literal` survives a round trip through `dst_ty`
// unchanged: converting to `dst_ty` and back to the original element type
// must reproduce the original literal exactly. Conversion failures count as
// lossy.
//
// Note: `static` was dropped — this definition already lives in an anonymous
// namespace, so internal linkage is implied and the keyword was redundant.
bool IsLosslesslyConvertibleTo(const Literal& literal, PrimitiveType dst_ty) {
  PrimitiveType orig_ty = literal.shape().element_type();
  // Forward conversion to the destination type.
  absl::StatusOr<Literal> converted1 = literal.Convert(dst_ty);
  if (!converted1.ok()) {
    return false;
  }
  // Back-conversion to the original type; lossless iff the round trip is the
  // identity.
  absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
  return converted2.ok() && literal == *converted2;
}
// Returns true for the formatting opcodes that a convert may legally be
// moved across: concatenate, pad, reshape, slice, and transpose.
bool OpCommutesWithConvert(HloOpcode opcode) {
  return opcode == HloOpcode::kConcatenate || opcode == HloOpcode::kPad ||
         opcode == HloOpcode::kReshape || opcode == HloOpcode::kSlice ||
         opcode == HloOpcode::kTranspose;
}
// Moves converts across formatting ops inside `comp` in two passes and
// returns true iff anything changed.
absl::StatusOr<bool> MoveConvertPrecisionOps(HloComputation* comp) {
  bool changed = false;
  // Pass 1: sink precision-increasing converts. Rewrites
  //   op(convert(x), constant, ...)  ->  convert(op(x, convert(const), ...))
  // when `op` commutes with convert and every operand is either a
  // single-use convert or a constant.
  for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
    if (!OpCommutesWithConvert(instr->opcode()) ||
        instr->operand_count() == 0 ||
        !absl::c_all_of(instr->operands(), [](const HloInstruction* operand) {
          return (operand->opcode() == HloOpcode::kConvert &&
                  operand->user_count() == 1) ||
                 operand->opcode() == HloOpcode::kConstant;
        })) {
      continue;
    }
    // There must be at least one real convert; an all-constant op is skipped.
    auto convert_op_it = absl::c_find_if(instr->operands(),
                                         HloPredicateIsOp<HloOpcode::kConvert>);
    if (convert_op_it == instr->operands().end()) {
      continue;
    }
    const HloInstruction* convert_op = *convert_op_it;
    // Every convert operand must share the same source element type.
    if (!absl::c_all_of(instr->operands(), [&](const HloInstruction* operand) {
          return operand->opcode() != HloOpcode::kConvert ||
                 operand->operand(0)->shape().element_type() ==
                     convert_op->operand(0)->shape().element_type();
        })) {
      continue;
    }
    PrimitiveType src_ty = convert_op->operand(0)->shape().element_type();
    PrimitiveType dst_ty = convert_op->shape().element_type();
    // Only sink converts that widen the type.
    if (primitive_util::BitWidth(src_ty) >= primitive_util::BitWidth(dst_ty)) {
      continue;
    }
    // Constants must be exactly representable in the narrower source type.
    if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
          return operand->opcode() == HloOpcode::kConstant &&
                 !IsLosslesslyConvertibleTo(operand->literal(), src_ty);
        })) {
      continue;
    }
    // Avoid creating ops that compute in sub-byte (e.g. 4-bit) types.
    if (primitive_util::IsSubByteNonPredType(src_ty)) {
      continue;
    }
    VLOG(2) << "Moving increase-precision convert op " << convert_op->ToString()
            << " down the graph: " << instr->ToString();
    absl::InlinedVector<HloInstruction*, 8> new_operands;
    new_operands.reserve(instr->operand_count());
    for (HloInstruction* operand : instr->operands()) {
      switch (operand->opcode()) {
        case HloOpcode::kConvert:
          // Strip the convert; use its (narrow) input directly.
          new_operands.push_back(operand->mutable_operand(0));
          break;
        case HloOpcode::kConstant:
          // Narrow the constant (checked lossless above).
          new_operands.push_back(MakeConvertToHlo(operand, src_ty));
          break;
        default:
          LOG(FATAL) << "Unexpected opcode in " << operand->ToString();
      }
    }
    // Clone the op in the narrow type, then convert its result back up.
    Shape new_shape = instr->shape();
    new_shape.set_element_type(src_ty);
    HloInstruction* new_instr = comp->AddInstruction(
        instr->CloneWithNewOperands(new_shape, new_operands));
    TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
        instr, HloInstruction::CreateConvert(instr->shape(), new_instr)));
    changed = true;
  }
  // Pass 2: hoist precision-decreasing converts above formatting ops.
  // Rewrites convert(op(x, ...)) -> op(convert(x), ...). Newly created
  // converts are pushed to the queue front so they can keep moving up.
  std::deque<HloInstruction*> work_queue;
  std::vector<HloInstruction*> instrs = comp->MakeInstructionPostOrder();
  work_queue.insert(work_queue.end(), instrs.rbegin(), instrs.rend());
  while (!work_queue.empty()) {
    HloInstruction* instr = work_queue.front();
    work_queue.pop_front();
    // Require a convert whose sole-use producer commutes with convert.
    if (instr->opcode() != HloOpcode::kConvert ||
        instr->operand(0)->user_count() != 1 ||
        !OpCommutesWithConvert(instr->operand(0)->opcode())) {
      continue;
    }
    PrimitiveType src_ty = instr->operand(0)->shape().element_type();
    PrimitiveType dst_ty = instr->shape().element_type();
    // Only hoist converts that narrow the type.
    if (primitive_util::BitWidth(src_ty) <= primitive_util::BitWidth(dst_ty)) {
      continue;
    }
    // Avoid creating ops that compute in sub-byte types.
    if (primitive_util::IsSubByteNonPredType(dst_ty)) {
      continue;
    }
    VLOG(2) << "Moving decrease-precision convert up the graph: "
            << instr->ToString();
    HloInstruction* to_convert = instr->mutable_operand(0);
    absl::InlinedVector<HloInstruction*, 8> new_operands;
    new_operands.reserve(to_convert->operand_count());
    for (HloInstruction* operand : to_convert->operands()) {
      // Narrow each operand, and revisit the new convert later.
      work_queue.push_front(MakeConvertToHlo(operand, dst_ty));
      new_operands.push_back(work_queue.front());
    }
    Shape new_shape = to_convert->shape();
    new_shape.set_element_type(dst_ty);
    TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
        instr, to_convert->CloneWithNewOperands(new_shape, new_operands)));
    changed = true;
  }
  return changed;
}
}
// Applies MoveConvertPrecisionOps to every non-fusion computation and
// reports whether any of them changed.
absl::StatusOr<bool> ConvertMover::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool any_changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool computation_changed,
                        MoveConvertPrecisionOps(computation));
    any_changed = any_changed || computation_changed;
  }
  return any_changed;
}
} | #include "xla/service/convert_mover.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
// Test fixture. Both HloTestBase booleans are disabled.
// NOTE(review): presumably these are verifier strictness flags
// (layout-sensitivity / mixed-precision) — confirm against hlo_test_base.h.
class ConvertMoverTest : public HloTestBase {
 public:
  ConvertMoverTest()
      : HloTestBase(false,
                    false) {}
};
// Matcher helper: a convert whose result element type is S8.
template <typename T>
auto MatchConvertToS8(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(S8));
}
// Matcher helper: a convert whose result element type is F16.
template <typename T>
auto MatchConvertToF16(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(F16));
}
// Matcher helper: a convert whose result element type is F32.
template <typename T>
auto MatchConvertToF32(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(F32));
}
// Matcher helper: a convert whose result element type is C64.
template <typename T>
auto MatchConvertToC64(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(C64));
}
// Widening converts feeding a concat are sunk below it.
TEST_F(ConvertMoverTest, MoveDownThroughConcat) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(f16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(MatchConvertToF32(
                  m::Concatenate(m::Parameter(0), m::Parameter(1)))));
}
// Converts with different source types (bf16 vs f16) must not be merged.
TEST_F(ConvertMoverTest, NoMoveDownThroughConcatWithDifferentSrcTypes) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(bf16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}
// A narrowing convert below a reshape is hoisted above it.
TEST_F(ConvertMoverTest, MoveUpReshape) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = f16[10,10] convert(f32[10,10] reshape(f32[100] parameter(0)))
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Reshape(MatchConvertToF16(m::Parameter(0)))));
}
// Hoisting is iterative: the convert moves past both transposes.
TEST_F(ConvertMoverTest, MoveUpTwoTransposes) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
t1 = transpose(f32[3,4] parameter(0)), dimensions={1,0}
t2 = transpose(t1), dimensions={1,0}
ROOT root = f16[3,4] convert(t2)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Transpose(
                  m::Transpose(MatchConvertToF16(m::Parameter(0))))));
}
// Sinking is iterative: the widening convert moves past both slices.
TEST_F(ConvertMoverTest, MoveDownTwoSlices) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
slice1 = f32[9] slice(f32[10] convert(f16[10] parameter(0))), slice={[0:9]}
ROOT slice2 = f32[8] slice(slice1), slice={[0:8]}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(MatchConvertToF32(m::Slice(m::Slice(m::Parameter(0))))));
}
// f32 -> c64 counts as widening (64 > 32 bits) and is sunk below the concat.
TEST_F(ConvertMoverTest, MoveDownC64) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[10] convert(f32[10] parameter(0)),
c64[10] convert(f32[10] parameter(1))),
dimensions={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(MatchConvertToC64(m::Concatenate(
                  m::Parameter(0),
                  m::Parameter(1)
                  ))));
}
// A complex constant with a nonzero imaginary part cannot be losslessly
// narrowed to f32, so the sink is blocked.
TEST_F(ConvertMoverTest, MoveDownC64Constant) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[2] convert(f32[2] parameter(0)),
c64[2] convert(f32[2] parameter(1)),
c64[2] constant({(1,1), (-1,-1)})),
dimensions={0}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}
// Hoisting a narrowing convert above a pad also narrows the padding value.
TEST_F(ConvertMoverTest, MoveUpPad) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = f32[10] pad(f32[8] parameter(0), f32[] constant(0)), padding=1_1
ROOT root = f16[10] convert(pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Pad(MatchConvertToF16(m::Parameter(0)),
                        MatchConvertToF16(m::ConstantEffectiveScalar(0)))));
}
// Hoisting does not check representability of constants: 1000 does not fit
// in s8, yet the convert is still hoisted (narrowing pass has no lossless
// check, unlike the sinking pass).
TEST_F(ConvertMoverTest, MoveUpPadWithOutOfRangeConstant) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = s32[10] pad(s32[8] parameter(0), s32[] constant(1000)), padding=1_1
ROOT root = s8[10] convert(pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Pad(MatchConvertToS8(m::Parameter(0)),
                        MatchConvertToS8(m::ConstantEffectiveScalar(1000)))));
}
// Sinking a widening convert below a pad narrows the padding constant
// (checked lossless: 0 round-trips through f16).
TEST_F(ConvertMoverTest, MoveDownPad) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(0)),
padding=1_1
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(MatchConvertToF32(m::Pad(
          m::Parameter(0), MatchConvertToF16(m::ConstantEffectiveScalar(0))))));
}
// 1e9 does not round-trip through f16, so the sink is blocked.
TEST_F(ConvertMoverTest, NoMoveDownPadBecauseConstantIsOutOfRange) {
  absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(1e9)),
padding=1_1
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}
}
} |
1,905 | cpp | tensorflow/tensorflow | real_imag_expander | third_party/xla/xla/service/real_imag_expander.cc | third_party/xla/xla/service/real_imag_expander_test.cc | #ifndef XLA_SERVICE_REAL_IMAG_EXPANDER_H_
#define XLA_SERVICE_REAL_IMAG_EXPANDER_H_
#include "xla/service/op_expander_pass.h"
namespace xla {
// Pass that expands real/imag ops applied to already-real (non-complex)
// inputs: real(x) => x, imag(x) => broadcasted zero.
class RealImagExpander : public OpExpanderPass {
 public:
  absl::string_view name() const override { return "real_imag_expander"; }

 protected:
  // Matches kReal/kImag instructions whose operand is not complex-typed.
  bool InstructionMatchesPattern(HloInstruction* inst) override;
  // Returns the replacement instruction for a matched real/imag op.
  absl::StatusOr<HloInstruction*> ExpandInstruction(
      HloInstruction* inst) override;
};
}
#endif
#include "xla/service/real_imag_expander.h"
#include "xla/literal_util.h"
namespace xla {
// Matches real/imag instructions whose operand is already real-valued;
// complex inputs are left for the backend to handle.
bool RealImagExpander::InstructionMatchesPattern(HloInstruction* inst) {
  const HloOpcode opcode = inst->opcode();
  if (opcode != HloOpcode::kReal && opcode != HloOpcode::kImag) {
    return false;
  }
  return !ShapeUtil::ElementIsComplex(inst->operand(0)->shape());
}
// For a non-complex operand: real(x) is the identity, and imag(x) is all
// zeros of the output shape.
absl::StatusOr<HloInstruction*> RealImagExpander::ExpandInstruction(
    HloInstruction* inst) {
  if (inst->opcode() == HloOpcode::kReal) {
    return inst->mutable_operand(0);
  } else {
    // Build a scalar zero of the operand's element type and broadcast it to
    // the instruction's shape.
    HloComputation* comp = inst->parent();
    auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::Zero(inst->operand(0)->shape().element_type())));
    zero = comp->AddInstruction(
        HloInstruction::CreateBroadcast(inst->shape(), zero, {}));
    return zero;
  }
}
} | #include "xla/service/real_imag_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
class RealImagExpanderTest : public HloTestBase {};
// real(f32 input) is replaced by the input itself.
TEST_F(RealImagExpanderTest, RealWithNonComplexInput) {
  const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = f32[4] parameter(0)
ROOT real = real(input)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  RealImagExpander expander;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
  EXPECT_TRUE(result);
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Parameter(0)));
}
// imag(f32 input) is replaced by a broadcasted zero.
TEST_F(RealImagExpanderTest, ImagWithNonComplexInput) {
  const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
ROOT imag = imag(input)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  RealImagExpander expander;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
  EXPECT_TRUE(result);
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Broadcast()));
  XLA_VLOG_LINES(1, module->ToString());
}
// Complex inputs are not expanded — the pass must report no change.
TEST_F(RealImagExpanderTest, RealImagWithComplexInput) {
  const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = c64[4] parameter(0)
real = real(input)
imag = imag(input)
ROOT t = tuple(real, imag)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  RealImagExpander expander;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
  EXPECT_FALSE(result);
}
// Chained imag ops (imag(imag(x)) built via MakeUnaryHlo) are fully expanded.
TEST_F(RealImagExpanderTest, MultipleImagWithNonComplexInput) {
  const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
imag1 = imag(input)
ROOT imag2 = imag(imag1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  // Rebuild the inner imag through MakeUnaryHlo so it has fresh metadata.
  auto param = module->entry_computation()->parameter_instruction(0);
  HloInstruction* imag1 =
      module->entry_computation()->root_instruction()->mutable_operand(0);
  TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_imag,
                          MakeUnaryHlo(HloOpcode::kImag, param));
  TF_ASSERT_OK(
      module->entry_computation()->ReplaceInstruction(imag1, new_imag));
  RealImagExpander expander;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
  EXPECT_TRUE(result);
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Broadcast()));
  XLA_VLOG_LINES(1, module->ToString());
}
}
} |
1,906 | cpp | tensorflow/tensorflow | map_inliner | third_party/xla/xla/service/map_inliner.cc | third_party/xla/xla/service/map_inliner_test.cc | #ifndef XLA_SERVICE_MAP_INLINER_H_
#define XLA_SERVICE_MAP_INLINER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that replaces kMap instructions with a direct application of the
// mapped computation when that computation is trivially inlinable (its root
// consumes only parameters). See MapInlinerVisitor for the rewrite rules.
class MapInliner : public HloModulePass {
 public:
  ~MapInliner() override = default;
  absl::string_view name() const override { return "map-inline"; }
  using HloPassInterface::Run;
  // Runs map inlining over every computation in `module` restricted to
  // `execution_threads`; returns true if any map was inlined.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/map_inliner.h"
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
// DFS visitor that performs the actual map-inlining rewrite on a single
// computation. Reusable: Run() rebinds computation_ and resets changed_.
class MapInlinerVisitor : public DfsHloVisitorWithDefault {
 public:
  explicit MapInlinerVisitor(HloComputation* computation)
      : computation_(computation) {}
  // All non-map instructions are left untouched.
  absl::Status DefaultAction(HloInstruction* ) override {
    return absl::OkStatus();
  }
  absl::Status HandleMap(HloInstruction* map) override;
  // Visits `computation` and returns whether any map was inlined.
  absl::StatusOr<bool> Run(HloComputation* computation);
 private:
  // Computation currently being rewritten (rebound by Run()).
  HloComputation* computation_;
  // True once at least one map in the current computation has been inlined.
  bool changed_ = false;
};
// Visits one computation, inlining maps as it goes. Note this overwrites the
// computation_ supplied at construction, so the same visitor instance can be
// reused across computations.
absl::StatusOr<bool> MapInlinerVisitor::Run(HloComputation* computation) {
  changed_ = false;
  computation_ = computation;
  TF_RETURN_IF_ERROR(computation->root_instruction()->Accept(this));
  return changed_;
}
// Inlines a kMap whose applied computation has a root that consumes only
// parameters. Three rewrite cases:
//   root is a parameter -> map just selects one of its operands;
//   root is a constant  -> map yields that constant everywhere (broadcast);
//   otherwise           -> clone the root elementwise over map's operands.
// Fusion roots are skipped entirely.
absl::Status MapInlinerVisitor::HandleMap(HloInstruction* map) {
  HloComputation* function = map->to_apply();
  HloInstruction& root = *function->root_instruction();
  if (hlo_query::AllOperandsAreParameters(root)) {
    if (root.opcode() == HloOpcode::kFusion) {
      // Cloning a fusion root here is not supported; leave the map as-is.
      return absl::OkStatus();
    }
    VLOG(10) << "inlining map({X ... Y}, op) => : op(X ... Y) with function "
             << root.ToShortString();
    if (root.opcode() == HloOpcode::kParameter) {
      // Map is the identity on the operand that the parameter selects.
      TF_RETURN_IF_ERROR(
          map->ReplaceAllUsesWith(map->operands()[root.parameter_number()]));
      TF_RETURN_IF_ERROR(computation_->RemoveInstruction(map));
    } else if (root.opcode() == HloOpcode::kConstant) {
      // Every output element is the same constant: broadcast it to map's
      // shape. The constant must first be cloned into this computation.
      HloInstruction* constant = computation_->AddInstruction(root.Clone());
      HloInstruction* placed_instruction = computation_->AddInstruction(
          HloInstruction::CreateBroadcast(map->shape(), constant, {}));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    } else {
      // General case: rebuild the root with map's operands substituted for
      // the computation's parameters (in parameter-number order).
      std::vector<HloInstruction*> params;
      for (int64_t o = 0; o < root.operands().size(); o++) {
        params.push_back(map->operands()[root.operand(o)->parameter_number()]);
      }
      HloInstruction* placed_instruction = computation_->AddInstruction(
          root.CloneWithNewOperands(map->shape(), params));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    }
    changed_ = true;
    return absl::OkStatus();
  }
  return absl::OkStatus();
}
// Applies map inlining to every computation of the module (restricted to the
// requested execution threads). Returns true if any computation changed.
absl::StatusOr<bool> MapInliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // The visitor rebinds itself to each computation inside
  // MapInlinerVisitor::Run, so its constructor argument is irrelevant here.
  MapInlinerVisitor visitor(nullptr);
  bool any_changed = false;
  for (HloComputation* comp : module->computations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(const bool comp_changed, visitor.Run(comp));
    any_changed = any_changed || comp_changed;
  }
  return any_changed;
}
} | #include "xla/service/map_inliner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using MapInlinerTest = HloTestBase;
// map({lhs, rhs}, max) should be inlined to an elementwise maximum(lhs, rhs),
// and the inlined module must still execute to the expected values.
TEST_F(MapInlinerTest, MapMax) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto max_builder = HloComputation::Builder(TestName());
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kMaximum, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapMaxFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Maximum(lhs, rhs));
  // Elementwise max of {1,2,3,4} and {4,3,2,1}.
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// A mapped computation whose root is a constant (ignoring its parameter)
// should be inlined into a broadcast of that constant.
TEST_F(MapInlinerTest, MapConstant) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto const2_builder = HloComputation::Builder(TestName());
  auto param1 = const2_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  // Parameter is deliberately unused by the computation's root.
  (void)param1;
  const2_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
  auto const2_f32 = const2_builder.Build();
  auto builder = HloComputation::Builder("MapConstFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(const2_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  root = hlo_module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::Broadcast(op::Constant()));
  // Every element of the 2x4 output is the constant 2.
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// The computation's parameters are declared in the opposite order of their
// parameter numbers ("x" is parameter 1, "y" is parameter 0), so inlining
// must wire map operands by parameter number, yielding subtract(rhs, lhs).
TEST_F(MapInlinerTest, MapSubtractOppositeOrder) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto max_builder = HloComputation::Builder(TestName());
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kSubtract, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapSubFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Subtract(rhs, lhs));
  // {4,3,2,1} - {1,2,3,4}.
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// A mapped computation whose root is itself a parameter collapses to the
// corresponding map operand (here parameter 1, i.e. rhs).
TEST_F(MapInlinerTest, MapParameter) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto param_builder = HloComputation::Builder(TestName());
  param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "p0"));
  param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "p1"));
  auto param_f32 = param_builder.Build();
  auto builder = HloComputation::Builder("MapParamFunction");
  auto lhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
  auto rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4)));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(param_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR0<float>(4);
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
}
} |
1,907 | cpp | tensorflow/tensorflow | convolution_pred_expander | third_party/xla/xla/service/convolution_pred_expander.cc | third_party/xla/xla/service/convolution_pred_expander_test.cc | #ifndef XLA_SERVICE_CONVOLUTION_PRED_EXPANDER_H_
#define XLA_SERVICE_CONVOLUTION_PRED_EXPANDER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
// Expander pass that rewrites convolutions over PRED (boolean) operands,
// which backends generally cannot execute directly: the operands are
// converted to F16, the convolution runs in F16, and the result is converted
// back to PRED.
class ConvolutionPredExpander : public OpExpanderPass {
 public:
  absl::string_view name() const override {
    return "convolution-pred-expander";
  }
 protected:
  // Matches convolutions whose operands and result are all PRED.
  bool InstructionMatchesPattern(HloInstruction* instruction) override;
  // Rewrites one matched convolution; returns the replacement root.
  absl::StatusOr<HloInstruction*> ExpandInstruction(
      HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/convolution_pred_expander.h"
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace m = match;
// Matches a convolution whose operands and result are all PRED-typed.
bool ConvolutionPredExpander::InstructionMatchesPattern(
    HloInstruction* instruction) {
  const auto pred_conv = m::Convolution(m::Op().WithElementType(PRED),
                                        m::Op().WithElementType(PRED))
                             .WithElementType(PRED);
  return Match(instruction, pred_conv);
}
// Rewrites a PRED convolution as convert(F16) -> convolution -> convert(PRED).
absl::StatusOr<HloInstruction*> ConvolutionPredExpander::ExpandInstruction(
    HloInstruction* instruction) {
  HloComputation* computation = instruction->parent();
  // Convert each PRED operand to F16 so the convolution runs on a
  // floating-point type.
  absl::InlinedVector<HloInstruction*, 2> converted_operands;
  for (HloInstruction* operand : instruction->operands()) {
    CHECK_EQ(operand->shape().element_type(), PRED);
    converted_operands.push_back(MakeConvertToHlo(operand, F16));
  }
  // Clone the convolution with an F16 result shape, then convert back.
  Shape f16_shape = ShapeUtil::ChangeElementType(instruction->shape(), F16);
  HloInstruction* f16_conv = computation->AddInstruction(
      instruction->CloneWithNewOperands(f16_shape, converted_operands));
  return MakeConvertToHlo(f16_conv, PRED);
}
} | #include "xla/service/convolution_pred_expander.h"
#include <string>
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = match;
using ConvolutionPredExpanderTest = HloTestBase;
// A PRED convolution should be expanded into convert(F16)->conv->convert(PRED).
TEST_F(ConvolutionPredExpanderTest, Match) {
  std::string hlo_string = R"(HloModule convolution_pred
  ENTRY convolution_computation {
    input = pred[10,10]{1,0} parameter(0)
    kernel = pred[10,10]{1,0} parameter(1)
    ROOT conv = pred[10,10]{1,0} convolution(input, kernel), dim_labels=bf_io->bf
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ConvolutionPredExpander expander_pass;
  ASSERT_TRUE(expander_pass.Run(module.get()).value());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Convert(m::Convolution(m::Op().WithElementType(F16),
                                                   m::Op().WithElementType(F16))
                                        .WithElementType(F16))
                             .WithElementType(PRED)));
}
// Non-PRED (here s8) convolutions must be left untouched by the pass.
TEST_F(ConvolutionPredExpanderTest, NoMatch) {
  std::string hlo_string = R"(HloModule convolution_s8
  ENTRY convolution_computation {
    input = s8[10,10]{1,0} parameter(0)
    kernel = s8[10,10]{1,0} parameter(1)
    ROOT conv = s8[10,10]{1,0} convolution(input, kernel), dim_labels=bf_io->bf
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ConvolutionPredExpander expander_pass;
  ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
}
} |
1,908 | cpp | tensorflow/tensorflow | host_offloading_prepare | third_party/xla/xla/service/host_offloading_prepare.cc | third_party/xla/xla/service/host_offloading_prepare_test.cc | #ifndef XLA_SERVICE_HOST_OFFLOADING_PREPARE_H_
#define XLA_SERVICE_HOST_OFFLOADING_PREPARE_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that prepares host-offloaded (execution_thread="host") computations.
// It runs in one of two modes, chosen at construction:
//   kElideMoveToHost    - strip MoveToHost custom-calls feeding host
//                         async-start ops;
//   kConvertToCustomCall - rewrite the wrapped host kCall into a
//                          "HostExecute" custom-call.
class HostOffloadingPrepare : public HloModulePass {
 public:
  enum class Rewrite {
    kElideMoveToHost,
    kConvertToCustomCall,
  };
  // Human-readable suffix used to build the pass name.
  static std::string RewriteName(Rewrite rewrite) {
    switch (rewrite) {
      case Rewrite::kElideMoveToHost:
        return "elide-move-to-host";
      case Rewrite::kConvertToCustomCall:
        return "convert-to-custom-call";
    }
  }
  explicit HostOffloadingPrepare(Rewrite rewrite)
      : rewrite_(rewrite),
        pass_name_(absl::StrCat("host-offloading-prepare", "-",
                                RewriteName(rewrite_))) {}
  absl::string_view name() const override { return pass_name_; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  Rewrite rewrite_;
  // Cached "host-offloading-prepare-<rewrite>" name returned by name().
  std::string pass_name_;
};
}
#endif
#include "xla/service/host_offloading_prepare.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget;
// Returns true if `instruction` is an async-start that runs on the host
// thread and wraps a kCall (i.e. the start of a host-offloaded computation).
bool IsHostAsyncStart(const HloInstruction* instruction) {
  if (instruction->opcode() != HloOpcode::kAsyncStart) {
    return false;
  }
  if (instruction->async_execution_thread() != HloInstruction::kHostThread) {
    return false;
  }
  return instruction->async_wrapped_instruction()->opcode() == HloOpcode::kCall;
}
// Removes MoveToHost custom-calls that feed `async_start`, rewiring each one's
// users to its single input. Returns true if anything was removed.
// NOTE(review): ReplaceAllUsesWith mutates async_start's operand list while it
// is being iterated; this appears to be in-place pointer replacement (same
// size), but confirm the iteration stays valid.
absl::StatusOr<bool> RemoveSurroundingMoveCustomCalls(
    HloInstruction* async_start) {
  bool removed = false;
  for (HloInstruction* operand : async_start->operands()) {
    if (operand->IsCustomCall(kMoveToHostCustomCallTarget)) {
      // MoveToHost is a unary custom-call; forward its input directly.
      CHECK_EQ(operand->operands().size(), 1);
      VLOG(1) << "Replacing " << operand->ToString() << " with "
              << operand->operands().at(0)->ToString();
      TF_RETURN_IF_ERROR(
          operand->ReplaceAllUsesWith(operand->mutable_operand(0)));
      TF_RETURN_IF_ERROR(async_start->parent()->RemoveInstruction(operand));
      removed = true;
    }
  }
  return removed;
}
// Strips MoveToHost custom-calls that directly feed host async-start ops
// anywhere in the module. Returns true if the module changed.
absl::StatusOr<bool> ElideMoveCustomCalls(HloModule* module) {
  bool changed = false;
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  // This first loop only emits VLOG diagnostics about host computations and
  // their callers; it does not modify the module.
  for (HloComputation* computation : module->computations()) {
    if (computation->execution_thread() != HloInstruction::kHostThread) {
      continue;
    }
    std::vector<HloInstruction*> callers =
        call_graph->GetComputationCallers(computation);
    for (HloInstruction* caller : callers) {
      VLOG(2) << "Hlo computation " << computation->name()
              << " is offloaded to host and has caller " << caller->ToString();
      if (caller->parent()->execution_thread() == HloInstruction::kHostThread) {
        VLOG(3) << "Nested host computation, must be a async-wrapper";
        continue;
      }
      VLOG(2) << "Going to adjust before and after " << caller->name();
    }
  }
  // Actual rewrite: elide MoveToHost operands of every host async-start.
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (IsHostAsyncStart(instruction)) {
        VLOG(2) << "Found async start of host computation: "
                << instruction->ToString() << " done must be "
                << instruction->users().at(0)->ToString();
        TF_ASSIGN_OR_RETURN(bool removed,
                            RemoveSurroundingMoveCustomCalls(instruction));
        changed = changed || removed;
      }
    }
  }
  return changed;
}
// For every host async-start, replaces the wrapped kCall with an equivalent
// "HostExecute" custom-call targeting the same host computation, preserving
// output/operand aliasing. Returns true if the module changed.
absl::StatusOr<bool> ConvertToCustomCall(HloModule* module) {
  bool changed = false;
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (IsHostAsyncStart(instruction)) {
        auto* call_start = Cast<HloAsyncInstruction>(instruction);
        auto* call = call_start->async_wrapped_instruction();
        // Build the replacement custom-call with the same shape, operands,
        // and called computation as the original kCall.
        auto custom_call = HloInstruction::CreateCustomCall(
            call->shape(), call->operands(), call->called_computations().at(0),
            "HostExecute");
        custom_call->set_output_to_operand_aliasing(
            call->output_operand_aliasing());
        // Swap the custom-call in as the async computation's root, then
        // delete the now-unused kCall.
        HloComputation* async_computation =
            call_start->async_wrapped_computation();
        async_computation->set_root_instruction(
            async_computation->AddInstruction(std::move(custom_call)));
        TF_RETURN_IF_ERROR(async_computation->RemoveInstruction(call));
        changed = true;
      }
    }
  }
  return changed;
}
}
// Dispatches to the rewrite selected at construction. The switch covers all
// Rewrite enumerators, so no trailing return is needed.
absl::StatusOr<bool> HostOffloadingPrepare::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  switch (rewrite_) {
    case Rewrite::kElideMoveToHost:
      return ElideMoveCustomCalls(module);
    case Rewrite::kConvertToCustomCall:
      return ConvertToCustomCall(module);
  }
}
} | #include "xla/service/host_offloading_prepare.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Rewrite = HostOffloadingPrepare::Rewrite;
// Fixture providing helpers to run a HostOffloadingPrepare rewrite and to
// collect the host async-start instructions of a module.
class HostOffloadingPrepareTest : public HloTestBase {
 protected:
  // Verifies the (unscheduled) module, then runs the requested rewrite and
  // returns whether the module changed.
  absl::StatusOr<bool> RunRewrite(HloModule* module, Rewrite rewrite) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostOffloadingPrepare pass(rewrite);
    TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
    return changed;
  }
  // Returns every async-start in the module that executes on the host thread.
  std::vector<const HloInstruction*> GetHostOffloadAsyncStartInstructions(
      const HloModule* module) {
    std::vector<const HloInstruction*> result;
    for (const HloComputation* computation : module->computations()) {
      for (const HloInstruction* instruction : computation->instructions()) {
        if (instruction->opcode() == HloOpcode::kAsyncStart &&
            instruction->async_execution_thread() ==
                HloInstruction::kHostThread) {
          result.push_back(instruction);
        }
      }
    }
    return result;
  }
};
// A single MoveToHost feeding the host async-start must be elided: afterwards
// no operand is a MoveToHost and no user is a MoveToDevice custom-call.
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToHost) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
      start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host), async_execution_thread="host", calls=async_computation
      ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}
// The same MoveToHost feeds both operands of the async-start; both uses must
// be elided.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToHost) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      Arg_0.1 = s32[32]{0} parameter(1)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      param_1 = s32[32]{0} parameter(1)
      ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
      start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host, move_to_host), async_execution_thread="host", calls=async_computation
      ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}
// Two distinct MoveToHost custom-calls feed the async-start; both must be
// elided.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToHost) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      Arg_0.1 = s32[32]{0} parameter(1)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      param_1 = s32[32]{0} parameter(1)
      ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_host.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
      move_to_host.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
      start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host.1, move_to_host.2), async_execution_thread="host", calls=async_computation
      ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}
// MoveToDevice (not MoveToHost) feeding the async-start is not the elide
// target: the pass must report no change.
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToDevice) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
      start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device), async_execution_thread="host", calls=async_computation
      ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}
// Same MoveToDevice feeding both async-start operands: still not an elide
// target, so no change.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToDevice) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      Arg_0.1 = s32[32]{0} parameter(1)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      param_1 = s32[32]{0} parameter(1)
      ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
      custom-call.cloned.call-start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device, move_to_device), async_execution_thread="host", calls=async_computation
      ROOT custom-call.cloned.call-done = s32[32]{0:T(128)} async-done(custom-call.cloned.call-start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}
// Two distinct MoveToDevice custom-calls feeding the async-start: still not
// an elide target, so no change.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToDevice) {
  const std::string& hlo_string = R"(
    HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
    host_computation {
      Arg_0.0 = s32[32]{0} parameter(0)
      Arg_0.1 = s32[32]{0} parameter(1)
      ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32]{0} parameter(0)
      param_1 = s32[32]{0} parameter(1)
      ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32]{0:T(128)} parameter(0)
      constant.2 = s32[]{:T(128)} constant(2)
      broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
      multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
      move_to_device.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
      move_to_device.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
      start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device.1, move_to_device.2), async_execution_thread="host", calls=async_computation
      ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}
// Exercises the kConvertToCustomCall rewrite on a host async-start.
// NOTE(review): `expected` is an effectively empty FileCheck pattern, so this
// test only verifies that the rewrite runs without error — it does not check
// the rewritten HLO. Consider adding CHECK lines for the HostExecute
// custom-call.
TEST_F(HostOffloadingPrepareTest, ConvertToCustomCall) {
  const char* hlo = R"(
    HloModule my_module
    host_computation {
      Arg_0.0 = s32[32] parameter(0)
      ROOT multiply.0 = s32[32] multiply(Arg_0.0, Arg_0.0)
    }, execution_thread="host"
    async_computation {
      param_0 = s32[32] parameter(0)
      ROOT call = s32[32] call(param_0), to_apply=host_computation
    }, execution_thread="host"
    ENTRY main {
      Arg_0.1 = s32[32] parameter(0)
      start = ((s32[32]), s32[32], u32[]) async-start(Arg_0.1),
              async_execution_thread="host", calls=async_computation
      ROOT done = s32[32] async-done(start)
    }
  )";
  const char* expected = R"(
  )";
  RunAndFilecheckHloRewrite(
      hlo, HostOffloadingPrepare(Rewrite::kConvertToCustomCall), expected);
}
}
} |
1,909 | cpp | tensorflow/tensorflow | convolution_4d_expander | third_party/xla/xla/service/convolution_4d_expander.cc | third_party/xla/xla/service/convolution_4d_expander_test.cc | #ifndef XLA_SERVICE_CONVOLUTION_4D_EXPANDER_H_
#define XLA_SERVICE_CONVOLUTION_4D_EXPANDER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
// Expander pass targeting 4D-spatial convolutions that have at least one
// trivial (size-1, unpadded) spatial dimension, which can be removed to
// produce a lower-rank convolution.
class Convolution4DExpander : public OpExpanderPass {
 public:
  absl::string_view name() const override { return "convolution_4d_expander"; }
 protected:
  // Matches 4D convolutions with a removable trivial spatial dimension.
  bool InstructionMatchesPattern(HloInstruction* instruction) override;
  absl::StatusOr<HloInstruction*> ExpandInstruction(
      HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/convolution_4d_expander.h"
#include <algorithm>
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
// Returns true for a convolution with exactly four spatial dimensions of
// which at least one is trivial: input size 1 and no low/high padding.
// Such a dimension can be dropped by ExpandInstruction.
bool Convolution4DExpander::InstructionMatchesPattern(
    HloInstruction* instruction) {
  if (instruction->opcode() != HloOpcode::kConvolution) {
    return false;
  }
  const ConvolutionDimensionNumbers& dim_nums =
      instruction->convolution_dimension_numbers();
  if (dim_nums.input_spatial_dimensions().size() != 4) {
    return false;
  }
  // Bind by const reference: shape() returns a reference, and Shape is
  // expensive to copy (the original copied it by value).
  const Shape& input = instruction->operand(0)->shape();
  for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
    int64_t spatial_dim = dim_nums.input_spatial_dimensions(i);
    if (input.dimensions(spatial_dim) == 1 &&
        instruction->window().dimensions(i).padding_low() == 0 &&
        instruction->window().dimensions(i).padding_high() == 0) {
      return true;
    }
  }
  return false;
}
absl::StatusOr<HloInstruction*> Convolution4DExpander::ExpandInstruction(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
ConvolutionDimensionNumbers dim_nums =
instruction->convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dim_nums = dim_nums;
std::vector<int64_t> removed_input_dimensions;
std::vector<int64_t> removed_kernel_dimensions;
std::vector<int64_t> removed_output_dimensions;
new_dim_nums.clear_input_spatial_dimensions();
new_dim_nums.clear_output_spatial_dimensions();
new_dim_nums.clear_kernel_spatial_dimensions();
Window new_window;
HloInstruction* input = instruction->mutable_operand(0);
for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
int64_t input_spatial_dim = dim_nums.input_spatial_dimensions(i);
int64_t output_spatial_dim = dim_nums.output_spatial_dimensions(i);
int64_t kernel_spatial_dim = dim_nums.kernel_spatial_dimensions(i);
if (input->shape().dimensions(input_spatial_dim) == 1 &&
instruction->window().dimensions(i).padding_low() == 0 &&
instruction->window().dimensions(i).padding_high() == 0) {
removed_input_dimensions.push_back(input_spatial_dim);
removed_output_dimensions.push_back(output_spatial_dim);
removed_kernel_dimensions.push_back(kernel_spatial_dim);
} else {
*new_window.add_dimensions() = instruction->window().dimensions(i);
new_dim_nums.add_input_spatial_dimensions(input_spatial_dim);
new_dim_nums.add_output_spatial_dimensions(output_spatial_dim);
new_dim_nums.add_kernel_spatial_dimensions(kernel_spatial_dim);
}
}
std::sort(removed_input_dimensions.begin(), removed_input_dimensions.end(),
std::greater<>());
std::sort(removed_output_dimensions.begin(), removed_output_dimensions.end(),
std::greater<>());
std::sort(removed_kernel_dimensions.begin(), removed_kernel_dimensions.end(),
std::greater<>());
Shape new_input_shape = input->shape();
for (int64_t dim : removed_input_dimensions) {
new_input_shape.DeleteDimension(dim);
}
HloInstruction* kernel = instruction->mutable_operand(1);
Shape new_kernel_shape = kernel->shape();
for (int64_t dim : removed_kernel_dimensions) {
new_kernel_shape.DeleteDimension(dim);
}
Shape new_output_shape = instruction->shape();
for (int64_t dim : removed_output_dimensions) {
new_output_shape.DeleteDimension(dim);
}
auto compute_new_dimension =
[](const std::vector<int64_t>& removed_dimensions,
int64_t old_dimension) {
int64_t num_smaller = absl::c_count_if(
removed_dimensions, [old_dimension](int64_t removed_dimension) {
return removed_dimension < old_dimension;
});
return old_dimension - num_smaller;
};
new_dim_nums.set_input_batch_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_batch_dimension()));
new_dim_nums.set_input_feature_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.input_spatial_dimensions().size(); ++i) {
new_dim_nums.set_input_spatial_dimensions(
i, compute_new_dimension(removed_input_dimensions,
new_dim_nums.input_spatial_dimensions(i)));
}
new_dim_nums.set_output_batch_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_batch_dimension()));
new_dim_nums.set_output_feature_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.output_spatial_dimensions().size();
++i) {
new_dim_nums.set_output_spatial_dimensions(
i, compute_new_dimension(removed_output_dimensions,
new_dim_nums.output_spatial_dimensions(i)));
}
new_dim_nums.set_kernel_input_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_input_feature_dimension()));
new_dim_nums.set_kernel_output_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.kernel_spatial_dimensions().size();
++i) {
new_dim_nums.set_kernel_spatial_dimensions(
i, compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_spatial_dimensions(i)));
}
HloInstruction* reshaped_input = computation->AddInstruction(
HloInstruction::CreateReshape(new_input_shape, input));
HloInstruction* reshaped_kernel = computation->AddInstruction(
HloInstruction::CreateReshape(new_kernel_shape, kernel));
instruction->set_convolution_dimension_numbers(new_dim_nums);
instruction->set_window(new_window);
HloInstruction* new_convolution =
computation->AddInstruction(instruction->CloneWithNewOperands(
new_output_shape, {reshaped_input, reshaped_kernel}));
return computation->AddInstruction(
HloInstruction::CreateReshape(instruction->shape(), new_convolution));
}
} | #include "xla/service/convolution_4d_expander.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
using Convolution4DExpanderTest = HloTestBase;
// One trivial spatial dim pair (dims 0 and 2 have extent 1): the pass should
// produce a reshape around a 2D convolution.
TEST_F(Convolution4DExpanderTest, ConvertTo2DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
// New root is the reshape back to the original output shape.
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 2);
}
// Dim 2 has extent 1 but nonzero padding, so only one dimension (dim 0) is
// removable: the result is a 3D convolution.
TEST_F(Convolution4DExpanderTest, ConvertTo3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,2,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 pad=0_0x0_0x1_0x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 3);
}
// All four spatial dims are trivial: the convolution degenerates to a 0D
// (purely batch/feature) convolution.
TEST_F(Convolution4DExpanderTest, ConvertTo0DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,1,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,1,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,1,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 0);
}
// The pass only targets 4D convolutions; a 3D convolution is left alone even
// though it has trivial spatial dimensions.
TEST_F(Convolution4DExpanderTest, DontConvert3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,5,20]{4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,15]{4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,5]{4,3,2,1,0} convolution(input, kernel), dim_labels=012bf_i012o->f012b, window={size=1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 3);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
// No input spatial dimension has extent 1, so nothing can be removed.
TEST_F(Convolution4DExpanderTest, DontConvertIfNoTrivialDimensionAvailable) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[2,10,2,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,2,2,2,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=2x2x2x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
// Extent-1 dimensions with nonzero padding are not trivial; with padding on
// both candidate dims, the convolution must be left unchanged.
TEST_F(Convolution4DExpanderTest, DontConvertIfPaddingIsNonzero) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 stride=2x1x2x1 pad=1_0x0_0x0_1x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
}
} |
1,910 | cpp | tensorflow/tensorflow | all_reduce_contiguous | third_party/xla/xla/service/all_reduce_contiguous.cc | third_party/xla/xla/service/all_reduce_contiguous_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_CONTIGUOUS_H_
#define XLA_SERVICE_ALL_REDUCE_CONTIGUOUS_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that rewrites each multi-operand all-reduce into a single all-reduce
// over one contiguous buffer (bitcast + concatenate), then slices and
// bitcasts the result back into the original per-operand shapes.
class AllReduceContiguous : public HloModulePass {
public:
absl::string_view name() const override { return "all-reduce-contiguous"; }
using HloPassInterface::Run;
// Returns true iff at least one all-reduce was rewritten.
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/all_reduce_contiguous.h"
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
// Rewrites one multi-operand all-reduce into an all-reduce over a single
// flattened, concatenated buffer, then reconstructs each original output by
// slicing and bitcasting. The original instruction is replaced by a tuple of
// the reconstructed outputs.
absl::Status ReplaceWithContiguousAllReduce(
HloAllReduceInstruction* all_reduce) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
// NOTE(review): the element type is taken from operand 0 — this assumes all
// operands share the same element type; verify against pass preconditions.
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
// Flatten each operand to rank 1 via bitcast, tracking the total length.
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
int64_t total_size = 0;
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
total_size += num_elements;
}
// Concatenate the flattened operands and all-reduce the single buffer,
// preserving the reduction computation, replica groups, channel id, and
// use_global_device_ids of the original op.
Shape concat_shape = ShapeUtil::MakeShape(element_type, {total_size});
HloInstruction* concatenated =
computation.AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, flat_operands, 0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
concat_shape, {concatenated}, all_reduce->to_apply(),
all_reduce->device_list(),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
// Slice each operand's segment back out and bitcast to its original shape.
std::vector<HloInstruction*> outputs;
outputs.reserve(all_reduce->operand_count());
int64_t offset = 0;
for (int64_t i = 0; i < all_reduce->operand_count(); ++i) {
const Shape& flat_shape = flat_operands[i]->shape();
int64_t end = offset + flat_shape.dimensions(0);
HloInstruction* sliced = computation.AddInstruction(
HloInstruction::CreateSlice(flat_shape, new_all_reduce,
{offset},
{end},
{1}));
outputs.push_back(computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), sliced)));
offset = end;
}
// Swap the original all-reduce for a tuple of the reconstructed results.
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
all_reduce, HloInstruction::CreateTuple(outputs)));
return absl::OkStatus();
}
}
// Finds every multi-operand all-reduce in the module and rewrites it into a
// single contiguous all-reduce. Returns true iff any rewrite happened.
// Modules containing layout-constrained all-reduces are skipped entirely.
absl::StatusOr<bool> AllReduceContiguous::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(1) << "Running AllReduceContiguous";
  if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
    VLOG(1)
        << "Skip AllReduceContiguous because the module contains all-reduce "
           "with constrained layouts";
    return false;
  }
  // Collect the candidates first; rewriting while walking the instruction
  // lists would invalidate the iteration.
  std::vector<HloAllReduceInstruction*> candidates;
  for (HloComputation* comp :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* hlo : comp->instructions()) {
      const bool is_multi_operand_all_reduce =
          hlo->opcode() == HloOpcode::kAllReduce && hlo->operand_count() > 1;
      if (is_multi_operand_all_reduce) {
        candidates.push_back(Cast<HloAllReduceInstruction>(hlo));
      }
    }
  }
  for (HloAllReduceInstruction* candidate : candidates) {
    TF_RETURN_IF_ERROR(ReplaceWithContiguousAllReduce(candidate));
  }
  return !candidates.empty();
}
} | #include "xla/service/all_reduce_contiguous.h"
#include <memory>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllReduceContiguousTest = HloTestBase;
// Two-operand all-reduce (128 + 16 elements) should become one f32[144]
// all-reduce over a concatenation of bitcast-flattened operands, with the
// outputs sliced back out at offsets [0,128) and [128,144).
TEST_F(AllReduceContiguousTest, Simple) {
const absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[4,4] parameter(1)
ROOT crs = (f32[128], f32[4,4]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceContiguous pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
auto crs =
AllOf(op::Shape("f32[144]"),
op::AllReduce(op::Concatenate(op::Bitcast(op::Parameter(0)),
op::Bitcast(op::Parameter(1)))));
ASSERT_THAT(
root,
op::Tuple(AllOf(op::Shape("f32[128]"), op::Bitcast(op::Slice(crs))),
AllOf(op::Shape("f32[4,4]"), op::Bitcast(op::Slice(crs)))));
// Check the exact slice boundaries of each reconstructed output.
EXPECT_EQ(root->operand(0)->operand(0)->slice_starts(0), 0);
EXPECT_EQ(root->operand(0)->operand(0)->slice_limits(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_starts(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_limits(0), 128 + 4 * 4);
}
}
} |
1,911 | cpp | tensorflow/tensorflow | custom_call_target_registry | third_party/xla/xla/service/custom_call_target_registry.cc | third_party/xla/xla/service/custom_call_target_registry_test.cc | #ifndef XLA_SERVICE_CUSTOM_CALL_TARGET_REGISTRY_H_
#define XLA_SERVICE_CUSTOM_CALL_TARGET_REGISTRY_H_
#include <cstddef>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
namespace xla {
// Thread-safe registry mapping (symbol name, platform name) pairs to the
// function-pointer addresses of custom-call implementations.
class CustomCallTargetRegistry {
public:
// Returns the process-wide singleton registry.
static CustomCallTargetRegistry* Global();
// Registers `address` for (`symbol`, `platform`). Re-registering with a
// different address terminates the process (see .cc implementation).
void Register(const std::string& symbol, void* address,
const std::string& platform);
// Returns the registered address, or nullptr if none exists.
void* Lookup(const std::string& symbol, const std::string& platform) const;
// Returns a snapshot of all symbol->address registrations for `platform`.
std::unordered_map<std::string, void*> registered_symbols(
const std::string& platform) const;
private:
// Simple pair hash: combines the two string hashes as h1 ^ (31 * h2).
struct HashPairOfStrings {
size_t operator()(const std::pair<std::string, std::string>& k) const {
std::hash<std::string> hasher;
size_t h1 = hasher(k.first);
size_t h2 = hasher(k.second);
return h1 ^ 31 * h2;
}
};
// Keyed by (symbol, platform); guarded by mu_.
std::unordered_map<std::pair<std::string, std::string>, void*,
HashPairOfStrings>
registered_symbols_;
mutable std::mutex mu_;
};
// Helper whose constructor performs a registration; used via the macros
// below to register custom-call targets at static-initialization time.
class RegisterCustomCallTarget {
public:
explicit RegisterCustomCallTarget(const std::string& name, void* address,
const std::string& platform) {
CustomCallTargetRegistry::Global()->Register(name, address, platform);
}
};
// Token-pasting helper used to build a unique variable name per registration.
#define XLA_REGISTER_CUSTOM_CALL_CONCAT(a, b) a##b
// Defines a uniquely named static registrar object for (symbol, platform).
#define XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM_HELPER(symbol, address, \
platform, counter) \
static ::xla::RegisterCustomCallTarget XLA_REGISTER_CUSTOM_CALL_CONCAT( \
custom_call_target_register, counter)( \
symbol, reinterpret_cast<void*>(address), platform)
// Registers `function` under its stringified name on `platform`.
#define XLA_REGISTER_CUSTOM_CALL_TARGET(function, platform) \
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(#function, function, platform)
// __COUNTER__ guarantees a unique registrar name per expansion.
#define XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(symbol, address, platform) \
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM_HELPER(symbol, address, platform, \
__COUNTER__)
// Convenience forms for the CPU ("Host") platform.
#define XLA_CPU_REGISTER_CUSTOM_CALL_TARGET(function) \
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(#function, function, "Host")
#define XLA_CPU_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(symbol, address) \
XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(symbol, address, "Host")
}
#endif
#include "xla/service/custom_call_target_registry.h"
#include <cstdlib>
#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
namespace xla {
// Returns the lazily-constructed, process-wide registry singleton. The
// instance is heap-allocated and never deleted, so it remains valid during
// static destruction.
CustomCallTargetRegistry* CustomCallTargetRegistry::Global() {
  static CustomCallTargetRegistry* const kGlobalRegistry =
      new CustomCallTargetRegistry;
  return kGlobalRegistry;
}
// Registers `address` as the implementation of `symbol` on `platform`.
// Re-registering the same (symbol, platform) pair with the same address is a
// harmless no-op; registering it with a *different* address terminates the
// process, because silently choosing one of the two implementations at
// runtime would be unpredictable.
void CustomCallTargetRegistry::Register(const std::string& symbol,
                                        void* address,
                                        const std::string& platform) {
  std::lock_guard<std::mutex> lock(mu_);
  const auto [it, inserted] =
      registered_symbols_.insert({{symbol, platform}, address});
  if (!inserted && it->second != address) {
    // Fixed message formatting: the original string lacked a space before
    // "(current)" and a sentence separator before "Rejecting", producing
    // e.g. "addresses 0x1234(current)" and "on platform HostRejecting".
    std::cerr << "Duplicate custom call registration detected for symbol \""
              << symbol << "\" with different addresses " << address
              << " (current) and " << it->second << " (previous) on platform "
              << platform
              << ". Rejecting the registration to avoid confusion about which "
                 "symbol would actually get used at runtime.\n";
    std::exit(1);
  }
}
// Returns the address registered for (`symbol`, `platform`), or nullptr if
// no such registration exists. Thread-safe.
void* CustomCallTargetRegistry::Lookup(const std::string& symbol,
                                       const std::string& platform) const {
  std::lock_guard<std::mutex> lock(mu_);
  const auto entry = registered_symbols_.find({symbol, platform});
  if (entry == registered_symbols_.end()) {
    return nullptr;
  }
  return entry->second;
}
// Returns a snapshot of every symbol registered for `platform`, keyed by
// symbol name. Thread-safe; the returned map is an independent copy.
std::unordered_map<std::string, void*>
CustomCallTargetRegistry::registered_symbols(
    const std::string& platform) const {
  std::unordered_map<std::string, void*> snapshot;
  std::lock_guard<std::mutex> lock(mu_);
  for (const auto& entry : registered_symbols_) {
    const std::pair<std::string, std::string>& key = entry.first;
    // key.second is the platform; key.first is the symbol name.
    if (key.second == platform) {
      snapshot.emplace(key.first, entry.second);
    }
  }
  return snapshot;
}
} | #include "xla/service/custom_call_target_registry.h"
#include "xla/service/custom_call_status.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::_;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
void custom_call(void*, const void**, XlaCustomCallStatus*) {}
void custom_call2(void*, const void**, XlaCustomCallStatus*) {}
// Registrations are per-(symbol, platform); the same symbol may map to the
// same address on multiple platforms, and re-registering an identical
// address is a no-op.
TEST(CustomCallRegistryTest, Registers) {
CustomCallTargetRegistry registry;
EXPECT_EQ(registry.Lookup("custom_call", "Host"), nullptr);
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_EQ(custom_call, registry.Lookup("custom_call", "Host"));
registry.Register("custom_call2", reinterpret_cast<void*>(&custom_call),
"Host");
EXPECT_EQ(registry.Lookup("custom_call", "CUDA"), nullptr);
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"CUDA");
EXPECT_EQ(custom_call, registry.Lookup("custom_call", "CUDA"));
// Duplicate registration with the *same* address must be accepted.
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_THAT(
registry.registered_symbols("Host"),
UnorderedElementsAre(Pair("custom_call", _), Pair("custom_call2", _)));
EXPECT_THAT(registry.registered_symbols("CUDA"),
UnorderedElementsAre(Pair("custom_call", _)));
}
// Re-registering the same symbol with a *different* address must abort the
// process with a diagnostic.
TEST(CustomCallRegistryDeathTest, RejectsDuplicateRegistrations) {
CustomCallTargetRegistry registry;
registry.Register("custom_call", reinterpret_cast<void*>(custom_call),
"Host");
EXPECT_DEATH(registry.Register("custom_call",
reinterpret_cast<void*>(custom_call2), "Host"),
"Duplicate custom call");
}
}
} |
1,912 | cpp | tensorflow/tensorflow | gather_simplifier | third_party/xla/xla/service/gather_simplifier.cc | third_party/xla/xla/service/gather_simplifier_test.cc | #ifndef XLA_SERVICE_GATHER_SIMPLIFIER_H_
#define XLA_SERVICE_GATHER_SIMPLIFIER_H_
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
// Rewrites gathers into a canonical form: rank-2 start indices with
// index_vector_dim=1, an identity start_index_map, no collapsed slice dims,
// and offset dims immediately after the leading index dim.
class GatherSimplifier : public OpExpanderPass {
public:
absl::string_view name() const override { return "gather_simplifier"; }
// Returns true if `gather` is already in the canonical form produced by
// this pass (so no rewrite is needed).
static bool IsSimplifiedGather(const HloGatherInstruction* gather);
protected:
bool InstructionMatchesPattern(HloInstruction* inst) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* inst) override;
};
}
#endif
#include "xla/service/gather_simplifier.h"
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Rewrites `inst` (a non-canonical gather) into the canonical form: a gather
// over transposed operands and 2D start indices, followed by transposes and
// reshapes that restore the original output shape.
absl::StatusOr<HloInstruction*> GatherSimplifier::ExpandInstruction(
HloInstruction* inst) {
auto* gather = DynCast<HloGatherInstruction>(inst);
// A zero-sized slice gathers nothing; the result is all zeros.
if (absl::c_linear_search(gather->gather_slice_sizes(), 0)) {
auto* zero = gather->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(gather->shape().element_type())));
return gather->AddInstruction(
HloInstruction::CreateBroadcast(gather->shape(), zero, {}));
}
const auto& dims = gather->gather_dimension_numbers();
// Operand rank = collapsed + offset dims (every operand dim is one or the
// other).
int operand_rank =
dims.collapsed_slice_dims().size() + dims.offset_dims().size();
// Permutation that moves the start_index_map dims to the front, plus its
// inverse for undoing the transpose on the output.
auto [operand_permutation, operand_permutation_inverse] =
MakeOperandStartIndexPermutations(dims.start_index_map(), operand_rank);
auto* operand = gather->operands()[0];
auto* start_indices = gather->operands()[1];
TF_ASSIGN_OR_RETURN(operand, MaybeTranspose(operand, operand_permutation));
// Canonicalize start indices to rank 2 with index_vector_dim == 1.
TF_ASSIGN_OR_RETURN(
start_indices,
TransformStartIndices(start_indices, dims.index_vector_dim()));
auto slice_sizes = Permute(gather->gather_slice_sizes(), operand_permutation);
// Canonical output: [num_indices, slice_size_0, ..., slice_size_{r-1}].
std::vector<int64_t> output_dims = {start_indices->shape().dimensions(0)};
absl::c_copy(slice_sizes, std::back_inserter(output_dims));
Shape output_shape =
ShapeUtil::MakeShape(operand->shape().element_type(), output_dims);
// Canonical dnums: offset dims {1..rank}, identity start_index_map, no
// collapsed slice dims, index_vector_dim 1.
std::vector<int64_t> offset_dims(operand_rank);
absl::c_iota(offset_dims, 1);
std::vector<int64_t> start_index_map(dims.start_index_map().size());
absl::c_iota(start_index_map, 0);
auto* result = gather->AddInstruction(HloInstruction::CreateGather(
output_shape, operand, start_indices,
HloGatherInstruction::MakeGatherDimNumbers(
offset_dims,
{}, start_index_map, 1),
slice_sizes, gather->indices_are_sorted()));
// Undo the operand transpose on the output (dim 0 stays: it is the index
// dim, hence the +1 shift).
std::vector<int64_t> output_permutation(1 +
operand_rank);
absl::c_transform(operand_permutation_inverse, output_permutation.begin() + 1,
[](int64_t dim) { return dim + 1; });
TF_ASSIGN_OR_RETURN(result, MaybeTranspose(result, output_permutation));
// Re-collapse the originally collapsed slice dims (shifted by the index
// dim).
if (!dims.collapsed_slice_dims().empty()) {
std::vector<int64_t> collapsed_slice_dims(
dims.collapsed_slice_dims().size());
absl::c_transform(dims.collapsed_slice_dims(), collapsed_slice_dims.begin(),
[](int64_t dim) { return dim + 1; });
TF_ASSIGN_OR_RETURN(result,
ElideDegenerateDims(result, collapsed_slice_dims));
}
// Restore the original (possibly multi-dimensional) start-index batch dims.
auto original_start_index_dims = gather->operands()[1]->shape().dimensions();
std::vector<int64_t> start_indices_dims;
for (int i = 0; i < original_start_index_dims.size(); ++i) {
if (i != dims.index_vector_dim()) {
start_indices_dims.push_back(original_start_index_dims[i]);
}
}
if (start_indices_dims.size() > 1) {
TF_ASSIGN_OR_RETURN(result,
ExpandFirstDimIntoNDims(result, start_indices_dims));
} else if (start_indices_dims.empty()) {
TF_ASSIGN_OR_RETURN(result, ElideDegenerateDims(result, {0}));
}
// Finally interleave index dims and offset dims back into the original
// output dimension order.
std::vector<int64_t> output_perm;
auto output_rank = static_cast<int64_t>(start_indices_dims.size() +
dims.offset_dims().size());
output_perm.reserve(output_rank);
auto offset_dim_index = static_cast<int64_t>(start_indices_dims.size());
int64_t start_index_dim_index = 0;
for (int64_t i = 0; i < output_rank; ++i) {
if (absl::c_linear_search(dims.offset_dims(), i)) {
output_perm.push_back(offset_dim_index++);
} else {
output_perm.push_back(start_index_dim_index++);
}
}
return MaybeTranspose(result, output_perm);
}
// Returns true if `gather` is already in the canonical form this pass
// produces: rank-2 start indices with index_vector_dim == 1, identity
// start_index_map, no collapsed slice dims, and offset dims starting at 1
// and ending at num_offset_dims (offset dims are ascending, so the first
// and last entries pin the whole range).
bool GatherSimplifier::IsSimplifiedGather(const HloGatherInstruction* gather) {
  const auto& dnums = gather->gather_dimension_numbers();
  const auto* indices = gather->operands()[1];
  if (indices->shape().rank() != 2 || dnums.index_vector_dim() != 1) {
    return false;
  }
  if (!IsIdentityPermutation(dnums.start_index_map()) ||
      !dnums.collapsed_slice_dims().empty()) {
    return false;
  }
  return *dnums.offset_dims().begin() == 1 &&
         *dnums.offset_dims().rbegin() == dnums.offset_dims().size();
}
// Matches any gather that is not yet in canonical (simplified) form.
bool GatherSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
  auto* gather = DynCast<HloGatherInstruction>(inst);
  if (gather == nullptr) {
    return false;
  }
  return !IsSimplifiedGather(gather);
}
} | #include "xla/service/gather_simplifier.h"
#include <optional>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class GatherSimplifierTest : public HloTestBase {};
// Start indices with trailing index_vector_dim are reshaped to rank 2 with
// index_vector_dim=1; the output is reshaped back to the original shape.
TEST_F(GatherSimplifierTest, TransformsStartIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34] parameter(0)
indices = s32[42,43] parameter(1)
ROOT gather = f32[42,43,7,8] gather(operand, indices),
offset_dims={2,3},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=2,
slice_sizes={7,8}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[VECTOR_DIM:.*]] = s32[42,43,1]{2,1,0} reshape(%indices)
CHECK: %[[INDICES_2D:.*]] = s32[1806,1]{1,0} reshape(%[[VECTOR_DIM]])
CHECK: %[[GATHER:.*]] = f32[1806,7,8]{{.*}} gather(
CHECK-SAME: %operand, %[[INDICES_2D]]),
CHECK-SAME: offset_dims={1,2},
CHECK-SAME: collapsed_slice_dims={},
CHECK-SAME: start_index_map={0},
CHECK-SAME: index_vector_dim=1,
CHECK-SAME: slice_sizes={7,8}
CHECK: ROOT %{{.*}} = f32[42,43,7,8]{3,2,1,0} reshape(%[[GATHER]])
)");
}
// Collapsed slice dims become size-1 offset dims in the canonical gather and
// are removed afterwards with a reshape.
TEST_F(GatherSimplifierTest, RemovesCollapsedSliceDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34] parameter(0)
indices = s32[42,1] parameter(1)
ROOT gather = f32[42] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[GATHER:.*]] = f32[42,1,1]{2,1,0} gather(%operand, %indices)
CHECK-SAME: offset_dims={1,2},
CHECK-SAME: collapsed_slice_dims={},
CHECK: ROOT %{{.*}} = f32[42]{0} reshape(%[[GATHER]])
)");
}
// A non-identity start_index_map is turned into an operand transpose, with
// an output transpose undoing it.
TEST_F(GatherSimplifierTest, MakesStartIndexMapIdentity) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34,35] parameter(0)
indices = s32[42,3] parameter(1)
ROOT gather = f32[42,1,2,3] gather(operand, indices),
offset_dims={1,2,3},
collapsed_slice_dims={},
start_index_map={2,0,1},
index_vector_dim=1,
slice_sizes={1,2,3}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
%operand = f32[33,34,35]{2,1,0} parameter(0)
CHECK: %[[OPERAND:.*]] = f32[35,33,34]{2,1,0} transpose(%operand)
CHECK: %[[GATHER:.*]] = f32[42,3,1,2]{{.*}} gather(%[[OPERAND]],
CHECK-SAME: start_index_map={0,1,2},
CHECK: ROOT {{.*}} = f32[42,1,2,3]{{.*}} transpose(%[[GATHER]])
)");
}
// Mixed case: some dims collapse, and the offset dim precedes the index dim
// in the output, requiring both a collapsing reshape and a transpose.
TEST_F(GatherSimplifierTest, CollapsesSomeDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[33,34,35] parameter(0)
indices = s32[42,1] parameter(1)
ROOT gather = f32[7,42] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={0,2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,7,1}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[GATHER:.*]] = f32[42,1,7,1]{3,2,1,0} gather(
CHECK: %[[COLLAPSED:.*]] = f32[42,7]{1,0} reshape(%[[GATHER]])
CHECK: ROOT {{.*}} = f32[7,42]{1,0} transpose(%[[COLLAPSED]]),
CHECK-SAME: dimensions={1,0}
)");
}
// Scalar-batch start indices (index_vector_dim == rank): the result must
// still contain a gather after canonicalization.
TEST_F(GatherSimplifierTest, ZeroDimStartIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[8,16] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = f32[8,16] gather(f32[8,16] operand, s32[2] indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={8,16}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: gather(
)");
}
// A slice size of 0 means nothing is gathered: the pass replaces the gather
// with a broadcasted zero constant.
TEST_F(GatherSimplifierTest, ZeroSizeSlice) {
constexpr absl::string_view kModuleStr = R"(
HloModule gather_simplifier
ENTRY kernel_entry {
operand = f32[0,2] parameter(0)
indices = s32[3] parameter(1)
ROOT gather = f32[3,2] gather(f32[0,2] operand, s32[3]{0} indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={0,2}
})";
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"(
CHECK: %[[ZERO:.*]] = f32[] constant(0)
CHECK: ROOT {{.*}} = f32[3,2]{1,0} broadcast(%[[ZERO]]), dimensions={}
)");
}
}
} |
1,913 | cpp | tensorflow/tensorflow | reduce_scatter_reassociate | third_party/xla/xla/service/reduce_scatter_reassociate.cc | third_party/xla/xla/service/reduce_scatter_reassociate_test.cc | #ifndef XLA_SERVICE_REDUCE_SCATTER_REASSOCIATE_H_
#define XLA_SERVICE_REDUCE_SCATTER_REASSOCIATE_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that reassociates a binary reduction applied to two compatible
// reduce-scatter results into a single reduce-scatter of the reduction of
// their inputs (rs(a) + rs(b) -> rs(a + b)), halving collective traffic.
class ReduceScatterReassociate : public HloModulePass {
public:
absl::string_view name() const override {
return "reduce-scatter-reassociate";
}
using HloPassInterface::Run;
// Returns true iff any reassociation was performed.
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/reduce_scatter_reassociate.h"
#include <optional>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool AreCompatible(const HloReduceScatterInstruction *rs0,
const HloReduceScatterInstruction *rs1,
ReductionKind op_kind) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(rs0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(rs1);
auto kind0 = MatchReductionComputation(rs0->to_apply());
auto dims_match = rs0->scatter_dimension() == rs1->scatter_dimension();
return key0 && key1 && kind0 && *key0 == *key1 && kind0 == op_kind &&
dims_match;
}
}
// Rewrites op(reduce-scatter(x), reduce-scatter(y)) into
// reduce-scatter(op(x, y)) wherever the two reduce-scatters are compatible
// and have no other users.  Returns true iff the module changed.
absl::StatusOr<bool> ReduceScatterReassociate::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  // Constrained-layout reduce-scatters cannot be moved; bail out early.
  if (hlo_query::ContainsLayoutConstrainedCollective(
          *module, HloOpcode::kReduceScatter)) {
    VLOG(1)
        << "Skip ReduceScatterReassociate because the module contains reduce-"
           "scatter with constrained layouts";
    return false;
  }
  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  bool changed = false;
  for (auto computation : module->computations(execution_threads)) {
    for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
      // Match an array-shaped elementwise reduction op whose operands are
      // both reduce-scatters.
      std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
      if (!kind || inst->operand(0)->opcode() != HloOpcode::kReduceScatter ||
          inst->operand(1)->opcode() != HloOpcode::kReduceScatter ||
          !inst->shape().IsArray()) {
        continue;
      }
      auto *rs0 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(0));
      auto *rs1 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(1));
      if (!AreCompatible(rs0, rs1, *kind)) {
        VLOG(2) << "Reduce-Scatter operations are not compatible, skipping";
        continue;
      }
      // Only rewrite when the reduce-scatters feed nothing else.
      if (rs0->user_count() != 1 || rs1->user_count() != 1) {
        VLOG(2) << "Reduce-Scatter operations have > 1 users";
        continue;
      }
      // Apply the reduction op on the full (pre-scatter) operands, then
      // reduce-scatter the combined result once.
      HloInstruction *new_op =
          computation->AddInstruction(inst->CloneWithNewOperands(
              rs0->mutable_operand(0)->shape(),
              {rs0->mutable_operand(0), rs1->mutable_operand(0)}));
      HloInstruction *new_rs = computation->AddInstruction(
          rs0->CloneWithNewOperands(inst->shape(), {new_op}));
      // The clone inherits rs0's channel id; give it a fresh one to keep
      // channel ids unique within the module.
      if (new_rs->channel_id()) {
        new_rs->set_channel_id(next_channel_id++);
      }
      TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_rs));
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs0));
      // rs0 and rs1 may be the same instruction, e.g. add(rs, rs).
      if (rs0 != rs1) {
        TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs1));
      }
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/reduce_scatter_reassociate.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
// Test fixture: runs the ReduceScatterReassociate pass over an HLO string
// and provides helpers to inspect the result.
class ReduceScatterReassociateTest : public HloTestBase {
 public:
  // Parses `hlo_module`, runs the pass, and verifies the pass reported
  // `expect_change`.  Returns the (possibly rewritten) module.
  absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
      absl::string_view hlo_module, bool expect_change) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
    auto changed = ReduceScatterReassociate().Run(module.get());
    if (!changed.ok()) {
      return changed.status();
    }
    EXPECT_EQ(changed.value(), expect_change);
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }
  // Counts reduce-scatter instructions in the entry computation.
  size_t ReduceScatterCount(std::unique_ptr<HloModule>& module) {
    return absl::c_count_if(module->entry_computation()->instructions(),
                            HloPredicateIsOp<HloOpcode::kReduceScatter>);
  }
};
// add(rs(p0), rs(p1)) is rewritten to rs(add(p0, p1)): one reduce-scatter
// remains and it is the root.
TEST_F(ReduceScatterReassociateTest, Simple) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              m::ReduceScatter(m::Add(m::Parameter(0), m::Parameter(1))));
  EXPECT_EQ(ReduceScatterCount(module), 1);
}
// Constrained-layout reduce-scatters are skipped: the pass reports no change.
TEST_F(ReduceScatterReassociateTest, SimpleWithConstrainLayout) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, constrain_layout=true, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, constrain_layout=true, to_apply=sum
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// A left-leaning chain of adds over four reduce-scatters collapses into a
// single reduce-scatter over the chained adds of the raw parameters.
TEST_F(ReduceScatterReassociateTest, SimpleChain) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  p2 = f32[8] parameter(2)
  p3 = f32[8] parameter(3)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
  rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
  rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
  add0 = f32[4] add(rs0, rs1)
  add1 = f32[4] add(add0, rs2)
  ROOT add2 = f32[4] add(add1, rs3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      m::ReduceScatter(m::Add(
          m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
          m::Parameter(3))));
  EXPECT_EQ(ReduceScatterCount(module), 1);
}
// A balanced tree of adds over four reduce-scatters also collapses into a
// single reduce-scatter, preserving the tree shape of the adds.
TEST_F(ReduceScatterReassociateTest, SimpleTree) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  p2 = f32[8] parameter(2)
  p3 = f32[8] parameter(3)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
  rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum
  rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum
  add0 = f32[4] add(rs0, rs1)
  add1 = f32[4] add(rs2, rs3)
  ROOT add2 = f32[4] add(add0, add1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      m::ReduceScatter(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
                              m::Add(m::Parameter(2), m::Parameter(3)))));
  EXPECT_EQ(ReduceScatterCount(module), 1);
}
// Reduce-scatters with different reduction computations (sum vs max) are not
// compatible: no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchOp0) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
max {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT r = f32[] maximum(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Both reduce-scatters use `max` but the combining op is `add`: the reduction
// kinds disagree with the elementwise op, so no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchOp1) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
max {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT r = f32[] maximum(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=max
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Different scatter dimensions (0 vs 1) make the reduce-scatters
// incompatible: no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchDimension) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8,8] parameter(0)
  p1 = f32[8,8] parameter(1)
  rs0 = f32[8,8] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[8,8] reduce-scatter(p1), dimensions={1}, to_apply=sum
  ROOT add = f32[8,8] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Different replica groups make the reduce-scatters incompatible: no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchReplicaGroups) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0}}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={}, to_apply=sum
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// One reduce-scatter has a channel_id and the other does not: incompatible,
// no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchHasChannelId) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, channel_id=3, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// use_global_device_ids differs between the two reduce-scatters:
// incompatible, no rewrite.
TEST_F(ReduceScatterReassociateTest, MismatchUseGlobalDeviceId) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={{0,1}}, channel_id=4, to_apply=sum
  ROOT add = f32[4] add(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// rs0 also feeds the tuple (a second user), so the pattern cannot be
// rewritten without duplicating work: no change.
TEST_F(ReduceScatterReassociateTest, NotSingleUser) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum
  add = f32[4] add(rs0, rs1)
  ROOT t = (f32[4], f32[4]) tuple(rs0, add)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// add(rs, rs) with the same reduce-scatter on both sides is still rewritten
// (rs0 == rs1 is handled by the pass).
TEST_F(ReduceScatterReassociateTest, DoubleUse) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum
  add = f32[4] add(rs0, rs0)
  ROOT c = f32[4] copy(add)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
}
}
} |
1,914 | cpp | tensorflow/tensorflow | tuple_points_to_analysis | third_party/xla/xla/service/tuple_points_to_analysis.cc | third_party/xla/xla/service/tuple_points_to_analysis_test.cc | #ifndef XLA_SERVICE_TUPLE_POINTS_TO_ANALYSIS_H_
#define XLA_SERVICE_TUPLE_POINTS_TO_ANALYSIS_H_
#include <stddef.h>
#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/logical_buffer_analysis.h"
#include "xla/shape_tree.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/gtl/compactptrset.h"
#include "tsl/platform/status.h"
namespace xla {
// For one HLO instruction, records at every position (ShapeIndex) of its
// output shape the set of LogicalBuffers that may appear there, together with
// the tuple-producing instructions that could have created each tuple-shaped
// position.
class PointsToSet {
 public:
  // `shape` is held by pointer inside the ShapeTree, so it must outlive this
  // object.
  explicit PointsToSet(const Shape* shape) : tree_(shape) {}
  // True if some position may point to more than one buffer.
  bool IsAmbiguous() const;
  // True if no buffer appears at more than one position of the set.
  bool IsDistinct() const;
  // Number of distinct buffers across all positions.
  size_t size() const;
  using BufferSet = tsl::gtl::CompactPointerSet<const LogicalBuffer*>;
  // All buffers in the set, flattened across positions.
  BufferSet CreateFlattenedSet() const;
  // Membership queries: anywhere in the set / at a specific position.
  bool ContainsBufferAtIndex(const LogicalBuffer& buffer,
                             const ShapeIndex& index) const;
  bool ContainsBuffer(const LogicalBuffer& buffer) const;
  // Adds `buffer` at `index`; duplicates are ignored.
  void AddPointedToBuffer(const LogicalBuffer& buffer, const ShapeIndex& index);
  using SourceSet = tsl::gtl::CompactPointerSet<HloInstruction*>;
  // Tuple instructions that may have produced the tuple at `index`.
  const SourceSet& tuple_sources(const ShapeIndex& index) const;
  void add_tuple_source(const ShapeIndex& index, HloInstruction* tuple);
  using BufferList = absl::InlinedVector<const LogicalBuffer*, 1>;
  // Direct (const / mutable) access to the buffer list at `index`.
  const BufferList& element(const ShapeIndex& index) const {
    return tree_.element(index).buffers;
  }
  BufferList* mutable_element(const ShapeIndex& index) {
    return &tree_.mutable_element(index)->buffers;
  }
  // Visitors over (index, buffer-list) pairs; the *WithStatus variant stops
  // and propagates the first non-OK status.
  template <typename Fn>
  void ForEachElement(const Fn& fn) const {
    tree_.ForEachElement([&fn](const ShapeIndex& index, const Elem& elem) {
      fn(index, elem.buffers);
    });
  }
  template <typename Fn>
  void ForEachMutableElement(const Fn& fn) {
    tree_.ForEachMutableElement([&fn](const ShapeIndex& index, Elem* elem) {
      fn(index, &elem->buffers);
    });
  }
  template <typename Fn>
  absl::Status ForEachElementWithStatus(const Fn& fn) const {
    return tree_.ForEachElementWithStatus(
        [&fn](const ShapeIndex& index, const Elem& elem) {
          return fn(index, elem.buffers);
        });
  }

 private:
  // Per-position payload: possible buffers plus possible tuple producers.
  struct Elem {
    BufferList buffers;
    SourceSet tuple_sources;
  };
  ShapeTree<Elem> tree_;
  // Non-copyable: a points-to set is owned by its analysis.
  PointsToSet(const PointsToSet&) = delete;
  PointsToSet& operator=(const PointsToSet&) = delete;
};
// Identifies one position where a logical buffer appears: the (instruction,
// shape-index) pair at which the buffer is visible.
class BufferAlias {
 public:
  BufferAlias(HloInstruction* instruction, const ShapeIndex& index)
      : instruction_(instruction), index_(index) {}
  HloInstruction* instruction() const { return instruction_; }
  const ShapeIndex& index() const { return index_; }
  bool operator==(const BufferAlias& other) const {
    return instruction_ == other.instruction_ && index_ == other.index_;
  }
  bool operator!=(const BufferAlias& other) const { return !(*this == other); }
  // Human-readable form, e.g. "BufferAlias(name[0,1])".
  std::string ToString() const;

 private:
  HloInstruction* instruction_;
  ShapeIndex index_;
};
std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias);
// DFS visitor that computes, for every instruction in a module, the
// PointsToSet of its output, plus which instructions define each
// LogicalBuffer and where each buffer is aliased.
class TuplePointsToAnalysis : public DfsHloVisitorWithDefault {
 public:
  // Runs the analysis on `module` and returns the populated result.
  static absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>> Run(
      const HloModule* module);
  // Points-to set of `hlo_instruction`'s output (must have been analyzed).
  const PointsToSet& GetPointsToSet(
      const HloInstruction* hlo_instruction) const;
  const LogicalBuffer& GetBuffer(LogicalBuffer::Id id) const;
  // The unique buffer defined at (instruction, index); an error if the
  // position does not define exactly that buffer.
  absl::StatusOr<const LogicalBuffer*> GetBufferDefinedAt(
      const HloInstruction* instruction, const ShapeIndex& index) const;
  using BufferAliasVector = absl::InlinedVector<BufferAlias, 1>;
  // All (instruction, index) positions where `buffer` appears.
  const BufferAliasVector& GetBufferAliases(const LogicalBuffer& buffer) const;
  LogicalBuffer::Id num_logical_buffers() const {
    return logical_buffer_analysis_->num_logical_buffers();
  }
  LogicalBuffer& logical_buffer(LogicalBuffer::Id id) const {
    return logical_buffer_analysis_->GetBuffer(id);
  }
  using BufferDefinitionVector = absl::InlinedVector<const LogicalBuffer*, 1>;
  // Buffers newly defined (not forwarded) by `instruction`.
  const BufferDefinitionVector& GetBuffersDefinedByInstruction(
      const HloInstruction* instruction) const;
  bool InstructionDefinesBufferAtIndex(const HloInstruction* instruction,
                                       const ShapeIndex& index) const;
  // Checks internal consistency of `buffer` against the analysis state.
  absl::Status VerifyBuffer(const LogicalBuffer& buffer) const;
  // Visitor overrides: opcodes with aliasing semantics get dedicated
  // handlers; everything else defines fresh buffers via DefaultAction.
  absl::Status DefaultAction(HloInstruction* hlo_instruction) override;
  absl::Status HandleTuple(HloInstruction* tuple) override;
  absl::Status HandleGetTupleElement(
      HloInstruction* get_tuple_element) override;
  absl::Status HandleAsyncStart(HloInstruction* async_start) override;
  absl::Status HandleAsyncUpdate(HloInstruction* async_update) override;
  absl::Status HandleAsyncDone(HloInstruction* async_done) override;
  absl::Status HandleBitcast(HloInstruction* bitcast) override;
  absl::Status HandleDomain(HloInstruction* domain) override;
  absl::Status HandleCopy(HloInstruction* copy) override;
  absl::Status HandleCopyStart(HloInstruction* copy_start) override;
  absl::Status HandleCopyDone(HloInstruction* copy_done) override;
  absl::Status HandleRecvDone(HloInstruction* recv_done) override;
  absl::Status HandleSend(HloInstruction* send) override;
  absl::Status HandleAddDependency(HloInstruction* add_dependency) override;
  absl::Status HandleCustomCall(HloInstruction* custom_call) override;
  absl::Status HandleFusion(HloInstruction* fusion) override;
  absl::Status HandleOptimizationBarrier(HloInstruction* barrier) override;
  std::string ToString() const;
  // True if `user` does not read the content of `operand` at `index` (e.g.
  // only forwards it).
  bool DoesNotUseOperandBuffer(const HloInstruction* operand,
                               const ShapeIndex& index,
                               const HloInstruction* user) const;

 private:
  explicit TuplePointsToAnalysis(
      const HloModule* module,
      std::unique_ptr<LogicalBufferAnalysis> logical_buffer_analysis)
      : module_(module),
        logical_buffer_analysis_(std::move(logical_buffer_analysis)) {}
  // Walks the module and fills per_instruction_ / logical_buffer_aliases_.
  absl::Status Analyze();
  absl::Status PopulateDefinedBuffersAndAliases(
      const decltype(std::declval<HloComputation>()
                         .instructions())& instructions);
  PointsToSet& CreateEmptyPointsToSet(const HloInstruction* instruction);
  // Creates `instruction`'s points-to set as a copy of `src`'s.
  PointsToSet& CreateCopiedPointsToSet(const HloInstruction* instruction,
                                       const HloInstruction* src);
  absl::Status GatherBuffersDefinedByInstruction(
      const HloInstruction* instruction, BufferDefinitionVector* buffers);
  void InstructionToString(const HloInstruction* instruction,
                           std::string* output) const;
  // Per-instruction analysis results, keyed by HloInstruction::unique_id().
  struct PerInstruction {
    std::unique_ptr<PointsToSet> points_to_set;
    BufferDefinitionVector instruction_defined_buffers;
  };
  // Const lookup: fatal if the entry does not already exist.
  const PerInstruction* PerInst(const HloInstruction* inst) const {
    int id = inst->unique_id();
    DCHECK_GE(id, 0);
    auto iter = per_instruction_.find(id);
    if (iter == per_instruction_.end()) {
      LOG(FATAL) << "Expected per-instruction information to already exist";
    } else {
      return iter->second.get();
    }
  }
  // Mutable lookup: creates the entry on first access.
  PerInstruction* PerInst(const HloInstruction* inst) {
    int id = inst->unique_id();
    DCHECK_GE(id, 0);
    auto iter = per_instruction_.find(id);
    if (iter == per_instruction_.end()) {
      return per_instruction_.emplace(id, std::make_unique<PerInstruction>())
          .first->second.get();
    } else {
      return iter->second.get();
    }
  }
  std::vector<std::pair<HloInstruction*, int64_t>>
  GetAllUsesOfInstructionAtIndex(HloInstruction* instruction,
                                 const ShapeIndex& index) const;
  bool HasUniqueFusedUseOfOperandAt(HloInstruction* operand,
                                    const ShapeIndex& operand_index,
                                    HloInstruction* fusion,
                                    const int64_t use_operand_index) const;
  const HloModule* module_;
  const std::unique_ptr<LogicalBufferAnalysis> logical_buffer_analysis_;
  absl::flat_hash_map<int, std::unique_ptr<PerInstruction>> per_instruction_;
  // Indexed by LogicalBuffer::Id: everywhere each buffer is visible.
  std::vector<BufferAliasVector> logical_buffer_aliases_;
  TuplePointsToAnalysis(const TuplePointsToAnalysis&) = delete;
  TuplePointsToAnalysis& operator=(const TuplePointsToAnalysis&) = delete;
  const bool alias_buffer_across_dataflow_ = false;
};
}
#endif
#include "xla/service/tuple_points_to_analysis.h"
#include <memory>
#include <ostream>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
// Renders the alias as "BufferAlias(<instruction>[<i0>,<i1>,...])".
std::string BufferAlias::ToString() const {
  return absl::StrFormat("BufferAlias(%s[%s])", instruction_->name(),
                         absl::StrJoin(index_, ","));
}
// Streams the human-readable form of `buffer_alias`.
std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) {
  return out << buffer_alias.ToString();
}
// A set is ambiguous when some position may point to more than one buffer.
bool PointsToSet::IsAmbiguous() const {
  bool found_multiple = false;
  ForEachElement([&found_multiple](const ShapeIndex& index,
                                   const BufferList& buffers) {
    if (buffers.size() > 1) {
      found_multiple = true;
    }
  });
  return found_multiple;
}
// A set is distinct when no buffer appears more than once across all
// positions.
bool PointsToSet::IsDistinct() const {
  bool no_duplicates = true;
  absl::flat_hash_set<const LogicalBuffer*> seen;
  ForEachElement([&](const ShapeIndex& index, const BufferList& buffers) {
    for (const LogicalBuffer* buffer : buffers) {
      // insert().second is false when the buffer was already seen.
      no_duplicates &= seen.insert(buffer).second;
    }
  });
  return no_duplicates;
}
// Number of distinct buffers across all positions of the set.
size_t PointsToSet::size() const { return CreateFlattenedSet().size(); }
// Collects every buffer from every position into one flat set.
PointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const {
  BufferSet result;
  ForEachElement(
      [&result](const ShapeIndex& index, const BufferList& buffers) {
        for (const LogicalBuffer* buffer : buffers) {
          result.insert(buffer);
        }
      });
  return result;
}
// True if `buffer` appears at any position of the set.
bool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const {
  bool present = false;
  ForEachElement([&](const ShapeIndex& index, const BufferList& buffers) {
    // Skip further searches once the buffer has been located.
    present = present || absl::c_linear_search(buffers, &buffer);
  });
  return present;
}
// True if `buffer` appears in the list at position `index`.
bool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer,
                                        const ShapeIndex& index) const {
  return absl::c_linear_search(element(index), &buffer);
}
// Appends `buffer` at position `index`, keeping the list duplicate-free.
void PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer,
                                     const ShapeIndex& index) {
  if (!ContainsBufferAtIndex(buffer, index)) {
    mutable_element(index)->push_back(&buffer);
  }
}
// Returns the tuple instructions that may have produced the tuple at `index`.
const PointsToSet::SourceSet& PointsToSet::tuple_sources(
    const ShapeIndex& index) const {
  return tree_.element(index).tuple_sources;
}
// Records `tuple` as a possible producer of the tuple at `index`.
void PointsToSet::add_tuple_source(const ShapeIndex& index,
                                   HloInstruction* tuple) {
  tree_.mutable_element(index)->tuple_sources.insert(tuple);
}
namespace {
// Appends `instruction` and every fusion instruction nested inside it to
// `fusion_instructions`, innermost fusions first (post-order).
void GatherFusionInstructions(
    HloInstruction* instruction,
    std::vector<HloInstruction*>* fusion_instructions) {
  CHECK_EQ(HloOpcode::kFusion, instruction->opcode());
  for (HloInstruction* nested : instruction->fused_instructions()) {
    if (nested->opcode() == HloOpcode::kFusion) {
      GatherFusionInstructions(nested, fusion_instructions);
    }
  }
  fusion_instructions->push_back(instruction);
}
}
// Builds the logical-buffer analysis first, then runs the points-to
// analysis over the module and returns the populated object.
absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>>
TuplePointsToAnalysis::Run(const HloModule* module) {
  auto logical_buffer_analysis = LogicalBufferAnalysis::Run(module);
  std::unique_ptr<TuplePointsToAnalysis> analysis(new TuplePointsToAnalysis(
      module, std::move(logical_buffer_analysis).value()));
  TF_RETURN_IF_ERROR(analysis->Analyze());
  return std::move(analysis);
}
// Visits every non-fusion computation, then every fusion body, computing
// points-to sets and recording defined buffers and aliases.
absl::Status TuplePointsToAnalysis::Analyze() {
  per_instruction_.clear();
  per_instruction_.reserve(module_->instruction_count());
  logical_buffer_aliases_.clear();
  logical_buffer_aliases_.resize(
      logical_buffer_analysis_->num_logical_buffers());
  std::vector<HloInstruction*> fusion_instructions;
  // Pass 1: all non-fusion computations; also collect fusion instructions
  // (including nested ones) for the second pass.
  for (auto* computation : module_->MakeNonfusionComputations()) {
    TF_RETURN_IF_ERROR(computation->Accept(this));
    TF_RETURN_IF_ERROR(
        PopulateDefinedBuffersAndAliases(computation->instructions()));
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kFusion) {
        GatherFusionInstructions(instruction, &fusion_instructions);
      }
    }
  }
  // Pass 2: analyze the body of each collected fusion instruction.
  for (auto* instruction : fusion_instructions) {
    TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this));
    TF_RETURN_IF_ERROR(
        PopulateDefinedBuffersAndAliases(instruction->fused_instructions()));
  }
  XLA_VLOG_LINES(3, ToString());
  return absl::OkStatus();
}
// For each instruction: records the buffers it defines, and registers an
// alias entry (instruction, index) for every buffer its points-to set holds.
absl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases(
    const decltype(std::declval<HloComputation>()
                       .instructions())& instructions) {
  for (auto* instruction : instructions) {
    PerInstruction* pi = PerInst(instruction);
    TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction(
        instruction, &pi->instruction_defined_buffers));
    const PointsToSet& points_to_set = GetPointsToSet(instruction);
    points_to_set.ForEachElement(
        [this, &instruction](
            const ShapeIndex& index,
            const PointsToSet::BufferList& pointed_to_buffers) {
          for (const LogicalBuffer* buffer : pointed_to_buffers) {
            logical_buffer_aliases_[buffer->id()].emplace_back(instruction,
                                                               index);
          }
        });
  }
  return absl::OkStatus();
}
// Default handler: the instruction defines a fresh buffer at every position
// of its output shape (no aliasing of operands).
absl::Status TuplePointsToAnalysis::DefaultAction(
    HloInstruction* hlo_instruction) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction);
  points_to_set.ForEachMutableElement(
      [this, hlo_instruction](const ShapeIndex& index,
                              PointsToSet::BufferList* buffers) {
        buffers->push_back(
            &logical_buffer_analysis_->GetBuffer(hlo_instruction, index));
      });
  // A tuple-shaped result makes this instruction the source of its own
  // top-level tuple.
  if (hlo_instruction->shape().IsTuple()) {
    points_to_set.add_tuple_source({}, hlo_instruction);
  }
  return absl::OkStatus();
}
// get-tuple-element forwards the operand's buffers: output position `i`
// aliases operand position `{tuple_index, i...}`.
absl::Status TuplePointsToAnalysis::HandleGetTupleElement(
    HloInstruction* get_tuple_element) {
  int64_t element_index = get_tuple_element->tuple_index();
  PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element);
  const PointsToSet& operand_points_to_set =
      *PerInst(get_tuple_element->operand(0))->points_to_set;
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) {
        // Build the source index by prepending the extracted tuple index.
        ShapeIndex src_index;
        src_index.push_back(element_index);
        for (auto element : target_index) {
          src_index.push_back(element);
        }
        *points_to = operand_points_to_set.element(src_index);
        // Tuple sources are forwarded alongside the buffers.
        for (HloInstruction* tuple :
             operand_points_to_set.tuple_sources(src_index)) {
          points_to_set.add_tuple_source(target_index, tuple);
        }
      });
  return absl::OkStatus();
}
// copy shares its operand's buffers at all nested positions, but the
// top-level position is replaced with a fresh buffer defined by the copy.
absl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) {
  PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0));
  points_to_set.mutable_element({})->clear();
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(copy, {}),
      {});
  return absl::OkStatus();
}
// bitcast defines no buffers: its output fully aliases its operand.
absl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) {
  CreateCopiedPointsToSet(bitcast, bitcast->operand(0));
  return absl::OkStatus();
}
// domain is a pass-through: its output fully aliases its operand.
absl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) {
  CreateCopiedPointsToSet(domain, domain->operand(0));
  return absl::OkStatus();
}
// add-dependency forwards its data operand unchanged: full aliasing.
absl::Status TuplePointsToAnalysis::HandleAddDependency(
    HloInstruction* add_dependency) {
  CreateCopiedPointsToSet(add_dependency, add_dependency->operand(0));
  return absl::OkStatus();
}
// recv-done defines fresh buffers at the top level and at {1}, while
// positions under {0} (the received data) alias the operand's buffers at the
// same indices.
absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done);
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(recv_done, {}),
      {});
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(recv_done, {1}),
      {1});
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(recv_done->operand(0));
  points_to_set.ForEachMutableElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& index, PointsToSet::BufferList* buffers) {
        // Only positions rooted at {0} are forwarded from the operand.
        if (index.empty() || index[0] != 0) {
          return;
        }
        *buffers = operand_points_to_set.element(index);
        for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) {
          points_to_set.add_tuple_source(index, tuple_source);
        }
      });
  return absl::OkStatus();
}
// async-start: output positions {0, i, rest...} alias operand i's buffers at
// `rest`; all other positions get fresh buffers defined by async-start.
absl::Status TuplePointsToAnalysis::HandleAsyncStart(
    HloInstruction* async_start) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start);
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
        if (target_index.size() >= 2 && target_index.front() == 0) {
          // target_index[1] selects which operand; the remainder indexes
          // into that operand's shape.
          const PointsToSet& operand_points_to_set =
              GetPointsToSet(async_start->operand(target_index[1]));
          ShapeIndex source_index(target_index.begin() + 2, target_index.end());
          *buffers = operand_points_to_set.element(source_index);
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(source_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        } else {
          buffers->push_back(
              &logical_buffer_analysis_->GetBuffer(async_start, target_index));
        }
      });
  return absl::OkStatus();
}
// async-update forwards its operand element-for-element; the shapes are
// required to be identical.
absl::Status TuplePointsToAnalysis::HandleAsyncUpdate(
    HloInstruction* async_update) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(async_update->operand(0));
  CHECK_EQ(async_update->shape(), async_update->operand(0)->shape());
  points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
                                          PointsToSet::BufferList* buffers) {
    *buffers = operand_points_to_set.element(index);
    for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) {
      points_to_set.add_tuple_source(index, tuple);
    }
  });
  return absl::OkStatus();
}
// async-done extracts the result: operand positions {1, rest...} are
// forwarded to output position `rest`; other operand positions are dropped.
absl::Status TuplePointsToAnalysis::HandleAsyncDone(
    HloInstruction* async_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(async_done->operand(0));
  operand_points_to_set.ForEachElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& src_index,
          const PointsToSet::BufferList& points_to) {
        if (!src_index.empty() && src_index.front() == 1) {
          // Strip the leading 1 to map into the output shape.
          const ShapeIndex target_index(src_index.begin() + 1, src_index.end());
          *points_to_set.mutable_element(target_index) = points_to;
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(src_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        }
      });
  return absl::OkStatus();
}
// copy-start: output position {1} aliases the operand's top-level buffers;
// every other position gets a fresh buffer defined by copy-start.
absl::Status TuplePointsToAnalysis::HandleCopyStart(
    HloInstruction* copy_start) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(copy_start->operand(0));
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
        if (target_index == ShapeIndex({1})) {
          *buffers = operand_points_to_set.element({});
        } else {
          buffers->push_back(
              &logical_buffer_analysis_->GetBuffer(copy_start, target_index));
        }
      });
  // Tuple sources of the operand's top level carry over to position {1}.
  for (HloInstruction* tuple :
       operand_points_to_set.tuple_sources({})) {
    points_to_set.add_tuple_source({1}, tuple);
  }
  return absl::OkStatus();
}
// copy-done: its (top-level) output aliases the operand's position {0};
// other operand positions are dropped.
absl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(copy_done->operand(0));
  operand_points_to_set.ForEachElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& src_index,
          const PointsToSet::BufferList& points_to) {
        if (src_index == ShapeIndex({0})) {
          const ShapeIndex target_index = {};
          *points_to_set.mutable_element(target_index) = points_to;
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(src_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        }
      });
  return absl::OkStatus();
}
absl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
PointsToSet& points_to_set = CreateEmptyPointsToSet(send);
auto top_buffer = points_to_set.mutable_element(ShapeIndex({}));
top_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({})));
points_to_set.add_tuple_source({}, send);
auto context_buffer = points_to_set.mutable_element(ShapeIndex({1}));
context_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1})));
auto token_buffer = points_to_set.mutable_element(ShapeIndex({2}));
token_buffer->push_back(
&logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2})));
const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0));
operand_points_to_set.ForEachElement(
[&points_to_set, &operand_points_to_set](
const ShapeIndex& src_index,
const PointsToSet::BufferList& points_to) {
ShapeIndex target_index({0});
for (auto element : src_index) {
target_index.push_back(element);
}
*points_to_set.mutable_element(target_index) = points_to;
for (HloInstruction* tuple :
operand_points_to_set.tuple_sources(src_index)) { | #include "xla/service/tuple_points_to_analysis.h"
#include <map>
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
// Test fixture for TuplePointsToAnalysis. Wraps a single entry computation in
// a verified HloModule, runs the analysis, and offers helpers for asserting
// which logical buffers a points-to set contains and how buffers are aliased.
class TuplePointsToAnalysisTest : public HloTestBase {
 protected:
  // Builds a module around `computation` and immediately runs the analysis.
  void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) {
    BuildModule(std::move(computation));
    RunAnalysis();
  }
  // Creates a fresh verified module with `computation` as its entry.
  void BuildModule(std::unique_ptr<HloComputation> computation) {
    module_ = CreateNewVerifiedModule();
    module_->AddEntryComputation(std::move(computation));
  }
  // Runs TuplePointsToAnalysis over module_. BuildModule must be called first.
  void RunAnalysis() {
    CHECK_NOTNULL(module_.get());
    points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();
  }
  // Returns the unique LogicalBuffer defined at (instruction, index).
  // CHECK-fails unless the points-to set at that index holds exactly that
  // single buffer.
  const LogicalBuffer* const GetBuffer(const HloInstruction* instruction,
                                       const ShapeIndex& index) {
    const auto& pointed_to =
        points_to_analysis_->GetPointsToSet(instruction).element(index);
    CHECK_EQ(1, pointed_to.size());
    CHECK_EQ(instruction, pointed_to[0]->instruction());
    CHECK(index == pointed_to[0]->index());
    return pointed_to[0];
  }
  // Expects `points_to_set` to contain exactly `buffers`, in any order.
  void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set,
                        absl::Span<const LogicalBuffer* const> buffers) {
    std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end());
    EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec));
  }
  // Expects `points_to_set` to contain exactly the top-level (shape index {})
  // buffers defined by `instructions`.
  void ExpectHasTopLevelBuffers(
      const PointsToSet::BufferList& points_to_set,
      absl::Span<HloInstruction* const> instructions) {
    PointsToSet::BufferList buffers;
    for (auto instruction : instructions) {
      buffers.push_back(GetBuffer(instruction, {}));
    }
    ExpectHasBuffers(points_to_set, buffers);
  }
  // Overload of the above accepting a BufferSet instead of a BufferList.
  void ExpectHasTopLevelBuffers(
      const PointsToSet::BufferSet& points_to_set,
      absl::Span<HloInstruction* const> instructions) {
    ExpectHasTopLevelBuffers(
        PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()),
        instructions);
  }
  // Expects the buffer defined at (instruction, index) to be aliased by
  // exactly the (instruction, index) pairs listed in `expected`.
  void ExpectHasBufferAliases(
      const HloInstruction* instruction, const ShapeIndex& index,
      absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) {
    const LogicalBuffer* buffer =
        points_to_analysis_->GetBufferDefinedAt(instruction, index).value();
    std::vector<BufferAlias> expected_aliases;
    expected_aliases.reserve(expected.size());
    for (auto& pair : expected) {
      expected_aliases.push_back(BufferAlias(pair.first, pair.second));
    }
    EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer),
                UnorderedElementsAreArray(expected_aliases));
  }
  // Module under test and the analysis result for it.
  std::unique_ptr<HloModule> module_;
  std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
};
}
} |
1,915 | cpp | tensorflow/tensorflow | hlo_unstacker | third_party/xla/xla/service/hlo_unstacker.cc | third_party/xla/xla/service/hlo_unstacker_test.cc | #ifndef XLA_SERVICE_HLO_UNSTACKER_H_
#define XLA_SERVICE_HLO_UNSTACKER_H_
#include <stdbool.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that "unstacks" while-loop operands: a stacked operand that the
// loop body only ever reads one slice of per iteration is replaced by a tuple
// of per-iteration slices, so downstream passes can simplify the loop.
class HloUnstacker : public HloModulePass {
 public:
  ~HloUnstacker() override = default;
  explicit HloUnstacker() = default;
  absl::string_view name() const override { return "hlo_unstacker"; }
  using HloPassInterface::Run;
  // Runs the pass on `module`. Returns true iff the module was changed
  // (at least one while operand was unstacked).
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/hlo_unstacker.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Read-only state shared across one unstacking run of a module.
struct UnstackerMetadata {
  // Prepares `module` for loop unrolling, then records every unrollable
  // while loop: its body computation is mapped to both its unroll config
  // and back to the while instruction that owns it.
  static absl::StatusOr<UnstackerMetadata> Create(HloModule* module) {
    UnstackerMetadata metadata;
    TF_ASSIGN_OR_RETURN(
        bool prepared,
        WhileLoopUnroller::PrepareModuleForUnrolling(module, {}));
    if (prepared) {
      VLOG(3) << "Prepared module: " << module->name() << " for unstacking.";
    }
    std::vector<std::pair<HloInstruction*, WhileLoopConfig>> loops =
        WhileLoopUnroller::GetUnrollableLoops(module, {});
    for (const auto& [instr, while_loop_config] : loops) {
      metadata.unrollable_loop_bodies[instr->while_body()] = while_loop_config;
      metadata.bodies[instr->while_body()] = instr;
    }
    return metadata;
  }
  // Unroll config for each unrollable while-loop body computation.
  absl::flat_hash_map<HloComputation*, WhileLoopConfig> unrollable_loop_bodies;
  // Maps a while-loop body computation back to its while instruction.
  absl::flat_hash_map<const HloComputation*, HloInstruction*> bodies;
  // Registered (pattern, handler) pairs. The pattern inspects an instruction
  // reached during shape-change propagation and returns the fusion to
  // unstack (or nullptr on no match); the handler later rewrites that fusion
  // to consume a single slice of the given shape.
  std::vector<
      std::pair<std::function<const HloInstruction*(
                    const UnstackerMetadata&, const HloInstruction*, int64_t)>,
                std::function<absl::Status(HloInstruction*, const Shape&)>>>
      custom_handlers;
};
// Collects — without mutating the module — everything needed to unstack one
// while operand: the unstacked (tuple-of-slices) shape, the hoisted
// unstacking computation, and deferred callbacks that later apply the body
// and loop rewrites. Callers first drive HandleInstruction/propagation, and
// only run the recorded changes once the whole analysis has succeeded.
class UnstackerTransformer {
 public:
  explicit UnstackerTransformer(const UnstackerMetadata& metadata)
      : metadata_(metadata) {}
  // Tries to match `instr` (which consumes the stacked operand at operand
  // position `changed_idx`) against one of the registered custom patterns.
  // On the first match, records the unstacking computation, the unstacked
  // tuple shape, and a deferred handler call. Returns true on a match.
  // Only a single match per transformer is supported.
  bool HandleInstruction(const HloInstruction* instr, int64_t changed_idx) {
    // Only fusions are candidates for the custom unstacking patterns.
    if (instr->opcode() != HloOpcode::kFusion) {
      return false;
    }
    VLOG(3) << "HandleInstruction(" << instr->shape().ToString()
            << instr->name() << ", " << changed_idx << ")";
    for (const auto& [custom_pattern, custom_handler] :
         metadata_.custom_handlers) {
      const HloInstruction* stacked_user =
          custom_pattern(metadata_, instr, changed_idx);
      if (stacked_user == nullptr) {
        continue;
      }
      // A second match means multiple distinct users of the stacked operand;
      // that case is not handled, so bail out.
      if (unstacking_computation_ != nullptr) {
        VLOG(3) << "Seen multiple users, cannot handle. \n instr: "
                << instr->ToString() << "\n hoisted_computation: "
                << unstacking_computation_->ToString(
                       HloPrintOptions::Fingerprint());
        return false;
      }
      // Hoist a clone of the matched fusion's computation; it will be used
      // to slice the stacked input outside the loop.
      unstacking_computation_ =
          stacked_user->fused_instructions_computation()->Clone(
              "hoisted_unstacking");
      VLOG(3) << "Unstacking computation: "
              << unstacking_computation_->ToString(
                     HloPrintOptions::Fingerprint());
      // The unstacked shape is a tuple of `num_layers` copies of the slice
      // shape, where num_layers is dimension 0 of the stacked operand.
      Shape slice_shape = stacked_user->shape();
      int64_t num_layers = stacked_user->operand(0)->shape().dimensions(0);
      std::vector<Shape> shapes;
      for (int64_t i = 0; i < num_layers; ++i) {
        shapes.push_back(slice_shape);
      }
      unstacked_shape_ =
          std::make_unique<Shape>(ShapeUtil::MakeTupleShape(shapes));
      unstacked_instrs_.push_back(instr);
      // Defer the actual rewrite of the matched fusion; `custom_handler` is
      // captured by reference into metadata_.custom_handlers, which outlives
      // this transformer.
      std::function<absl::Status()> unstack_wrapper =
          [&custom_handler = custom_handler, stacked_user,
           slice_shape]() mutable -> absl::Status {
        HloInstruction* mutable_dynamic_slicing_fusion =
            const_cast<HloInstruction*>(stacked_user);
        return custom_handler(mutable_dynamic_slicing_fusion, slice_shape);
      };
      body_changes_.push_back(unstack_wrapper);
      return true;
    }
    return false;
  }
  // Instructions that were matched for unstacking.
  std::vector<const HloInstruction*>& GetUnstackedInstructions() {
    return unstacked_instrs_;
  }
  // Tuple-of-slices shape, or nullptr if no pattern matched yet.
  const Shape* GetUnstackedShape() const { return unstacked_shape_.get(); }
  // Hoisted clone of the slicing computation (owned by this transformer).
  HloComputation* GetUnstackingComputation() const {
    return unstacking_computation_.get();
  }
  // Deferred rewrites of the while loop's parameter shapes.
  std::vector<std::function<void(const Shape*)>>& GetLoopChanges() {
    return loop_changes_;
  }
  // Deferred rewrites of the loop body's fusions.
  std::vector<std::function<absl::Status()>>& GetBodyChanges() {
    return body_changes_;
  }
  // Instructions whose shapes must change, keyed to the tuple/operand index
  // through which the shape change flows. Also doubles as the visited set
  // during propagation.
  absl::flat_hash_map<HloInstruction*, int64_t>& GetOperandChanges() {
    return operand_changes_;
  }
  void AddLoopChange(std::function<void(const Shape*)> loop_change) {
    loop_changes_.push_back(loop_change);
  }
 private:
  const UnstackerMetadata& metadata_;
  std::unique_ptr<Shape> unstacked_shape_ = nullptr;
  std::unique_ptr<HloComputation> unstacking_computation_ = nullptr;
  std::vector<std::function<void(const Shape*)>> loop_changes_;
  std::vector<std::function<absl::Status()>> body_changes_;
  absl::flat_hash_map<HloInstruction*, int64_t> operand_changes_;
  std::vector<const HloInstruction*> unstacked_instrs_;
};
// Forward declaration: mutually recursive with PropagateGteShapeChange
// through nested while loops.
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
                            UnstackerTransformer& unstacker, int64_t index);
// BFS over the transitive users of `gte`, checking that the pending shape
// change can be absorbed everywhere: GTEs and tuples merely forward the
// change (tracking the index it flows through), nested while loops recurse
// via CanUnstackWhileOperand, and any other user must match one of the
// registered custom unstacking patterns. The visited map is the
// transformer's operand-change map, so every visited instruction is also
// recorded for the later shape rewrite. Returns false on the first user
// that cannot absorb the change.
bool PropagateGteShapeChange(HloInstruction* gte,
                             UnstackerTransformer& unstacker) {
  VLOG(5) << "PropagateGteShapeChange(" << gte->ToString() << ")";
  absl::flat_hash_map<HloInstruction*, int64_t>& visited =
      unstacker.GetOperandChanges();
  std::deque<HloInstruction*> worklist;
  worklist.push_back(gte);
  visited.insert({gte, gte->tuple_index()});
  while (!worklist.empty()) {
    HloInstruction* changed_instr_to_propagate = worklist.front();
    // Index (tuple element or operand position) the change flows through.
    int64_t changed_operand_index =
        FindOrDie(visited, changed_instr_to_propagate);
    worklist.pop_front();
    for (HloInstruction* user : changed_instr_to_propagate->users()) {
      if (ContainsKey(visited, user)) {
        continue;
      }
      if (user->opcode() == HloOpcode::kGetTupleElement) {
        // Only GTEs extracting the changed element are affected.
        if (user->tuple_index() != changed_operand_index) {
          continue;
        }
        visited.insert({user, changed_operand_index});
        worklist.push_back(user);
      } else if (user->opcode() == HloOpcode::kTuple) {
        // The change now flows through whichever tuple slot consumed it.
        int64_t use_index = user->operand_index(changed_instr_to_propagate);
        visited.insert({user, {use_index}});
        worklist.push_back(user);
      } else if (user->opcode() == HloOpcode::kWhile) {
        // Recurse into the nested loop's body and condition.
        bool changed_nested_while =
            CanUnstackWhileOperand(user, unstacker, changed_operand_index);
        if (!changed_nested_while) {
          return false;
        }
        visited.insert({user, changed_operand_index});
        worklist.push_back(user);
      } else {
        // Any other user must be handled by a registered custom pattern.
        int64_t use_index = user->operand_index(changed_instr_to_propagate);
        if (!unstacker.HandleInstruction(user, use_index)) {
          VLOG(3) << "Custom unstacker not found for " << user->ToString();
          return false;
        }
      }
    }
  }
  return true;
}
// Scans `comp` for get-tuple-element instructions that read tuple element
// `idx` directly from `operand` and attempts to propagate the pending shape
// change through each one. Returns false as soon as any propagation fails,
// true if every matching GTE can absorb the change.
bool CanPropagateGteShapeChangesInComputation(
    const HloComputation* comp, const HloInstruction* operand,
    UnstackerTransformer& shape_transformer, int64_t idx) {
  VLOG(3) << "Propagating shape change of index " << idx
          << " in : " << comp->name();
  for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
    // Only GTEs that extract element `idx` straight off `operand` matter.
    if (instr->opcode() != HloOpcode::kGetTupleElement) {
      continue;
    }
    if (instr->tuple_index() != idx || instr->operand(0) != operand) {
      continue;
    }
    if (!PropagateGteShapeChange(instr, shape_transformer)) {
      VLOG(3) << "Failed to propagate shape change for " << instr->ToString();
      return false;
    }
  }
  VLOG(3) << "Finish propagating shape change of index " << idx
          << " in: " << comp->name();
  return true;
}
// Determines whether tuple element `index` of `while_instr` can be
// unstacked, by checking that the shape change propagates cleanly through
// both the loop body and the loop condition. On success, registers a
// deferred "loop change" with `unstacker` that rewrites the body and
// condition parameter shapes once the unstacked shape is known.
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
                            UnstackerTransformer& unstacker, int64_t index) {
  VLOG(5) << "ReplaceWhileOperandShape: " << while_instr->name() << " at "
          << index;
  bool body_changes_collected = CanPropagateGteShapeChangesInComputation(
      while_instr->while_body(),
      while_instr->while_body()->parameter_instruction(0), unstacker, index);
  bool condition_changes_collected = CanPropagateGteShapeChangesInComputation(
      while_instr->while_condition(),
      while_instr->while_condition()->parameter_instruction(0), unstacker,
      index);
  if (body_changes_collected && condition_changes_collected) {
    // Replaces parameter 0 of the loop body and condition with a parameter
    // whose tuple element `idx` carries the unstacked shape.
    auto loop_change = [](HloInstruction* loop, const Shape* new_shape,
                          int64_t idx) {
      Shape old_shape = ShapeUtil::MakeStaticShape(
          loop->while_body()->parameter_instruction(0)->shape());
      ShapeUtil::UpdateTupleShape(*new_shape, idx, &old_shape);
      loop->while_body()->ReplaceParameter(
          0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
      loop->while_condition()->ReplaceParameter(
          0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
    };
    // BUGFIX: capture `loop_change` by value. The wrapper is stored in
    // `unstacker` and invoked after this function returns (from
    // UnstackWhileOperandAtIndex), so the previous by-reference capture of
    // this local lambda dangled. The lambda is captureless, so copying is
    // free.
    auto loop_change_wrapper = [loop_change, while_instr,
                                index](const Shape* new_shape) {
      HloInstruction* mutable_loop = const_cast<HloInstruction*>(while_instr);
      loop_change(mutable_loop, new_shape, index);
    };
    unstacker.AddLoopChange(loop_change_wrapper);
    return true;
  }
  return false;
}
// Replaces the `index`-th element of `while_instr`'s init tuple with a tuple
// of slices. Each slice is a fresh clone of the hoisted unstacking fusion
// applied to the old stacked input at constant offsets 0..n-1, where n is
// the number of tuple elements in `new_shape`.
void UnstackWhileInput(const UnstackerTransformer& unstacker,
                       HloInstruction* while_instr, const Shape* new_shape,
                       int64_t index) {
  const Shape& slice_shape = new_shape->tuple_shapes(0);
  HloInstruction* old_while_input =
      while_instr->while_init()->mutable_operand(index);
  std::vector<HloInstruction*> slices;
  for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {
    // Operands: the stacked input plus the constant slice offset `i`, typed
    // like the unstacking computation's second parameter.
    std::vector<HloInstruction*> operands = {
        old_while_input, while_instr->AddInstruction(MakeConstantWithShape(
                             unstacker.GetUnstackingComputation()
                                 ->parameter_instruction(1)
                                 ->shape(),
                             i))};
    HloInstruction* slice =
        while_instr->AddInstruction(HloInstruction::CreateFusion(
            slice_shape, HloInstruction::FusionKind::kLoop, operands,
            while_instr->GetModule()->AddEmbeddedComputation(
                unstacker.GetUnstackingComputation()->Clone()),
            "hoisted"));
    slices.push_back(slice);
  }
  HloInstruction* new_operand_element =
      while_instr->AddInstruction(HloInstruction::CreateTuple(slices));
  // Splice the tuple of slices into the while init at `index`; the init's
  // shape changes, hence ReplaceOperandWithDifferentShape below.
  HloInstruction* new_while_init =
      TupleUtil::ReplaceTupleWith(new_operand_element,
                                  while_instr->while_init(), {index}, false)
          .value();
  CHECK_OK(while_instr->ReplaceOperandWithDifferentShape(0, new_while_init));
}
// Attempts to unstack tuple element `index` of `while_instr`. Runs the
// analysis phase first (loop body, condition, and the enclosing
// computation) and mutates nothing unless every check succeeds; only then
// applies the collected shape rewrites, fusion rewrites, and the new while
// init. Returns true on success and appends the matched instructions to
// `unstacked_instructions`.
bool UnstackWhileOperandAtIndex(
    const UnstackerMetadata& metadata, HloInstruction* while_instr,
    int64_t index, std::vector<const HloInstruction*>& unstacked_instructions) {
  UnstackerTransformer unstacker = UnstackerTransformer(metadata);
  // Phase 1: feasibility analysis — collect changes without mutating.
  bool can_unstack = CanUnstackWhileOperand(while_instr, unstacker, index);
  if (!can_unstack) {
    return false;
  }
  bool parent_changes_collected = CanPropagateGteShapeChangesInComputation(
      while_instr->parent(), while_instr, unstacker, index);
  if (!parent_changes_collected) {
    return false;
  }
  // No custom handler matched, so there is no unstacked shape to apply.
  if (unstacker.GetUnstackedShape() == nullptr) {
    return false;
  }
  // Phase 2: apply the collected changes.
  // NOTE: this structured binding's `index` shadows the function parameter;
  // inside the loop it is the per-instruction tuple/operand index.
  for (const auto& [instr, index] : unstacker.GetOperandChanges()) {
    switch (instr->opcode()) {
      case HloOpcode::kGetTupleElement:
        *instr->mutable_shape() = *unstacker.GetUnstackedShape();
        break;
      case HloOpcode::kTuple:
        *instr->mutable_shape()->mutable_tuple_shapes(index) =
            *unstacker.GetUnstackedShape();
        break;
      case HloOpcode::kWhile:
        ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), index,
                                    instr->mutable_shape());
        break;
      default:
        LOG(FATAL) << "Unsupported opcode: " << instr->ToString();
    }
  }
  // Rewrite the matched fusions inside the loop body.
  for (const auto& body_change : unstacker.GetBodyChanges()) {
    CHECK_OK(body_change());
  }
  // Replace the stacked while input with a tuple of hoisted slices, then
  // propagate the new init shape to the while instruction itself.
  UnstackWhileInput(unstacker, while_instr, unstacker.GetUnstackedShape(),
                    index);
  const Shape& new_while_shape = while_instr->while_init()->shape();
  *while_instr->mutable_shape() = new_while_shape;
  // Finally, rewrite the loop body/condition parameter shapes.
  for (auto& loop_change : unstacker.GetLoopChanges()) {
    loop_change(unstacker.GetUnstackedShape());
  }
  for (const HloInstruction* instr : unstacker.GetUnstackedInstructions()) {
    unstacked_instructions.push_back(instr);
  }
  return true;
}
// Custom pattern: matches a two-parameter fusion, inside an unrollable
// while body, whose root is bitcast(dynamic-slice(...)) where the
// dynamic-slice is shape-covering over the stacked parameter and its
// dynamic index is the loop induction variable (dynamic index 0 here means
// the slice offset is the first dynamic operand — see
// MatchShapeCoveringDynamicIndexInstruction). Returns the fusion itself on
// a match, nullptr otherwise.
const HloInstruction* IsDynamicSlicingFusion(const UnstackerMetadata& metadata,
                                             const HloInstruction* instr,
                                             int64_t stacked_operand_idx) {
  CHECK_EQ(instr->opcode(), HloOpcode::kFusion);
  if (instr->fused_parameters().size() != 2) {
    return nullptr;
  }
  if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
    VLOG(5) << "Instruction not inside unrollable while body, "
            << instr->ToString() << instr->parent()->ToString();
    return nullptr;
  }
  WhileLoopConfig while_instr_config =
      metadata.unrollable_loop_bodies.at(instr->parent());
  for (HloInstruction* fused_instr :
       instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
    if (!Match(fused_instr, match::DynamicSlice())) {
      continue;
    }
    std::optional<int64_t> dynamic_index =
        MatchShapeCoveringDynamicIndexInstruction(
            fused_instr,
            instr->fused_instructions_computation()->parameter_instruction(
                stacked_operand_idx),
            HloOpcode::kDynamicSlice, while_instr_config);
    if (dynamic_index.has_value() && dynamic_index.value() == 0) {
      // Require the fusion root to be a bitcast of this dynamic-slice.
      HloInstruction* bitcast_operand = nullptr;
      if (Match(instr->fused_instructions_computation()->root_instruction(),
                match::Bitcast(match::Op(&bitcast_operand)))) {
        if (bitcast_operand == fused_instr) {
          return instr;
        }
      }
    }
  }
  return nullptr;
}
// Rewrites a shape-covering dynamic-slicing fusion into a "DynamicGte"
// custom-call that selects one element of the (to-be-unstacked) tuple
// operand, then redirects all of the fusion's users to the new instruction.
absl::Status UnstackDynamicSlicingFusion(
    HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
  HloInstruction* stacked_operand =
      mutable_dynamic_slicing_fusion->mutable_operand(0);
  HloInstruction* slice_index =
      mutable_dynamic_slicing_fusion->mutable_operand(1);
  HloComputation* enclosing_computation =
      mutable_dynamic_slicing_fusion->parent();
  HloInstruction* dynamic_gte =
      enclosing_computation->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked_operand, slice_index}, "DynamicGte"));
  return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
      dynamic_gte);
}
// Custom pattern: matches an outer fusion, inside an unrollable while body,
// whose stacked parameter has exactly one user — an inner fusion — that in
// turn contains a shape-covering dynamic-slice of that parameter with
// dynamic index 0. Returns the inner fusion on a match, nullptr otherwise.
const HloInstruction* GetNestedDynamicSlicingFusion(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  CHECK_EQ(instr->opcode(), HloOpcode::kFusion);
  if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
    VLOG(5) << "Instruction not inside unrollable while body, "
            << instr->ToString() << instr->parent()->ToString();
    return nullptr;
  }
  WhileLoopConfig while_instr_config =
      metadata.unrollable_loop_bodies.at(instr->parent());
  // Find the (unique) inner fusion fed by the stacked parameter.
  HloInstruction* inner_fusion_user = nullptr;
  for (HloInstruction* fused_instr :
       instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
    if (Match(fused_instr, match::Parameter(stacked_operand_idx))) {
      if (fused_instr->user_count() != 1) {
        return nullptr;
      }
      if (Match(fused_instr->users()[0],
                match::Fusion(match::Op(), match::Op()))) {
        inner_fusion_user = fused_instr->users()[0];
        break;
      }
    }
  }
  if (inner_fusion_user == nullptr) {
    return nullptr;
  }
  // Look inside the inner fusion for a shape-covering dynamic-slice of its
  // first parameter (the stacked operand).
  for (HloInstruction* inner_fusion_instr :
       inner_fusion_user->fused_instructions_computation()
           ->MakeInstructionPostOrder()) {
    if (!Match(inner_fusion_instr, match::DynamicSlice())) {
      continue;
    }
    std::optional<int64_t> dynamic_index =
        MatchShapeCoveringDynamicIndexInstruction(
            inner_fusion_instr,
            inner_fusion_user->fused_instructions_computation()
                ->parameter_instruction(0),
            HloOpcode::kDynamicSlice, while_instr_config);
    if (dynamic_index.has_value() && dynamic_index.value() == 0) {
      return inner_fusion_user;
    }
  }
  return nullptr;
}
// Rewrites a nested dynamic-slicing fusion: the stacked parameter of the
// enclosing (parent) fusion is replaced by a single slice produced by a
// "DynamicGte" custom-call outside the parent fusion, and the inner
// dynamic-slicing fusion is removed from the parent's computation.
absl::Status UnstackNestedDynamicSlicingFusion(
    HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
  HloInstruction* parent_fusion =
      mutable_dynamic_slicing_fusion->parent()->FusionInstruction();
  VLOG(3) << "Found shape-covering dynamic slicing fusion inside a fusion: "
          << mutable_dynamic_slicing_fusion->name() << " inside "
          << parent_fusion->name();
  // Map the inner fusion's stacked/offset parameters back to the parent
  // fusion's actual operands.
  HloInstruction* stacked_in_ds_fusion =
      mutable_dynamic_slicing_fusion->mutable_operand(0);
  CHECK_EQ(stacked_in_ds_fusion->opcode(), HloOpcode::kParameter);
  int64_t stacked_param_number = stacked_in_ds_fusion->parameter_number();
  HloInstruction* stacked =
      parent_fusion->mutable_operand(stacked_param_number);
  HloInstruction* offset_in_ds_fusion =
      mutable_dynamic_slicing_fusion->mutable_operand(1);
  CHECK_EQ(offset_in_ds_fusion->opcode(), HloOpcode::kParameter);
  HloInstruction* offset =
      parent_fusion->mutable_operand(offset_in_ds_fusion->parameter_number());
  // Inside the parent fusion, the stacked parameter now carries the slice
  // shape and directly replaces the inner fusion's uses.
  HloInstruction* sliced_param =
      parent_fusion->fused_instructions_computation()->ReplaceParameter(
          stacked_param_number,
          HloInstruction::CreateParameter(stacked_param_number, slice_shape,
                                          "sliced"));
  TF_RETURN_IF_ERROR(
      mutable_dynamic_slicing_fusion->ReplaceAllUsesWith(sliced_param));
  TF_RETURN_IF_ERROR(
      parent_fusion->fused_instructions_computation()
          ->RemoveInstructionAndUnusedOperands(mutable_dynamic_slicing_fusion));
  // NOTE(review): ComputeProgramShape() returns by value, so the write
  // through mutable_parameters() below mutates a temporary and appears to
  // have no lasting effect — confirm whether this block is actually needed.
  std::vector<Shape> parameters =
      parent_fusion->fused_instructions_computation()
          ->ComputeProgramShape()
          .parameters();
  parameters.at(stacked_param_number) = slice_shape;
  *parent_fusion->fused_instructions_computation()
       ->ComputeProgramShape()
       .mutable_parameters() = parameters;
  // Feed the parent fusion a single slice produced by a DynamicGte
  // custom-call on the (to-be-unstacked) stacked operand.
  HloInstruction* new_operand =
      parent_fusion->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));
  return parent_fusion->ReplaceOperandWithDifferentShape(stacked_param_number,
                                                         new_operand);
}
};
absl::StatusOr<bool> HloUnstacker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(auto metadata, UnstackerMetadata::Create(module));
metadata.custom_handlers.push_back(
std::make_pair(IsDynamicSlicingFusion, UnstackDynamicSlicingFusion));
metadata.custom_handlers.push_back(std::make_pair(
GetNestedDynamicSlicingFusion, UnstackNestedDynamicSlicingFusion));
bool unstacked = false;
std::vector<const HloInstruction*> unstacked_instructions;
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() != HloOpcode::kWhile) {
continue;
}
for (int64_t i = 0; i < instr->shape().tuple_shapes_size(); ++i) {
VLOG(3) << "Attempting to unstack " << instr->name() << " at " << i
<< " with stacked shape "
<< instr->shape().tuple_shapes(i).ToString();
if (UnstackWhileOperandAtIndex(metadata, instr, i,
unstacked_instructions)) {
VLOG(3) << "Unstacked " << instr->name() << " at | #include "xla/service/hlo_unstacker.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using UnstackerTest = HloTestBase;
// A while operand consumed by a single shape-covering dynamic-slice fusion
// in the loop body must be unstacked, and the rewritten module must be
// numerically equivalent to the original.
TEST_F(UnstackerTest, UnstackLoopSingleFusionUser) {
  std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) ->
s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128]
%param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694),
dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128]
bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[],
bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop,
calls=%fused_computation.slice conv = bf16[8,128] convolution(bf16[8,128]
p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf ROOT out = (s32[],
bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[]
%constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input),
condition=%while.cond , body=%while.body while_use = s8[3,128,128]
get-tuple-element(while.out), index=2 ROOT out = bf16[8,128]
get-tuple-element(while.out), index=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare against the original.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt));
}
// Same as UnstackLoopSingleFusionUser, but the dynamic-slice fusion is
// nested inside an outer (kOutput) fusion — exercising the nested
// dynamic-slicing pattern/handler pair.
TEST_F(UnstackerTest, UnstackLoopSingleNestedFusionUser) {
  std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare against the original.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}
// Two independent stacked operands (tuple indices 2 and 3), each consumed
// by its own nested dynamic-slicing fusion, should both be unstackable in
// a single pass run.
TEST_F(UnstackerTest, UnstackLoopSingleNestedFusionUserMultipleIndex) {
  std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[4,128,128] get-tuple-element(wide_p), index=2
p2 = s8[4,128,128] get-tuple-element(wide_p), index=3
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv.1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
fusion.conv.2 = bf16[8,128] fusion(p0, p2, i), kind=kOutput, calls=%fused_computation.inner.2
plus = bf16[8,128] add(fusion.conv.1, fusion.conv.2)
ROOT out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(inc, plus, p1, p2)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[4,128,128] parameter(0)
p1 = s8[4,128,128] parameter(1)
p2 = bf16[8,128] parameter(2)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(init, p2, p0, p1)
while.out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare against the original.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}
// Like UnstackLoopSingleNestedFusionUser, but the outer fusion's operands
// come in a different order (stacked operand first) — the pattern must not
// depend on the stacked operand occupying a particular parameter slot.
TEST_F(UnstackerTest, UnstackLoopSingleNestedFusionUserDiffereOperandsOrder) {
  std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_1.30691: s8[3,128,128], p2: s32[], param_0.34523: bf16[8,128]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(2)
%param_1.30691 = s8[3,128,128] parameter(0)
p2 = s32[] parameter(1)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p1, i, p0), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare against the original.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}
// Negative case: the stacked while operand p1 = s8[3,128,128] is
// dynamic-sliced through two *different* nested fusion computations
// (%fused_computation.slice.1 and %fused_computation.slice.2), one per outer
// fusion user (fusion.conv1 / fusion.conv2) inside the loop body. With
// multiple distinct nested fusion users, the unstacker must not rewrite the
// loop.
TEST_F(UnstackerTest, NotUnstackLoopMultipleNestedFusionUsers) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  %fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[3,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }
  %fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[3,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }
  %fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
    %param_0.34523 = bf16[8,128] parameter(0)
    %param_1.30691 = s8[3,128,128] parameter(1)
    p2 = s32[] parameter(2)
    %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
    ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
  }
  %fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
    %param_0.34523 = bf16[8,128] parameter(0)
    %param_1.30691 = s8[3,128,128] parameter(1)
    p2 = s32[] parameter(2)
    %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
    ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
  }
  %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    p0 = bf16[8,128] get-tuple-element(wide_p), index=1
    p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    fusion.conv1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
    fusion.conv2 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.2
    add = bf16[8,128] add(fusion.conv1, fusion.conv2)
    ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, add, p1)
  }
  %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(3)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }
  ENTRY main {
    p0 = s8[3,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(0)
    while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
    while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
    while_use = s8[3,128,128] get-tuple-element(while.out), index=2
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // The pass must report that it made no change.
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()))
;
  EXPECT_FALSE(unstacked);
}
// Two independent outer while loops, each wrapping an inner loop that
// dynamic-slices the shared stacked operand s8[4,128,128] through its own
// fusion chain (slice1/inner1 and slice2/inner2). Both loops are expected to
// be unstacked, and the rewritten module must be numerically equivalent to
// the original (verified by RunAndCompareTwoModules on a clone).
TEST_F(UnstackerTest, UnstackMultipleLoops) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  %fused_computation.slice1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[4,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }
  %fused_computation.inner1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
    %param_0.34523 = bf16[8,128] parameter(0)
    %param_1.30691 = s8[4,128,128] parameter(1)
    p2 = s32[] parameter(2)
    %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice1
    ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
  }
  %while.body.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
    inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner1
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
  }
  %while.cond.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(4)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }
  %while.body1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    param0 = bf16[8,128] get-tuple-element(wide_p), index=1
    param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(2)
    zero = s32[] constant(0)
    mult = s32[] multiply(i, one)
    inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
    inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner1, body=%while.body.inner1
    fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
  }
  %while.cond1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(20)
    add = s32[] add(%constant.12857, %constant.12857)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
  }
  %fused_computation.slice2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[4,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }
  %fused_computation.inner2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
    %param_0.34523 = bf16[8,128] parameter(0)
    %param_1.30691 = s8[4,128,128] parameter(1)
    p2 = s32[] parameter(2)
    %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice2
    ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
  }
  %while.body.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
    inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner2
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
  }
  %while.cond.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(4)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }
  %while.body2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    param0 = bf16[8,128] get-tuple-element(wide_p), index=1
    param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(2)
    zero = s32[] constant(0)
    mult = s32[] multiply(i, one)
    inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
    inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner2, body=%while.body.inner2
    fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
  }
  %while.cond2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(20)
    add = s32[] add(%constant.12857, %constant.12857)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
  }
  ENTRY main {
    weight = s8[4,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(1)
    while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
    while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond1 , body=%while.body1
    second.while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
    second.while.out = (s32[], bf16[8,128], s8[4,128,128]) while(second.while.input), condition=%while.cond2 , body=%while.body2
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare outputs afterwards.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}
// An outer while loop wraps an inner while loop; the inner loop body consumes
// the stacked operand s8[4,128,128] through a single nested fusion chain
// (kOutput fusion -> kLoop slicing fusion -> dynamic-slice). A single nested
// fusion user is supported, so the unstacker must rewrite the module and
// preserve numerics (verified against a pre-pass clone).
TEST_F(UnstackerTest, UnstackNestedLoopSingleNestedFusionUser) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  %fused_computation.slice (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[4,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }
  %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
    %param_0.34523 = bf16[8,128] parameter(0)
    %param_1.30691 = s8[4,128,128] parameter(1)
    p2 = s32[] parameter(2)
    %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
    ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
  }
  %while.body.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
    inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
  }
  %while.cond.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(4)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }
  %while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    param0 = bf16[8,128] get-tuple-element(wide_p), index=1
    param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(2)
    zero = s32[] constant(0)
    mult = s32[] multiply(i, one)
    inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
    inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner, body=%while.body.inner
    fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1
    ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
  }
  %while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(20)
    add = s32[] add(%constant.12857, %constant.12857)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
  }
  ENTRY main {
    weight = s8[4,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(1)
    while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
    while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Clone before running the pass so we can compare outputs afterwards.
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}
}
} |
1,916 | cpp | tensorflow/tensorflow | sharding_propagation | third_party/xla/xla/service/sharding_propagation.cc | third_party/xla/xla/service/sharding_propagation_test.cc | #ifndef XLA_SERVICE_SHARDING_PROPAGATION_H_
#define XLA_SERVICE_SHARDING_PROPAGATION_H_
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/call_graph.h"
#include "xla/service/custom_call_sharding_helper.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
bool InferDotShardingFromOperands(
HloInstruction* instruction, const CallGraph& call_graph,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool may_combine_partial_sharding, bool is_spmd);
bool InferConvolutionShardingFromOperands(HloInstruction* instruction,
const CallGraph& call_graph,
int64_t aggressiveness,
bool may_combine_partial_sharding,
bool is_spmd);
absl::StatusOr<bool> ProcessShardingInstruction(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
bool replace_sharding_with_copy,
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>*
unspecified_dims,
std::vector<HloSharding>* saved_root_shardings,
absl::flat_hash_map<int64_t, HloSharding>* saved_parameter_shardings,
absl::flat_hash_map<HloInstruction*, int64_t>*
instruction_to_shard_group_id = nullptr,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_as_group = nullptr,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_like_group = nullptr,
const std::vector<bool>*
allow_spmd_sharding_propagation_to_parameters_vector = nullptr);
int64_t ComputeNonRootUsers(const HloInstruction* instr);
std::optional<HloSharding> InferBroadcastOperandSharding(
const HloInstruction& instruction, bool is_spmd = true);
bool InferReduceShardingFromOperand(HloInstruction* instruction,
bool may_combine_partial_sharding,
bool is_spmd);
// HLO pass that propagates sharding annotations across the module, both from
// operands to users and from users back to operands, until a fixed point is
// reached (see Run in the .cc file).
class ShardingPropagation : public HloModulePass {
 public:
  // Maps a called computation (e.g. a while body/condition) to the calling
  // instruction whose sharding constrains that computation.
  using ComputationMap =
      absl::flat_hash_map<const HloComputation*, HloInstruction*>;
  // `is_spmd`: enables SPMD-specific propagation rules.
  // `propagate_metadata`: also propagate OpMetadata attached to shardings.
  // `allow_spmd_sharding_propagation_to_output` / `..._to_parameters`:
  //   per-leaf flags allowing propagation to the entry root / parameters;
  //   the scalar members below are the OR of each span, the vector members
  //   keep the per-leaf values.
  // `cse_prevention_only`: only add shardings that prevent CSE, without
  //   doing general propagation.
  // `sharding_helper`: custom-call sharding hooks; defaults to the base
  //   CustomCallShardingHelper when null.
  explicit ShardingPropagation(
      bool is_spmd = false, bool propagate_metadata = false,
      absl::Span<const bool> allow_spmd_sharding_propagation_to_output =
          {false},
      absl::Span<const bool> allow_spmd_sharding_propagation_to_parameters =
          {false},
      bool cse_prevention_only = false,
      std::unique_ptr<CustomCallShardingHelper> sharding_helper = nullptr)
      : is_spmd_(is_spmd),
        propagate_metadata_(propagate_metadata),
        allow_spmd_sharding_propagation_to_output_(
            absl::c_any_of(allow_spmd_sharding_propagation_to_output,
                           [](bool v) { return v; })),
        allow_spmd_sharding_propagation_to_parameters_(
            absl::c_any_of(allow_spmd_sharding_propagation_to_parameters,
                           [](bool v) { return v; })),
        allow_spmd_sharding_propagation_to_output_vector_(
            allow_spmd_sharding_propagation_to_output.begin(),
            allow_spmd_sharding_propagation_to_output.end()),
        allow_spmd_sharding_propagation_to_parameters_vector_(
            allow_spmd_sharding_propagation_to_parameters.begin(),
            allow_spmd_sharding_propagation_to_parameters.end()),
        cse_prevention_only_(cse_prevention_only) {
    if (sharding_helper) {
      sharding_helper_ = std::move(sharding_helper);
    } else {
      sharding_helper_ = std::make_unique<CustomCallShardingHelper>();
    }
  }
  absl::string_view name() const override { return "sharding-propagation"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Normalizes a sharding domain: applies the domain's sharding metadata to
  // the instructions inside it.
  static absl::Status NormalizeDomain(const DomainMetadata::Domain& domain,
                                      const DomainMetadata* metadata);
  // Computes the sharding `user` would impose on `instruction`, if any.
  static std::optional<HloSharding> GetShardingFromUser(
      const HloInstruction& instruction, const HloInstruction& user,
      int64_t aggressiveness, bool is_spmd, const CallGraph& call_graph,
      const CustomCallShardingHelper* sharding_helper);
 private:
  // Forward/backward inference helpers; each returns true if it changed any
  // sharding.
  bool InferShardingFromShardGroup(
      HloInstruction* instruction, const ComputationMap& computation_map,
      int64_t aggressiveness,
      const absl::flat_hash_set<HloInstruction*>& shard_group);
  bool InferShardingFromOperands(
      HloInstruction* instruction, const ComputationMap& computation_map,
      int64_t aggressiveness, const CallGraph& call_graph,
      const absl::flat_hash_set<absl::string_view>& execution_threads);
  bool InferShardingFromUsers(
      HloInstruction* instruction,
      const ShardingPropagation::ComputationMap& computation_map,
      int64_t aggressiveness, bool is_spmd,
      const CustomCallShardingHelper* sharding_helper,
      const CallGraph& call_graph);
  std::unique_ptr<CustomCallShardingHelper> sharding_helper_;
  bool is_spmd_;
  bool propagate_metadata_;
  bool allow_spmd_sharding_propagation_to_output_;
  bool allow_spmd_sharding_propagation_to_parameters_;
  std::vector<bool> allow_spmd_sharding_propagation_to_output_vector_;
  std::vector<bool> allow_spmd_sharding_propagation_to_parameters_vector_;
  bool cse_prevention_only_;
};
}
#endif
#include "xla/service/sharding_propagation.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/spmd/shard_barrier_partitioner.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/sharding_op_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Returns a sharding that improves on `instruction`'s current sharding (if
// any), or std::nullopt when `sharding` is not an improvement.
std::optional<HloSharding> ReturnImprovedSharding(
    HloSharding sharding, HloInstruction* instruction,
    bool may_combine_partial_sharding,
    bool allow_aggressive_resharding = false) {
  // The baseline the candidate must beat: the existing sharding, or nullptr
  // when the instruction is not yet annotated.
  const HloSharding* existing =
      instruction->has_sharding() ? &instruction->sharding() : nullptr;
  return hlo_sharding_util::ReturnImprovedShardingImpl(
      std::move(sharding), existing, instruction->shape(),
      may_combine_partial_sharding, allow_aggressive_resharding);
}
// Returns a sharding for the subshape at `index` that improves on the
// corresponding sub-sharding of `instruction` (if it has one), or
// std::nullopt when `sharding` is not an improvement.
//
// The two branches of the original differed only in the baseline pointer
// passed to ReturnImprovedShardingImpl; the duplicated call is collapsed by
// materializing the optional baseline first.
std::optional<HloSharding> ReturnImprovedSubSharding(
    HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
    bool may_combine_partial_sharding,
    bool allow_aggressive_resharding = false) {
  // Baseline sub-sharding at `index`, present only when the instruction is
  // already annotated. Kept alive in a local so its address stays valid for
  // the call below.
  std::optional<HloSharding> to_improved;
  if (instruction->has_sharding()) {
    to_improved =
        instruction->sharding().GetSubSharding(instruction->shape(), index);
  }
  return hlo_sharding_util::ReturnImprovedShardingImpl(
      std::move(sharding), to_improved.has_value() ? &*to_improved : nullptr,
      ShapeUtil::GetSubshape(instruction->shape(), index),
      may_combine_partial_sharding, allow_aggressive_resharding);
}
// Sets `sharding` on `instruction` if it improves on the current sharding.
// Returns true iff the instruction's sharding was updated.
bool MaybeImproveInstructionSharding(HloSharding sharding,
                                     HloInstruction* instruction,
                                     bool may_combine_partial_sharding,
                                     bool allow_aggressive_resharding = false) {
  std::optional<HloSharding> improved = ReturnImprovedSharding(
      std::move(sharding), instruction, may_combine_partial_sharding,
      allow_aggressive_resharding);
  if (!improved.has_value()) {
    return false;
  }
  instruction->set_sharding(std::move(*improved));
  return true;
}
// Like MaybeImproveInstructionSharding, but targets the subshape at `index`
// of a tuple-shaped instruction. For non-tuple shapes, `index` must be {0}
// and the call degenerates to the whole-instruction variant.
bool MaybeImproveInstructionSubSharding(
    HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
    bool may_combine_partial_sharding,
    bool allow_aggressive_resharding = false) {
  if (!instruction->shape().IsTuple()) {
    CHECK(index.size() == 1 && index[0] == 0);
    return MaybeImproveInstructionSharding(std::move(sharding), instruction,
                                           may_combine_partial_sharding,
                                           allow_aggressive_resharding);
  }
  std::optional<HloSharding> new_sub_sharding = ReturnImprovedSubSharding(
      std::move(sharding), instruction, index, may_combine_partial_sharding,
      allow_aggressive_resharding);
  if (!new_sub_sharding.has_value()) {
    return false;
  }
  // Start from the existing tuple sharding (or fully-replicated if absent),
  // then splice the improved sub-sharding in at `index`.
  HloSharding base_sharding =
      instruction->has_sharding()
          ? instruction->sharding()
          : HloSharding::Single(instruction->shape(), HloSharding::Replicate());
  ShapeTree<HloSharding> sharding_shape_tree =
      base_sharding.GetAsShapeTree(instruction->shape());
  *sharding_shape_tree.mutable_element(index) = *new_sub_sharding;
  instruction->set_sharding(HloSharding::Tuple(sharding_shape_tree));
  return true;
}
bool IsConvolutionKernelSmall(const HloInstruction* instruction) {
CHECK_EQ(instruction->opcode(), HloOpcode::kConvolution);
const HloInstruction* rhs = instruction->operand(1);
const auto& dnums = instruction->convolution_dimension_numbers();
int64_t kernel_dim_prod = 1;
int64_t output_dim_prod = 1;
for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {
int64_t kernel_dim =
rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i));
kernel_dim_prod *= kernel_dim;
int64_t output_dim =
instruction->shape().dimensions(dnums.output_spatial_dimensions(i));
output_dim_prod *= output_dim;
if (kernel_dim >= output_dim &&
(i < 2 || kernel_dim > 3 || kernel_dim_prod >= output_dim_prod)) {
return false;
}
}
return true;
}
// Returns true for custom calls that simply pass their operand's sharding
// through to the result.
bool IsPassthroughCustomOps(const HloInstruction* hlo) {
  // These targets are pass-through regardless of operand/result shapes.
  if (hlo->IsCustomCall({"Sharding", "X64Combine", "LayoutConstraint"})) {
    return true;
  }
  // The remaining targets are pass-through only for unary array->array calls
  // that preserve rank.
  const bool unary_rank_preserving_array_op =
      hlo->operand_count() == 1 && hlo->shape().IsArray() &&
      hlo->operand(0)->shape().IsArray() &&
      hlo->operand(0)->shape().rank() == hlo->shape().rank();
  if (!unary_rank_preserving_array_op) {
    return false;
  }
  return hlo->IsCustomCall(
      {"ResizeNearest", "ResizeBilinear", "ResizeNearestGrad",
       "ResizeBilinearGrad", "Cholesky",
       host_memory_offload_annotations::kMoveToDeviceCustomCallTarget,
       host_memory_offload_annotations::kMoveToHostCustomCallTarget});
}
// Picks the operand whose sharding should represent `instruction` when
// inferring a sharding from operands, or nullptr when no operand's sharding
// can be used directly.
const HloInstruction* PickRepresentativeOperand(
    const HloInstruction* instruction) {
  switch (instruction->opcode()) {
    // Ops whose result sharding follows the first (data) operand only.
    case HloOpcode::kMap:
    case HloOpcode::kPad:
    case HloOpcode::kPower:
    case HloOpcode::kOptimizationBarrier:
    case HloOpcode::kReverse:
    case HloOpcode::kSlice:
    case HloOpcode::kShiftLeft:
    case HloOpcode::kShiftRightArithmetic:
    case HloOpcode::kShiftRightLogical:
      if (instruction->operand(0)->has_sharding()) {
        return instruction->operand(0);
      }
      return nullptr;
    // Elementwise-like ops: any operand's sharding applies; choose the one
    // with the most specific sharding.
    case HloOpcode::kAbs:
    case HloOpcode::kAdd:
    case HloOpcode::kAnd:
    case HloOpcode::kAtan2:
    case HloOpcode::kBitcastConvert:
    case HloOpcode::kCeil:
    case HloOpcode::kClamp:
    case HloOpcode::kClz:
    case HloOpcode::kCompare:
    case HloOpcode::kComplex:
    case HloOpcode::kConcatenate:
    case HloOpcode::kConvert:
    case HloOpcode::kCopy:
    case HloOpcode::kCos:
    case HloOpcode::kAllGather:
    case HloOpcode::kAllReduce:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kAllToAll:
    case HloOpcode::kCollectiveBroadcast:
    case HloOpcode::kCollectivePermute:
    case HloOpcode::kDivide:
    case HloOpcode::kErf:
    case HloOpcode::kExp:
    case HloOpcode::kExpm1:
    case HloOpcode::kFloor:
    case HloOpcode::kImag:
    case HloOpcode::kIsFinite:
    case HloOpcode::kLog:
    case HloOpcode::kLog1p:
    case HloOpcode::kLogistic:
    case HloOpcode::kMaximum:
    case HloOpcode::kMinimum:
    case HloOpcode::kMultiply:
    case HloOpcode::kNegate:
    case HloOpcode::kNot:
    case HloOpcode::kOr:
    case HloOpcode::kPopulationCount:
    case HloOpcode::kReal:
    case HloOpcode::kReducePrecision:
    case HloOpcode::kRemainder:
    case HloOpcode::kRoundNearestAfz:
    case HloOpcode::kRoundNearestEven:
    case HloOpcode::kRsqrt:
    case HloOpcode::kSelect:
    case HloOpcode::kSign:
    case HloOpcode::kSin:
    case HloOpcode::kTopK:
    case HloOpcode::kSort:
    case HloOpcode::kSqrt:
    case HloOpcode::kCbrt:
    case HloOpcode::kSubtract:
    case HloOpcode::kStochasticConvert:
    case HloOpcode::kTan:
    case HloOpcode::kTanh:
    case HloOpcode::kWhile:
    case HloOpcode::kXor: {
      const HloInstruction* best_operand = nullptr;
      for (const HloInstruction* operand : instruction->operands()) {
        if (operand->has_sharding() &&
            (best_operand == nullptr ||
             hlo_sharding_util::IsShardingMoreSpecific(
                 operand->sharding(), best_operand->sharding()))) {
          best_operand = operand;
        }
      }
      return best_operand;
    }
    // Custom calls: only pass-through targets forward their operand's
    // sharding.
    case HloOpcode::kCustomCall: {
      if (IsPassthroughCustomOps(instruction)) {
        return instruction->operand(0);
      }
      return nullptr;
    }
    // Ops with a non-trivial relationship between operand and result
    // shardings; they are handled by dedicated inference rules elsewhere.
    case HloOpcode::kAddDependency:
    case HloOpcode::kAfterAll:
    case HloOpcode::kAsyncStart:
    case HloOpcode::kAsyncUpdate:
    case HloOpcode::kAsyncDone:
    case HloOpcode::kAllGatherStart:
    case HloOpcode::kAllGatherDone:
    case HloOpcode::kAllReduceStart:
    case HloOpcode::kAllReduceDone:
    case HloOpcode::kBatchNormGrad:
    case HloOpcode::kBatchNormInference:
    case HloOpcode::kBatchNormTraining:
    case HloOpcode::kBitcast:
    case HloOpcode::kBroadcast:
    case HloOpcode::kCall:
    case HloOpcode::kCholesky:
    case HloOpcode::kCollectivePermuteDone:
    case HloOpcode::kCollectivePermuteStart:
    case HloOpcode::kConditional:
    case HloOpcode::kConstant:
    case HloOpcode::kConvolution:
    case HloOpcode::kCopyDone:
    case HloOpcode::kCopyStart:
    case HloOpcode::kDomain:
    case HloOpcode::kDot:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kDynamicReshape:
    case HloOpcode::kFft:
    case HloOpcode::kFusion:
    case HloOpcode::kGather:
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kInfeed:
    case HloOpcode::kIota:
    case HloOpcode::kOutfeed:
    case HloOpcode::kParameter:
    case HloOpcode::kPartitionId:
    case HloOpcode::kRecv:
    case HloOpcode::kRecvDone:
    case HloOpcode::kReduce:
    case HloOpcode::kReduceWindow:
    case HloOpcode::kReplicaId:
    case HloOpcode::kReshape:
    case HloOpcode::kRng:
    case HloOpcode::kRngGetAndUpdateState:
    case HloOpcode::kRngBitGenerator:
    case HloOpcode::kScatter:
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kSend:
    case HloOpcode::kSendDone:
    case HloOpcode::kTranspose:
    case HloOpcode::kTriangularSolve:
    case HloOpcode::kTuple:
    case HloOpcode::kGetDimensionSize:
    case HloOpcode::kSetDimensionSize:
      return nullptr;
  }
}
// Returns whether the propagation pass may assign a spatial-partitioning
// sharding to `instruction`.
bool SupportSpatialPartitioning(
    const HloInstruction* instruction,
    const ShardingPropagation::ComputationMap& computation_map, bool is_spmd,
    bool allow_spmd_sharding_propagation_to_output,
    bool allow_spmd_sharding_propagation_to_parameters,
    const CustomCallShardingHelper* sharding_helper) {
  const bool is_entry_root = instruction->parent()
                                 ->parent()
                                 ->entry_computation()
                                 ->root_instruction() == instruction;
  // Computation roots are only shardable when the computation is reached via
  // a caller in `computation_map`, or when this is the entry root and
  // propagation to the module output is explicitly allowed.
  if (instruction->parent()->root_instruction() == instruction &&
      computation_map.find(instruction->parent()) == computation_map.end() &&
      !(is_entry_root && allow_spmd_sharding_propagation_to_output)) {
    return false;
  }
  // Elementwise ops are shardable; kRng only under SPMD.
  if (instruction->IsElementwise() &&
      (instruction->opcode() != HloOpcode::kRng || is_spmd)) {
    return true;
  }
  switch (instruction->opcode()) {
    case HloOpcode::kBroadcast:
    case HloOpcode::kConcatenate:
    case HloOpcode::kConditional:
    case HloOpcode::kConstant:
    case HloOpcode::kConvolution:
    case HloOpcode::kOptimizationBarrier:
    case HloOpcode::kDot:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kGather:
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kInfeed:
    case HloOpcode::kIota:
    case HloOpcode::kPad:
    case HloOpcode::kReduceWindow:
    case HloOpcode::kReshape:
    case HloOpcode::kScatter:
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kSlice:
    case HloOpcode::kSort:
    case HloOpcode::kTranspose:
    case HloOpcode::kTuple:
    case HloOpcode::kWhile:
    case HloOpcode::kReduce:
    case HloOpcode::kRngBitGenerator:
    case HloOpcode::kAllReduce:
    case HloOpcode::kReduceScatter:
      return true;
    // Parameters are shardable when propagation to parameters is allowed, or
    // when the enclosing computation is called by a mapped caller.
    case HloOpcode::kParameter:
      return allow_spmd_sharding_propagation_to_parameters ||
             computation_map.find(instruction->parent()) !=
                 computation_map.end();
    case HloOpcode::kReverse:
      return is_spmd;
    // Custom calls defer to a registered partitioner, a pass-through check,
    // or the pass's sharding helper — SPMD only.
    case HloOpcode::kCustomCall:
      if (!is_spmd) {
        return false;
      }
      if (auto* partitioner =
              GetCustomCallPartitioner(instruction->custom_call_target())) {
        return partitioner->IsCustomCallShardable(instruction);
      }
      return (IsPassthroughCustomOps(instruction) ||
              sharding_helper->IsCustomCallShardable(instruction));
    default:
      return false;
  }
}
// Looks ahead along the single-user chain starting at `instr` until an
// instruction with a sharding is found, then propagates that sharding
// backwards through the chain to compute the sharding `instr`'s direct user
// would request. Temporary shardings set on intermediate instructions during
// the backward walk are cleared before returning. Returns std::nullopt when
// the chain fans out or no sharded user is reachable.
std::optional<HloSharding> LookaheadUserSharding(HloInstruction* instr,
                                                 bool is_spmd,
                                                 const CallGraph& call_graph) {
  if (instr->user_count() != 1) {
    return std::nullopt;
  }
  HloInstruction* current_user = instr->users()[0];
  std::optional<HloSharding> sharding;
  std::vector<HloInstruction*> users_chain = {instr, current_user};
  // Walk forward while each instruction has exactly one user and no sharding.
  while (!current_user->has_sharding()) {
    if (current_user->users().size() != 1) {
      // Fan-out: abandon the lookahead.
      users_chain.clear();
      break;
    }
    current_user = current_user->users()[0];
    users_chain.push_back(current_user)
;
  }
  if (users_chain.empty()) {
    return std::nullopt;
  }
  // Walk backward from the sharded instruction, deriving each predecessor's
  // sharding from its user. Intermediate results are temporarily set so the
  // next GetShardingFromUser call can consume them.
  for (int i = users_chain.size() - 1; i >= 1; --i) {
    HloInstruction* user = users_chain[i];
    HloInstruction* current = users_chain[i - 1];
    CHECK(user->has_sharding());
    sharding = ShardingPropagation::GetShardingFromUser(
        *current, *user, INT64_MAX, is_spmd, call_graph,
        nullptr);
    if (sharding.has_value() && i != 1) {
      current->set_sharding(*sharding);
      continue;
    }
    break;
  }
  // Remove the temporary shardings placed on the interior of the chain.
  for (int i = 1; i < users_chain.size() - 1; ++i) {
    users_chain[i]->clear_sharding();
  }
  return sharding;
}
// Infers a sharding for a gather from its operand/indices shardings along
// the detected parallel dimensions. Returns true if the gather's sharding
// was improved.
bool InferGatherParallelShardingFromOperands(
    HloInstruction* instruction,
    const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
    bool may_combine_partial_sharding) {
  CHECK(DynCast<HloGatherInstruction>(instruction));
  const auto aligned_operand_parallel_dims =
      hlo_sharding_util::IndexAlignedOperandParallelDims(parallel_dims);
  const auto output_parallel_dims =
      hlo_sharding_util::GetGatherParallelOutputDims(*instruction,
                                                     parallel_dims);
  // Maps `source`'s sharding onto the gather output along `source_dims` and
  // applies it when it is an improvement.
  auto propagate_from = [&](const HloInstruction* source,
                            absl::Span<const int64_t> source_dims) {
    if (!hlo_sharding_util::IsSpatiallyPartitioned(source)) {
      return false;
    }
    return MaybeImproveInstructionSharding(
        hlo_sharding_util::
            InferGatherScatterParallelShardingFromOperandSharding(
                source->sharding(), source->shape(), instruction->shape(),
                source_dims, absl::MakeConstSpan(output_parallel_dims)),
        instruction, may_combine_partial_sharding);
  };
  bool changed =
      propagate_from(instruction->operand(0),
                     absl::MakeConstSpan(aligned_operand_parallel_dims));
  changed |= propagate_from(
      instruction->operand(1),
      absl::MakeConstSpan(parallel_dims.indices_parallel_dims));
  return changed;
}
// Improves the (sub-)shardings of a scatter `instruction` from the shardings
// of its operands, indices, and updates along the detected parallel
// dimensions. For variadic scatters the result is a tuple, and sub-shardings
// are improved per tuple index {i}. Returns true if any sharding changed.
bool InferScatterParallelShardingFromOperands(
    HloInstruction* instruction,
    const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
    bool may_combine_partial_sharding) {
  HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(instruction);
  CHECK(scatter);
  const int64_t operand_count = scatter->scatter_operand_count();
  auto scatter_operands = scatter->scatter_operands();
  auto scatter_indices = scatter->scatter_indices();
  auto scatter_updates = scatter->scatter_updates();
  bool changed = false;
  auto aligned_operand_parallel_dims =
      hlo_sharding_util::IndexAlignedOperandParallelDims(parallel_dims);
  auto update_parallel_dims = hlo_sharding_util::GetScatterParallelUpdateDims(
      *instruction, parallel_dims);
  // The scatter output's parallel dims coincide with the (aligned) operand
  // parallel dims.
  auto output_parallel_dims = aligned_operand_parallel_dims;
  // For variadic scatter the output is a tuple of identically-shaped
  // per-operand results; infer against a single element's shape.
  Shape shape = operand_count == 1
                    ? instruction->shape()
                    : ShapeUtil::GetSubshape(instruction->shape(), {0});
  // Infer each tuple element's sharding from the corresponding operand.
  for (int64_t i = 0; i != operand_count; ++i) {
    if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {
      changed |= MaybeImproveInstructionSubSharding(
          hlo_sharding_util::
              InferGatherScatterParallelShardingFromOperandSharding(
                  scatter_operands[i]->sharding(), scatter_operands[i]->shape(),
                  shape, absl::MakeConstSpan(aligned_operand_parallel_dims),
                  absl::MakeConstSpan(output_parallel_dims)),
          instruction, {i}, may_combine_partial_sharding);
    }
  }
  // The indices sharding applies uniformly to every tuple element.
  if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)) {
    auto parallel_sharding_from_indices = hlo_sharding_util::
        InferGatherScatterParallelShardingFromOperandSharding(
            scatter_indices->sharding(), scatter_indices->shape(), shape,
            absl::MakeConstSpan(parallel_dims.indices_parallel_dims),
            absl::MakeConstSpan(output_parallel_dims));
    for (int64_t i = 0; i != operand_count; ++i) {
      changed |= MaybeImproveInstructionSubSharding(
          parallel_sharding_from_indices, instruction, {i},
          may_combine_partial_sharding);
    }
  }
  // Infer each tuple element's sharding from the corresponding update.
  for (int64_t i = 0; i != operand_count; ++i) {
    if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
      changed |= MaybeImproveInstructionSubSharding(
          hlo_sharding_util::
              InferGatherScatterParallelShardingFromOperandSharding(
                  scatter_updates[i]->sharding(), scatter_updates[i]->shape(),
                  shape, absl::MakeConstSpan(update_parallel_dims),
                  absl::MakeConstSpan(output_parallel_dims)),
          instruction, {i}, may_combine_partial_sharding);
    }
  }
  return changed;
}
// Returns whether sharding propagation should be attempted through `inst` at
// the given aggressiveness level. At level 0 only "trivial" ops are
// considered: elementwise ops, "Sharding" custom-calls, and a fixed allowlist
// of shape/control ops. Broadcasts additionally require level >= 2.
bool CanPropagateThroughAtAggressiveLevel(const HloInstruction& inst,
                                          int64_t aggressiveness) {
  if (aggressiveness < 1) {
    bool trivial = inst.IsElementwise() || inst.IsCustomCall("Sharding");
    if (!trivial) {
      switch (inst.opcode()) {
        case HloOpcode::kTranspose:
        case HloOpcode::kReshape:
        case HloOpcode::kTuple:
        case HloOpcode::kGetTupleElement:
        case HloOpcode::kWhile:
        case HloOpcode::kDynamicSlice:
        case HloOpcode::kDynamicUpdateSlice:
        case HloOpcode::kOptimizationBarrier:
        case HloOpcode::kConcatenate:
        case HloOpcode::kCall:
        case HloOpcode::kCopy:
          trivial = true;
          break;
        default:
          break;
      }
    }
    if (!trivial) {
      return false;
    }
  }
  // Broadcast propagation needs a higher aggressiveness level.
  return !(aggressiveness < 2 && inst.opcode() == HloOpcode::kBroadcast);
}
bool SameShardingMetadata(const HloSharding& a, const HloSharding& b) {
DCHECK_EQ(a, b);
auto same_metadata = [](absl::Span<const OpMetadata> a,
absl::Span<const OpMetadata> b) {
if (a.size() != b.size()) return false;
for (int i = 0, e = a.size(); i < e; ++i) {
if (!protobuf_util::ProtobufEquals(a[i], b[i])) {
return false;
}
}
return true;
};
if (a.IsTuple()) {
for (int i = 0, e = a.tuple_elements().size(); i < e; ++i) {
if (!same_metadata(a.tuple_elements()[i].metadata(),
b.tuple_elements()[i].metadata())) {
return false;
}
}
return true;
} else {
return same_metadata(a.metadata(), b.metadata());
}
}
bool AssignShardingMetadata(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
const auto& metadata = instruction->metadata();
if (!instruction->has_sharding() || metadata.ByteSizeLong() == 0) {
continue;
}
HloSharding sharding_with_metadata =
instruction->sharding().WithMetadata({metadata},
false);
if (!SameShardingMetadata(instruction->sharding(),
sharding_with_metadata)) {
instruction->set_sharding(std::move(sharding_with_metadata));
changed = t | #include "xla/service/sharding_propagation.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ShardingPropagationTest = HloTestBase;
// Strips all op metadata and all sharding metadata from every instruction in
// `module`, so tests can simulate modules with no metadata present.
void ClearMetadata(HloModule* module) {
  for (HloComputation* comp : module->computations()) {
    for (HloInstruction* hlo : comp->instructions()) {
      if (hlo->metadata().ByteSizeLong() != 0) {
        hlo->set_metadata(OpMetadata());
      }
      if (hlo->has_sharding()) {
        hlo->set_sharding(hlo->sharding().WithoutMetadata());
      }
    }
  }
}
// Test parameter bundle controlling whether ShardingPropagation is run with
// metadata propagation enabled and whether the module's metadata is stripped
// before running the pass.
struct MetadataTestParameter {
  explicit MetadataTestParameter(bool propagate_metadata, bool clear_metadata)
      : propagate_metadata(propagate_metadata),
        clear_metadata(clear_metadata) {}
  // Whether the pass propagates sharding metadata.
  bool propagate_metadata = false;
  // Whether metadata is removed from the module before the pass runs.
  bool clear_metadata = false;
};
// Like MetadataTestParameter, but additionally controls whether sharding may
// be propagated to the entry computation's root instruction.
struct MetadataTestParameterWithOutput {
  explicit MetadataTestParameterWithOutput(bool propagate_metadata,
                                           bool clear_metadata,
                                           bool allow_root_sharding_propagation)
      : propagate_metadata(propagate_metadata),
        clear_metadata(clear_metadata),
        allow_root_sharding_propagation(allow_root_sharding_propagation) {}
  // Whether the pass propagates sharding metadata.
  bool propagate_metadata = false;
  // Whether metadata is removed from the module before the pass runs.
  bool clear_metadata = false;
  // Whether propagation to the entry root instruction is allowed.
  bool allow_root_sharding_propagation = false;
};
// Fixture for tests parameterized over metadata propagation settings.
class ParameterizedMetadataTest
    : public HloTestBase,
      public ::testing::WithParamInterface<MetadataTestParameter> {};
// Fixture additionally parameterized over root-sharding propagation.
class ParameterizedMetadataTestWithOutput
    : public HloTestBase,
      public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {};
// Renders a metadata list as "{{...}, {...}}" for matcher failure messages.
std::string OpMetadataListToString(absl::Span<const OpMetadata> metadata) {
  return absl::StrCat(
      "{",
      absl::StrJoin(metadata, ", ",
                    [](std::string* out, const OpMetadata& element) {
                      absl::StrAppend(out, "{", OpMetadataToString(element),
                                      "}");
                    }),
      "}");
}
// gMock matcher that checks an HloSharding's metadata exactly matches an
// expected list: same length, same order, proto-equal elements.
class HloShardingMetadataMatcher
    : public ::testing::MatcherInterface<const HloSharding&> {
 public:
  explicit HloShardingMetadataMatcher(absl::Span<const OpMetadata> metadata)
      : metadata_(metadata.begin(), metadata.end()) {}
  bool MatchAndExplain(
      const HloSharding& sharding,
      ::testing::MatchResultListener* listener) const override {
    // Length mismatch is reported with the full expected list for context.
    if (sharding.metadata().size() != metadata_.size()) {
      *listener << sharding.ToString(true)
                << " has incorrect sharding metadata (expected: "
                << OpMetadataListToString(metadata_) << ")";
      return false;
    }
    // Element-wise proto comparison, in order.
    for (int i = 0, e = metadata_.size(); i < e; ++i) {
      if (!protobuf_util::ProtobufEquals(sharding.metadata()[i],
                                         metadata_[i])) {
        *listener << sharding.ToString(true)
                  << " has incorrect sharding metadata (expected: "
                  << OpMetadataListToString(metadata_) << ")";
        return false;
      }
    }
    return true;
  }
  void DescribeTo(std::ostream* os) const override {
    *os << OpMetadataListToString(metadata_);
  }

 private:
  // Expected metadata, copied from the constructor argument.
  std::vector<OpMetadata> metadata_;
};
// Convenience factory wrapping HloShardingMetadataMatcher in a gMock Matcher.
::testing::Matcher<const HloSharding&> ShardingMetadata(
    absl::Span<const OpMetadata> metadata) {
  return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata));
}
// Builds an OpMetadata proto with only the op_name field set.
OpMetadata CreateMetadata(const std::string& op_name) {
  OpMetadata metadata;
  metadata.set_op_name(op_name);
  return metadata;
}
// Instantiates ParameterizedMetadataTest over all four combinations of
// (propagate_metadata, clear_metadata); the lambda builds readable suffixes
// for the generated test names.
INSTANTIATE_TEST_SUITE_P(
    ShardingPropagation, ParameterizedMetadataTest,
    ::testing::Values(MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/true),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/true)),
    [](const ::testing::TestParamInfo<MetadataTestParameter>& info) {
      return absl::StrCat(info.param.propagate_metadata
                              ? "MetadataPropagation"
                              : "NoMetadataPropagation",
                          "_",
                          info.param.clear_metadata ? "NoMetadataInModule"
                                                    : "MetadataInModule");
    });
// Instantiates ParameterizedMetadataTestWithOutput over all eight
// combinations of (propagate_metadata, clear_metadata,
// allow_root_sharding_propagation).
INSTANTIATE_TEST_SUITE_P(
    ShardingPropagation, ParameterizedMetadataTestWithOutput,
    ::testing::Values(MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true)),
    [](const ::testing::TestParamInfo<MetadataTestParameterWithOutput>& info) {
      return absl::StrCat(
          info.param.propagate_metadata ? "MetadataPropagation"
                                        : "NoMetadataPropagation",
          "_",
          info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule",
          "_",
          info.param.allow_root_sharding_propagation ? "PropagateToRoot"
                                                     : "NoPropagateToRoot");
    });
// A parameter's op metadata is attached to its sharding only when metadata
// propagation is on and the module's metadata has not been cleared.
TEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3},
    metadata={op_name="test"}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_EQ(changed,
            GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto* instruction = FindInstruction(module.get(), "param0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("test")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// Metadata already present on a sharding is not overwritten by the
// instruction's op metadata.
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}},
    metadata={op_name="test"}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
  EXPECT_FALSE(changed);
  auto* instruction = FindInstruction(module.get(), "param0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  EXPECT_THAT(instruction->sharding(),
              ShardingMetadata({CreateMetadata("name")}));
}
// Sharding metadata is kept as-is when the instruction has no op metadata.
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
  EXPECT_FALSE(changed);
  auto* instruction = FindInstruction(module.get(), "param0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  EXPECT_THAT(instruction->sharding(),
              ShardingMetadata({CreateMetadata("name")}));
}
// Neither the sharding nor the instruction carries metadata: nothing changes.
TEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
  EXPECT_FALSE(changed);
  auto* instruction = FindInstruction(module.get(), "param0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
// Sharding flows forward from an operand through an elementwise op.
TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
  %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// Sharding flows backward from a user through an elementwise op.
TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
  %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// Operand sharding propagates forward through a broadcast (and optionally to
// the module root).
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048,2048]{2,1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
  %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2}
  ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  }
}
// A "ShardBarrierFrom" custom-call blocks forward propagation through a
// broadcast.
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048,2048]{2,1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
  %shard-barrier-from = f32[3,2048,2048]{2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%shard-barrier-from), dimensions={0,1,2}
  ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}
// User sharding propagates backward through a broadcast.
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[13]{0} parameter(0)
  %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// A "ShardBarrierTo" custom-call blocks backward propagation; the producer
// copy stays replicated.
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[13]{0} parameter(0)
  %param0_copy = f32[13]{0} copy(param0)
  %shard-barrier-to = f32[13]{0} custom-call(%param0_copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%shard-barrier-to), dimensions={3}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
    sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "param0_copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
// A replicated scalar broadcast keeps its sharding; no backward change.
TEST_P(ParameterizedMetadataTest, Broadcast1DBackwardNoChange) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = s32[128]{0} parameter(0)
  %constant0 = s32[] constant(0), sharding={replicated}
  %broadcast = s32[128]{0} broadcast(%constant0), dimensions={}, sharding={replicated}
  ROOT %compare = pred[128]{0} compare(s32[128]{0} %param0, s32[128]{0} %broadcast),
    direction=NE, sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_FALSE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
// Partially-replicated operand sharding propagates forward through a
// broadcast.
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048]parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
  ROOT %copy = f32[3,2048,3] copy(%broadcast)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(
        module->entry_computation()->root_instruction(),
        op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  }
}
// Forward (operand) and backward (user) partial shardings merge on the
// broadcast; metadata from both sources is combined.
TEST_P(ParameterizedMetadataTest, BroadcastMerge) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048]parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
  ROOT %copy = f32[3,2048,3] copy(%broadcast),
    sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// A broadcast user's sharding propagates backward to the operand over the
// broadcasted dimensions.
TEST_P(ParameterizedMetadataTest, BroadcastUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[24,8]{0,1} parameter(0)
  %copy = f32[24,8]{0,1} copy(%param0)
  ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3},
    sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// Backward propagation from a broadcast user produces a partially-replicated
// operand sharding (tiling on the new broadcast dimension becomes
// replication on the operand).
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[24,8]{0,1} parameter(0)
  %copy = f32[24,8]{0,1} copy(%param0)
  ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3},
    sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}"));
  }
}
// Reducing away all tiled dimensions yields a replicated result sharding.
TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add
  ROOT %copy = f32[5,7]{0,1} copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// Tiling on non-reduced dimensions survives the reduce.
TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add
  ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
// A "ShardBarrierFrom" custom-call blocks forward propagation into a reduce.
TEST_P(ParameterizedMetadataTest, ReduceForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %reduce = f32[7,11]{1,0} reduce(%shard-barrier-from, %init), dimensions={0,3}, to_apply=%add
  ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}
// Reducing a tiled dimension converts that tiling into partial replication.
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[8,8] parameter(0),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
  ROOT %copy = f32[8] copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT( |
1,917 | cpp | tensorflow/tensorflow | hlo_dce | third_party/xla/xla/service/hlo_dce.cc | third_party/xla/xla/service/hlo_dce_test.cc | #ifndef XLA_SERVICE_HLO_DCE_H_
#define XLA_SERVICE_HLO_DCE_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass which removes dead instructions from each computation in the
// module and removes dead (unreachable) computations from the module.
class HloDCE : public HloModulePass {
 public:
  // By default, side-effecting cross-partition collective ops are kept even
  // when their results are unused.
  HloDCE() : remove_cross_partition_collective_ops_(false) {}
  // If `remove_cross_partition_collective_ops` is true, dead collective ops
  // without a constrained layout are also removed.
  explicit HloDCE(bool remove_cross_partition_collective_ops)
      : remove_cross_partition_collective_ops_(
            remove_cross_partition_collective_ops) {}
  ~HloDCE() override {}
  absl::string_view name() const override { return "dce"; }
  // Runs DCE on a single computation. Returns whether the computation changed.
  static absl::StatusOr<bool> RunOnComputation(
      HloComputation* computation, bool remove_cross_partition_collective_ops);
  using HloPassInterface::Run;
  // Runs the pass on `module`. Returns whether the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  // Finds all computations that are never called and removes them, together
  // with any computations that become unreachable as a result.
  absl::StatusOr<bool> RecursivelyRemoveDeadComputations(HloModule* module);
  // Removes one dead computation, updating `live_call_counts` and recursing
  // into callees whose live-call count drops to zero.
  absl::Status RecursivelyRemoveDeadComputation(
      HloModule* module, HloComputation* computation,
      absl::flat_hash_map<HloComputation*, int>& live_call_counts);
  // Whether dead, layout-unconstrained collective ops may be removed.
  bool remove_cross_partition_collective_ops_;
};
}
#endif
#include "xla/service/hlo_dce.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Returns true if `instruction` is a while loop that is safe to delete: no
// instruction inside its condition or body has a side effect, except that
// layout-unconstrained collective ops are tolerated when
// `remove_cross_partition_collective_ops` is set.
bool IsRemovableWhile(HloInstruction* instruction,
                      bool remove_cross_partition_collective_ops) {
  if (instruction->opcode() != HloOpcode::kWhile) {
    return false;
  }
  // Inspect every instruction in the while condition and body computations.
  for (HloComputation* called : instruction->called_computations()) {
    for (HloInstruction* inner : called->instructions()) {
      if (!inner->HasSideEffect()) {
        continue;
      }
      // A side-effecting instruction only keeps the loop removable when it is
      // a collective op with unconstrained layout and the caller opted into
      // removing cross-partition collectives.
      auto* collective = DynCast<HloCollectiveInstruction>(inner);
      const bool removable_collective = remove_cross_partition_collective_ops &&
                                        collective != nullptr &&
                                        !collective->constrain_layout();
      if (!removable_collective) {
        return false;
      }
    }
  }
  return true;
}
}
// Runs dead code elimination on `computation`. Returns true if anything was
// removed or rewritten.
absl::StatusOr<bool> HloDCE::RunOnComputation(
    HloComputation* computation, bool remove_cross_partition_collective_ops) {
  bool changed = false;
  VLOG(3) << "Before dce:";
  XLA_VLOG_LINES(3, computation->ToString());
  // Special case: a multi-output fusion whose root tuple has more elements
  // than the fusion instruction has users can be shrunk to produce only the
  // used elements. Shrinking is skipped when the root is sharded, outputs
  // alias operands, the fusion has control dependencies, or it is a custom
  // fusion, since dropping outputs would not be safe in those cases.
  if (auto* fusion_instruction = computation->FusionInstruction();
      fusion_instruction != nullptr &&
      computation->root_instruction()->opcode() == HloOpcode::kTuple &&
      !computation->root_instruction()->has_sharding() &&
      fusion_instruction->output_operand_aliasing().empty() &&
      !fusion_instruction->HasControlDependencies() &&
      fusion_instruction->user_count() <
          computation->root_instruction()->operand_count() &&
      !fusion_instruction->IsCustomFusion()) {
    // Collect the tuple indices actually read. Bail out if any user is not a
    // get-tuple-element (then the whole tuple may be observed) or there are
    // no users at all.
    std::vector<int64_t> used_tuple_elements;
    used_tuple_elements.reserve(fusion_instruction->user_count());
    bool supported = fusion_instruction->user_count() > 0;
    for (HloInstruction* gte : fusion_instruction->users()) {
      if (gte->opcode() != HloOpcode::kGetTupleElement) {
        supported = false;
        break;
      }
      used_tuple_elements.push_back(gte->tuple_index());
    }
    if (supported) {
      // Compact the fusion's shape down to the used elements; a single used
      // element becomes a non-tuple shape.
      std::sort(used_tuple_elements.begin(), used_tuple_elements.end());
      std::vector<Shape> tuple_shapes;
      tuple_shapes.reserve(used_tuple_elements.size());
      for (int64_t tuple_index : used_tuple_elements) {
        tuple_shapes.push_back(
            fusion_instruction->shape().tuple_shapes(tuple_index));
      }
      Shape new_shape = tuple_shapes.size() == 1
                            ? tuple_shapes[0]
                            : ShapeUtil::MakeTupleShape(tuple_shapes);
      *fusion_instruction->mutable_shape() = std::move(new_shape);
      if (tuple_shapes.size() > 1) {
        // Remap each user's tuple index into the compacted tuple.
        for (HloInstruction* gte : fusion_instruction->users()) {
          auto it =
              std::lower_bound(used_tuple_elements.begin(),
                               used_tuple_elements.end(), gte->tuple_index());
          int64_t new_tuple_index =
              std::distance(used_tuple_elements.begin(), it);
          gte->set_tuple_index(new_tuple_index);
        }
      } else {
        // Only one element is used: the fusion now produces it directly, so
        // replace the lone get-tuple-element with the fusion itself.
        HloInstruction* gte = fusion_instruction->users()[0];
        TF_ASSIGN_OR_RETURN(bool replaced,
                            gte->parent()->ReplaceInstruction(
                                gte, fusion_instruction,
                                true,
                                true));
        if (replaced) {
          changed |= replaced;
        }
      }
      if (tuple_shapes.size() > 1) {
        // Rebuild the fused computation's root tuple from the used operands
        // only.
        std::vector<HloInstruction*> new_operands;
        new_operands.reserve(used_tuple_elements.size());
        for (int64_t tuple_index : used_tuple_elements) {
          new_operands.push_back(
              computation->root_instruction()->mutable_operand(tuple_index));
        }
        auto new_tuple = computation->AddInstruction(
            HloInstruction::CreateTuple(new_operands));
        TF_RETURN_IF_ERROR(computation->ReplaceInstructionWithDifferentShape(
            computation->root_instruction(), new_tuple));
      } else {
        // Make the single used operand the fused computation's root.
        TF_RETURN_IF_ERROR(
            computation->root_instruction()->ReplaceAllUsesWithDifferentShape(
                computation->root_instruction()->mutable_operand(
                    used_tuple_elements[0])));
      }
    }
  }
  // Collect dead instructions that are safe to remove. A "Sharding"
  // custom-call is only removed when its operand is itself single-use and is
  // neither a root nor a parameter. Side-effecting instructions are only
  // removable when they are layout-unconstrained collectives (and the caller
  // opted in) or while loops with no real side effects inside.
  std::vector<HloInstruction*> dead_roots;
  for (auto* instruction : computation->instructions()) {
    auto maybe_collective_op = DynCast<HloCollectiveInstruction>(instruction);
    if (instruction->IsDead() && computation->IsSafelyRemovable(instruction) &&
        (!instruction->IsCustomCall("Sharding") ||
         (!instruction->operand(0)->IsRoot() &&
          instruction->operand(0)->opcode() != HloOpcode::kParameter &&
          instruction->operand(0)->user_count() == 1)) &&
        (!instruction->HasSideEffect() ||
         (remove_cross_partition_collective_ops && maybe_collective_op &&
          !maybe_collective_op->constrain_layout()) ||
         IsRemovableWhile(instruction,
                          remove_cross_partition_collective_ops))) {
      dead_roots.push_back(instruction);
    }
  }
  // Remove each dead root along with operands that become unused.
  for (HloInstruction* dead_root : dead_roots) {
    VLOG(1) << "Removing dead root " << dead_root->ToString()
            << " and its unused operands";
    TF_RETURN_IF_ERROR(
        computation->RemoveInstructionAndUnusedOperands(dead_root));
    changed = true;
  }
  if (changed) {
    VLOG(3) << "After dce:";
    XLA_VLOG_LINES(3, computation->ToString());
  }
  return changed;
}
// Removes `computation` (already known to be dead) from `module`. Each callee
// of `computation` has its live-call count decremented; callees whose count
// drops to zero become dead themselves and are removed recursively.
absl::Status HloDCE::RecursivelyRemoveDeadComputation(
    HloModule* module, HloComputation* computation,
    absl::flat_hash_map<HloComputation*, int>& live_call_counts) {
  // Callees that become dead once `computation` is gone.
  std::vector<HloComputation*> newly_dead;
  for (HloInstruction* instr : computation->instructions()) {
    for (HloComputation* callee : instr->called_computations()) {
      auto it = live_call_counts.find(callee);
      if (it == live_call_counts.end()) {
        return tsl::errors::Internal(
            "called computation %s not found in live_call_counts table during "
            "HloDCE",
            callee->name());
      }
      const int remaining = --it->second;
      CHECK_GE(remaining, 0);
      if (remaining == 0) {
        newly_dead.push_back(callee);
        live_call_counts.erase(it);
      }
    }
  }
  VLOG(1) << "Removing dead computation " << computation->name();
  // Remove the caller first, then recurse into callees that just became dead.
  TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(computation));
  for (HloComputation* callee : newly_dead) {
    TF_RETURN_IF_ERROR(
        RecursivelyRemoveDeadComputation(module, callee, live_call_counts));
  }
  return absl::OkStatus();
}
// Removes every computation in `module` that is not reachable from the entry
// computation via call edges. Returns whether any computation was removed.
absl::StatusOr<bool> HloDCE::RecursivelyRemoveDeadComputations(
    HloModule* module) {
  bool removed_any = false;
  // Count, for each computation, how many call sites (plus the entry slot)
  // reference it. Computations absent from this map are dead.
  absl::flat_hash_map<HloComputation*, int> call_counts;
  if (HloComputation* entry = module->entry_computation()) {
    // The entry computation is always live; give it an implicit reference.
    ++call_counts[entry];
  }
  for (auto* caller : module->MakeComputationPostOrder()) {
    for (auto* instr : caller->instructions()) {
      for (auto* callee : instr->called_computations()) {
        ++call_counts[callee];
      }
    }
  }
  // Remove every computation with no references; the helper also cleans up
  // callees that become unreferenced as a result.
  for (auto* candidate : module->MakeComputationPostOrder()) {
    if (!call_counts.contains(candidate)) {
      TF_RETURN_IF_ERROR(
          RecursivelyRemoveDeadComputation(module, candidate, call_counts));
      removed_any = true;
    }
  }
  return removed_any;
}
// Runs the DCE pass over all computations on `execution_threads`, then drops
// computations left unreferenced. Returns whether the module changed.
absl::StatusOr<bool> HloDCE::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool modified = false;
  VLOG(2) << "Before dce:";
  XLA_VLOG_LINES(2, module->ToString());
  // Walk computations callers-before-callees (reverse post-order) so that
  // removing a dead call site can expose dead code in the callee.
  auto post_order = module->MakeComputationPostOrder(execution_threads);
  for (auto it = post_order.rbegin(); it != post_order.rend(); ++it) {
    TF_ASSIGN_OR_RETURN(
        const bool computation_changed,
        RunOnComputation(*it, remove_cross_partition_collective_ops_));
    modified |= computation_changed;
  }
  // Finally, delete computations that are no longer referenced at all.
  TF_ASSIGN_OR_RETURN(const bool removed_computations,
                      RecursivelyRemoveDeadComputations(module));
  modified |= removed_computations;
  VLOG(2) << "After dce:";
  XLA_VLOG_LINES(2, module->ToString());
  return modified;
}
} | #include "xla/service/hlo_dce.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = ::xla::match;
// Test fixture for the HloDCE pass.
class HloDceTest : public HloTestBase {
 protected:
  HloDceTest() {}
  // Returns whether `instruction` is still present in `computation`.
  bool HasInstruction(const HloComputation& computation,
                      const HloInstruction* instruction) {
    return absl::c_linear_search(computation.instructions(), instruction);
  }
};
// DCE makes no change when every instruction contributes to the root.
TEST_F(HloDceTest, NoDeadCode) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
  builder.AddInstruction(HloInstruction::CreateBinary(
      constant1->shape(), HloOpcode::kAdd, constant1, constant2));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(3, computation->instruction_count());
  HloDCE dce;
  EXPECT_FALSE(dce.Run(module.get()).value());
  EXPECT_EQ(3, computation->instruction_count());
}
// Dead send/send-done have side effects and must not be removed.
TEST_F(HloDceTest, InstructionsWithSideEffect) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto send = builder.AddInstruction(
      HloInstruction::CreateSend(constant, token, 0));
  builder.AddInstruction(HloInstruction::CreateSendDone(send));
  builder.AddInstruction(HloInstruction::CreateTuple({}));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(5, computation->instruction_count());
  HloDCE dce;
  EXPECT_FALSE(dce.Run(module.get()).value());
  EXPECT_EQ(5, computation->instruction_count());
}
// A dead custom-call flagged as side-effecting is kept.
TEST_F(HloDceTest, CustomCallInstructionsWithSideEffect) {
  auto builder = HloComputation::Builder(TestName());
  auto instr = Cast<HloCustomCallInstruction>(builder.AddInstruction(
      HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       {},
                                       "foo")));
  instr->set_custom_call_has_side_effect(true);
  builder.AddInstruction(HloInstruction::CreateTuple({}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
  EXPECT_FALSE(result);
}
// Wrapping a side-effecting custom-call in async start/done still keeps it.
TEST_F(HloDceTest, AsyncCustomCallInstructionsWithSideEffect) {
  auto builder = HloComputation::Builder(TestName());
  auto instr = Cast<HloCustomCallInstruction>(builder.AddInstruction(
      HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       {},
                                       "foo")));
  instr->set_custom_call_has_side_effect(true);
  builder.AddInstruction(HloInstruction::CreateTuple({}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK_AND_ASSIGN([[maybe_unused]] HloInstruction * async_done,
                          module->entry_computation()->CreateAsyncInstructions(
                              instr, {{ShapeUtil::MakeScalarShape(U32)}},
                              HloInstruction::kMainExecutionThread,
                              true, true));
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
  EXPECT_FALSE(result);
}
// A dead custom-call without side effects is removed.
TEST_F(HloDceTest, CustomCallInstructionsWithoutSideEffect) {
  auto builder = HloComputation::Builder(TestName());
  builder.AddInstruction(
      HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       {},
                                       "foo"));
  builder.AddInstruction(HloInstruction::CreateTuple({}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
  EXPECT_TRUE(result);
}
// An async-wrapped custom-call without side effects is removed.
TEST_F(HloDceTest, AsyncCustomCallInstructionsWithoutSideEffect) {
  auto builder = HloComputation::Builder(TestName());
  auto instr = Cast<HloCustomCallInstruction>(builder.AddInstruction(
      HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                       {},
                                       "foo")));
  instr->set_custom_call_has_side_effect(false);
  builder.AddInstruction(HloInstruction::CreateTuple({}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK_AND_ASSIGN([[maybe_unused]] HloInstruction * async_done,
                          module->entry_computation()->CreateAsyncInstructions(
                              instr, {{ShapeUtil::MakeScalarShape(U32)}},
                              HloInstruction::kMainExecutionThread,
                              true, true));
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
  EXPECT_TRUE(result);
}
// A dangling "Sharding" custom-call is kept when its operand has other users.
TEST_F(HloDceTest, ShardingCustomCallInstruction) {
  auto builder = HloComputation::Builder(TestName());
  auto p0 = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {10, 10}), "p0"));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(p0->shape(), HloOpcode::kAdd, p0, p0));
  auto dangling_sharding = builder.AddInstruction(
      HloInstruction::CreateCustomCall(p0->shape(),
                                       {add},
                                       "Sharding"));
  dangling_sharding->set_sharding(
      HloSharding::Tile(TileAssignment((absl::Span<const int64_t>){2, 1})));
  builder.AddInstruction(HloInstruction::CreateBinary(
      p0->shape(), HloOpcode::kMultiply, add, add));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&dce, module.get()));
  EXPECT_FALSE(result);
}
// A dangling "Sharding" custom-call whose operand is otherwise unused is
// removed together with that operand.
TEST_F(HloDceTest, ShardingCustomCallInstructionWithDeadOperand) {
  auto builder = HloComputation::Builder(TestName());
  auto p0 = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {10, 10}), "p0"));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(p0->shape(), HloOpcode::kAdd, p0, p0));
  auto dangling_sharding = builder.AddInstruction(
      HloInstruction::CreateCustomCall(p0->shape(),
                                       {add},
                                       "Sharding"));
  dangling_sharding->set_sharding(
      HloSharding::Tile(TileAssignment((absl::Span<const int64_t>){2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateBinary(p0->shape(), HloOpcode::kMultiply, p0, p0));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(4, computation->instruction_count());
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(2, computation->instruction_count());
}
// Parameters themselves are never removed, but their dead users are.
TEST_F(HloDceTest, DeadParameters) {
  auto builder = HloComputation::Builder(TestName());
  auto live_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {}), "live_param"));
  auto dead_param1 = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {}), "dead_param1"));
  builder.AddInstruction(HloInstruction::CreateParameter(
      2, ShapeUtil::MakeShape(F32, {}), "dead_param2"));
  builder.AddInstruction(HloInstruction::CreateUnary(
      dead_param1->shape(), HloOpcode::kNegate, dead_param1));
  builder.AddInstruction(HloInstruction::CreateUnary(
      live_param->shape(), HloOpcode::kNegate, live_param));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(5, computation->instruction_count());
  EXPECT_EQ(1, dead_param1->user_count());
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(4, computation->instruction_count());
  EXPECT_EQ(0, dead_param1->user_count());
}
// Dead instructions involved in a control dependency are not removed; the
// plain dead ones are.
TEST_F(HloDceTest, ControlDependencies) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
  auto dead_negate = builder.AddInstruction(HloInstruction::CreateUnary(
      constant1->shape(), HloOpcode::kNegate, constant1));
  auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
      constant1->shape(), HloOpcode::kAdd, constant1, constant2));
  auto dead_negate_with_control_dep =
      builder.AddInstruction(HloInstruction::CreateUnary(
          constant1->shape(), HloOpcode::kNegate, constant1));
  auto dead_add_with_control_dep =
      builder.AddInstruction(HloInstruction::CreateBinary(
          constant1->shape(), HloOpcode::kAdd, constant1, constant2));
  builder.AddInstruction(HloInstruction::CreateBinary(
      constant1->shape(), HloOpcode::kAdd, constant1, constant2));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(dead_negate_with_control_dep->AddControlDependencyTo(
      dead_add_with_control_dep));
  EXPECT_EQ(7, computation->instruction_count());
  EXPECT_TRUE(HasInstruction(*computation, dead_negate));
  EXPECT_TRUE(HasInstruction(*computation, dead_add));
  EXPECT_TRUE(HasInstruction(*computation, dead_negate_with_control_dep));
  EXPECT_TRUE(HasInstruction(*computation, dead_add_with_control_dep));
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(5, computation->instruction_count());
  EXPECT_FALSE(HasInstruction(*computation, dead_negate));
  EXPECT_FALSE(HasInstruction(*computation, dead_add));
  EXPECT_TRUE(HasInstruction(*computation, dead_negate_with_control_dep));
  EXPECT_TRUE(HasInstruction(*computation, dead_add_with_control_dep));
}
// A dead kCall (callee has no side effects) is removed.
TEST_F(HloDceTest, DeadInstructionWithCalledComputation) {
  auto module = CreateNewVerifiedModule();
  Shape shape = ShapeUtil::MakeShape(F32, {});
  auto callee_builder = HloComputation::Builder(TestName() + "-callee");
  {
    auto param = callee_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param"));
    callee_builder.AddInstruction(
        HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
  }
  auto called_computation =
      module->AddEmbeddedComputation(callee_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param"));
  auto dead_call = builder.AddInstruction(
      HloInstruction::CreateCall(shape, {param}, called_computation));
  builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(3, computation->instruction_count());
  EXPECT_EQ(2, param->user_count());
  EXPECT_EQ(0, dead_call->user_count());
  EXPECT_TRUE(HasInstruction(*computation, dead_call));
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(2, computation->instruction_count());
  EXPECT_EQ(1, param->user_count());
  EXPECT_FALSE(HasInstruction(*computation, dead_call));
}
// An unused while loop whose body contains an infeed (a side effect) is kept.
TEST_F(HloDceTest, CalledComputationWithSideEffect) {
  auto module = CreateNewVerifiedModule();
  Shape shape = ShapeUtil::MakeShape(F32, {});
  auto cond_builder = HloComputation::Builder(TestName() + "-cond");
  {
    auto param = cond_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "cond_param"));
    auto constant = cond_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
    cond_builder.AddInstruction(
        HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
                                      constant, ComparisonDirection::kLt));
  }
  auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
  auto body_builder = HloComputation::Builder(TestName() + "-body");
  {
    auto param = body_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param"));
    auto token = body_builder.AddInstruction(HloInstruction::CreateToken());
    auto infeed = body_builder.AddInstruction(
        HloInstruction::CreateInfeed(shape, token, ""));
    auto infeed_data = body_builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(shape, infeed, 0));
    body_builder.AddInstruction(HloInstruction::CreateBinary(
        shape, HloOpcode::kAdd, param, infeed_data));
  }
  auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param"));
  auto live_while = builder.AddInstruction(HloInstruction::CreateWhile(
      shape, cond_computation, body_computation, param));
  builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, param));
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(3, computation->instruction_count());
  EXPECT_EQ(2, param->user_count());
  EXPECT_EQ(0, live_while->user_count());
  EXPECT_TRUE(HasInstruction(*computation, live_while));
  HloDCE dce;
  EXPECT_FALSE(dce.Run(module.get()).value());
  EXPECT_EQ(3, computation->instruction_count());
  EXPECT_EQ(2, param->user_count());
  EXPECT_EQ(0, live_while->user_count());
  EXPECT_TRUE(HasInstruction(*computation, live_while));
}
// A call whose callee indirectly (via a nested call) performs an outfeed is
// kept even when its result is unused.
TEST_F(HloDceTest, CalledComputationWithNestedSideEffect) {
  auto module = CreateNewVerifiedModule();
  Shape shape = ShapeUtil::MakeShape(F32, {});
  auto nested_callee_builder =
      HloComputation::Builder(TestName() + "-nested_callee");
  {
    auto param = nested_callee_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param"));
    auto token =
        nested_callee_builder.AddInstruction(HloInstruction::CreateToken());
    nested_callee_builder.AddInstruction(
        HloInstruction::CreateOutfeed(shape, param, token, ""));
  }
  auto nested_called_computation =
      module->AddEmbeddedComputation(nested_callee_builder.Build());
  auto callee_builder = HloComputation::Builder(TestName() + "-callee");
  {
    auto param = callee_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param"));
    callee_builder.AddInstruction(HloInstruction::CreateCall(
        ShapeUtil::MakeTokenShape(), {param}, nested_called_computation));
  }
  auto called_computation =
      module->AddEmbeddedComputation(callee_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param"));
  auto live_call = builder.AddInstruction(HloInstruction::CreateCall(
      ShapeUtil::MakeTokenShape(), {param}, called_computation));
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(2, computation->instruction_count());
  EXPECT_EQ(1, param->user_count());
  EXPECT_EQ(0, live_call->user_count());
  EXPECT_TRUE(HasInstruction(*computation, live_call));
  HloDCE dce;
  EXPECT_FALSE(dce.Run(module.get()).value());
  EXPECT_EQ(2, computation->instruction_count());
  EXPECT_EQ(1, param->user_count());
  EXPECT_EQ(0, live_call->user_count());
  EXPECT_TRUE(HasInstruction(*computation, live_call));
}
// Removing a dead reduce also removes its now-unreferenced subcomputation.
TEST_F(HloDceTest, RemoveDeadSubcomputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder builder(TestName());
  HloComputation::Builder subcomp_builder("reduction_subcomp");
  {
    auto* param0 =
        subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            0, ShapeUtil::MakeShape(F32, {}), "param0"));
    auto* param1 =
        subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            1, ShapeUtil::MakeShape(F32, {}), "param1"));
    subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
  }
  auto reduce_subcomp = module->AddEmbeddedComputation(subcomp_builder.Build());
  builder.AddInstruction(HloInstruction::CreateReduce(
      ShapeUtil::MakeShape(F32, {1}),
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {100}), "param0")),
      builder.AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
      {0}, reduce_subcomp));
  builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 1);
}
// A subcomputation shared by a dead reduce and a live reduce survives.
TEST_F(HloDceTest, KeepUsedSubcomputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder builder(TestName());
  HloComputation::Builder subcomp_builder("reduction_subcomp");
  {
    auto* param0 =
        subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            0, ShapeUtil::MakeShape(F32, {}), "param0"));
    auto* param1 =
        subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            1, ShapeUtil::MakeShape(F32, {}), "param1"));
    subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
  }
  auto reduce_subcomp = module->AddEmbeddedComputation(subcomp_builder.Build());
  builder.AddInstruction(HloInstruction::CreateReduce(
      ShapeUtil::MakeShape(F32, {}),
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {100}), "param0")),
      builder.AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
      {0}, reduce_subcomp));
  builder.AddInstruction(HloInstruction::CreateReduce(
      ShapeUtil::MakeShape(F32, {}),
      builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(F32, {100}), "param1")),
      builder.AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
      {0}, reduce_subcomp));
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
  HloDCE dce;
  EXPECT_TRUE(dce.Run(module.get()).value());
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
// Two dead callers and the computation they both call are all removed.
TEST_F(HloDceTest, RemovedNestedDeadComputations) {
  auto module = CreateNewVerifiedModule();
  Shape shape = ShapeUtil::MakeShape(F32, {});
  HloComputation::Builder called_subcomp_builder("called_dead_add");
  {
    auto* param0 =
        called_subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            0, shape, "param0"));
    auto* param1 =
        called_subcomp_builder.AddInstruction(HloInstruction::CreateParameter(
            1, shape, "param1"));
    called_subcomp_builder.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, param0, param1));
  }
  auto called_subcomp =
      module->AddEmbeddedComputation(called_subcomp_builder.Build());
  {
    HloComputation::Builder dead_subcomp_builder("dead_caller0");
    auto* param0 = dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param0"));
    auto* param1 = dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateParameter(1, shape, "param1"));
    dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateCall(shape, {param0, param1}, called_subcomp));
    module->AddEmbeddedComputation(dead_subcomp_builder.Build());
  }
  {
    HloComputation::Builder dead_subcomp_builder("dead_caller1");
    auto* param0 = dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "param0"));
    auto* param1 = dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateParameter(1, shape, "param1"));
    dead_subcomp_builder.AddInstruction(
        HloInstruction::CreateCall(shape, {param0, param1}, called_subcomp));
    module->AddEmbeddedComputation(dead_subcomp_builder.Build());
  }
  HloComputation::Builder builder(TestName());
  builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 4);
  HloDCE dce;
  auto changed = dce.Run(module.get());
  ASSERT_TRUE(changed.ok());
  EXPECT_TRUE(*changed);
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 1);
}
// When only one fusion output is used, the fusion is shrunk to a non-tuple
// shape and the root tuple inside it is eliminated.
TEST_F(HloDceTest, MultiOutputFusionRemoveUnusedTupleElementsRemoveTuple) {
  constexpr char kHloString[] = R"(
  HloModule test_module
  fused_add {
    p0 = f32[32,32]{1,0} parameter(0)
    p1 = f32[32,32]{1,0} parameter(1)
    p2 = f32[32,32]{1,0} parameter(2)
    add = f32[32,32]{1,0} add(p0, p1)
    ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p2, add)
  }
  ENTRY reduce {
    param0 = f32[32,32]{1,0} parameter(0)
    param1 = f32[32,32]{1,0} parameter(1)
    param2 = f32[32,32]{1,0} parameter(2)
    fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1, param2), kind=kLoop, calls=fused_add
    gte.0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
    ROOT gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kHloString));
  HloDCE dce;
  auto changed = dce.Run(module.get());
  ASSERT_TRUE(changed.ok());
  EXPECT_TRUE(*changed);
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(0), m::Parameter(1))
                                   .WithShape(F32, {32, 32})));
  EXPECT_THAT(
      root->fused_expression_root(),
      GmockMatch(
          m::Add(m::Parameter(0), m::Parameter(1)).WithShape(F32, {32, 32})));
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
// When a middle fusion output is unused, the fusion's tuple shape is
// compacted and the remaining get-tuple-element indices are remapped.
TEST_F(HloDceTest, MultiOutputFusionRemoveUnusedTupleElementAdjustTuple) {
  constexpr char kHloString[] = R"(
  HloModule test_module
  fused_add {
    p0 = f32[32,32]{1,0} parameter(0)
    p1 = f32[32,32]{1,0} parameter(1)
    add = f32[32,32]{1,0} add(p0, p1)
    neg = f32[32,32]{1,0} negate(add)
    ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(neg, p0, add)
  }
  ENTRY reduce {
    param0 = f32[32,32]{1,0} parameter(0)
    param1 = f32[32,32]{1,0} parameter(1)
    fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1), kind=kLoop, calls=fused_add
    gte.0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
    gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
    gte.2 = f32[32,32]{1,0} get-tuple-element(fusion), index=2
    ROOT add = f32[32,32]{1,0} add(gte.0, gte.2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kHloString));
  HloDCE dce;
  auto changed = dce.Run(module.get());
  ASSERT_TRUE(changed.ok());
  EXPECT_TRUE(*changed);
  Shape shape = ShapeUtil::MakeShape(F32, {32, 32});
  Shape expected_shape = ShapeUtil::MakeTupleShape({shape, shape});
  HloInstruction* fusion;
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Add(
                  m::GetTupleElement(
                      m::Fusion(&fusion).WithShapeEqualTo(&expected_shape), 0),
                  m::GetTupleElement(m::Fusion(), 1))));
  EXPECT_THAT(
      fusion->fused_expression_root(),
      GmockMatch(
          m::Tuple(m::Negate(), m::Add()).WithShapeEqualTo(&expected_shape)));
  EXPECT_EQ(module->MakeComputationPostOrder().size(), 2);
}
// Shrinking the fusion to a single output replaces the surviving
// get-tuple-element, and the control dependency moves onto the fusion.
TEST_F(HloDceTest,
       MultiOutputFusionRemoveUnusedTupleElementWithControlAdjustTupleAndDep) {
  constexpr char kHloString[] = R"(
  HloModule test_module
  fused_add {
    p0 = f32[32,32]{1,0} parameter(0)
    p1 = f32[32,32]{1,0} parameter(1)
    add = f32[32,32]{1,0} add(p0, p1)
    ROOT res = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, add)
  }
  ENTRY reduce {
    param0 = f32[32,32]{1,0} parameter(0)
    param1 = f32[32,32]{1,0} parameter(1)
    fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(param0, param1), kind=kLoop, calls=fused_add
    gte.1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
    add.2 = f32[32,32]{1,0} add(param0, param1), control-predecessors={gte.1}
    ROOT add = f32[32,32]{1,0} add(add.2, gte.1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kHloString));
  HloDCE dce;
  auto changed = dce.Run(module.get());
  ASSERT_TRUE(changed.ok());
  EXPECT_TRUE(*changed);
  HloInstruction* fusion;
  HloInstruction* add2;
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Add(m::Add(&add2, m::Parameter(), m::Parameter()),
                                m::Fusion(&fusion))));
  EXPECT_EQ(add2->control_predecessors().size(), 1);
  EXPECT_EQ(add2->control_predecessors()[0], fusion);
}
}
} |
1,918 | cpp | tensorflow/tensorflow | collective_transformation_reorderer | third_party/xla/xla/service/collective_transformation_reorderer.cc | third_party/xla/xla/service/collective_transformation_reorderer_test.cc | #ifndef XLA_SERVICE_COLLECTIVE_TRANSFORMATION_REORDERER_H_
#define XLA_SERVICE_COLLECTIVE_TRANSFORMATION_REORDERER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that reorders reshape instructions around collectives: reshapes
// that consume an all-gather are hoisted above it, and reshapes that feed an
// all-reduce are sunk below it. Run() executes both reorderings and then
// DCEs the dead instructions left behind.
class CollectiveTransformationReorder : public HloModulePass {
public:
CollectiveTransformationReorder() = default;
~CollectiveTransformationReorder() override = default;
absl::string_view name() const override {
return "collective-transformation-reorderer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
// Moves eligible reshapes that use an all-gather's result to before it.
absl::StatusOr<bool> ReorderAllGatherTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
// Moves eligible reshapes feeding an all-reduce to after it.
absl::StatusOr<bool> ReorderAllReduceTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
};
}
#endif
#include "xla/service/collective_transformation_reorderer.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// One reorderable op found below an all-gather, together with the dimension
// that the all-gather dimension maps to after this op is applied.
struct CollectiveTransformation {
// The transformation instruction (currently always a kReshape).
HloInstruction* hlo;
// Index of the gather dimension within `hlo`'s output shape.
int64_t transformed_collective_dimension;
};
// Walks the chain of single-user transformation ops (currently only reshapes)
// below `all_gather` and computes, for each one, the dimension that the
// all-gather dimension maps to after the transformation. Returns std::nullopt
// if no transformation can be hoisted above the all-gather.
std::optional<std::vector<CollectiveTransformation>>
GetAllGatherTransformations(HloInstruction* all_gather) {
  // Collect the chain of single-user users below the all-gather, stopping at
  // the first op we do not know how to reorder.
  std::vector<HloInstruction*> transformation_hlos;
  {
    HloInstruction* transformation_hlo = all_gather;
    bool found_unsupported_transformation = false;
    while (transformation_hlo->user_count() == 1 &&
           !found_unsupported_transformation) {
      transformation_hlo = transformation_hlo->users()[0];
      switch (transformation_hlo->opcode()) {
        case HloOpcode::kReshape: {
          transformation_hlos.push_back(transformation_hlo);
          break;
        }
        default:
          found_unsupported_transformation = true;
      }
    }
  }
  if (transformation_hlos.empty()) {
    return std::nullopt;
  }
  // Maps `all_gather_dimension` through one reshape. The reshape is
  // compatible iff the element count of the dimensions before the gather
  // dimension is preserved and the gather dimension itself keeps its size,
  // i.e. the reshape does not merge or split across shards.
  auto get_reshaped_all_gather_dimension =
      [](const Shape& all_gather_shape, int64_t all_gather_dimension,
         HloInstruction* transformation_hlo) -> std::optional<int64_t> {
    int64_t all_gather_num_strides = absl::c_accumulate(
        all_gather_shape.dimensions().subspan(0, all_gather_dimension), 1,
        [](int64_t product, int64_t dimension_size) {
          return product * dimension_size;
        });
    int64_t reshaped_all_gather_dimension = 0;
    int64_t reshaped_num_strides = 1;
    while (reshaped_all_gather_dimension <
               transformation_hlo->shape().dimensions_size() &&
           reshaped_num_strides < all_gather_num_strides) {
      reshaped_num_strides *=
          transformation_hlo->shape().dimensions(reshaped_all_gather_dimension);
      ++reshaped_all_gather_dimension;
    }
    if (reshaped_num_strides != all_gather_num_strides) {
      return std::nullopt;
    }
    // Bounds guard (bug fix): if the stride product is only reached by
    // consuming every reshaped dimension, there is no dimension left for the
    // gather dimension to map to; indexing would be out of bounds.
    if (reshaped_all_gather_dimension >=
        transformation_hlo->shape().dimensions_size()) {
      return std::nullopt;
    }
    if (transformation_hlo->shape().dimensions(reshaped_all_gather_dimension) !=
        all_gather_shape.dimensions(all_gather_dimension)) {
      return std::nullopt;
    }
    return reshaped_all_gather_dimension;
  };
  std::vector<CollectiveTransformation> transformations;
  HloAllGatherInstruction* all_gather_instruction =
      DynCast<HloAllGatherInstruction>(all_gather);
  // Bug fix: validate the cast BEFORE dereferencing the result. Previously
  // shape() and all_gather_dimension() were called before this CHECK ran.
  CHECK(all_gather_instruction != nullptr);
  Shape all_gather_shape = all_gather_instruction->shape();
  int64_t all_gather_dimension = all_gather_instruction->all_gather_dimension();
  // Thread the gather dimension through each transformation in order; stop at
  // the first incompatible reshape, keeping the compatible prefix.
  for (HloInstruction* transformation_hlo : transformation_hlos) {
    bool found_unsupported_transformation = false;
    switch (transformation_hlo->opcode()) {
      case HloOpcode::kReshape: {
        std::optional<int64_t> reshaped_all_gather_dimension =
            get_reshaped_all_gather_dimension(
                all_gather_shape, all_gather_dimension, transformation_hlo);
        if (reshaped_all_gather_dimension.has_value()) {
          transformations.push_back(
              {transformation_hlo, *reshaped_all_gather_dimension});
          all_gather_shape = transformation_hlo->shape();
          all_gather_dimension = *reshaped_all_gather_dimension;
        } else {
          found_unsupported_transformation = true;
        }
        break;
      }
      default:
        return std::nullopt;
    }
    if (found_unsupported_transformation) {
      break;
    }
  }
  if (transformations.empty()) {
    return std::nullopt;
  }
  return transformations;
}
// Returns the chain of single-user reshapes feeding `all_reduce`, nearest
// first, that may be moved below it. Returns an empty vector when nothing is
// movable (including when the all-reduce has a constrained layout).
std::vector<HloInstruction*> GetAllReduceTransformations(
    HloInstruction* all_reduce) {
  HloAllReduceInstruction* all_reduce_instruction =
      DynCast<HloAllReduceInstruction>(all_reduce);
  CHECK_NE(all_reduce_instruction, nullptr);
  if (all_reduce_instruction->constrain_layout()) {
    // A layout-constrained all-reduce must not have its operand rewritten.
    return {};
  }
  std::vector<HloInstruction*> reshape_chain;
  // Walk up through consecutive reshapes that have no other consumers.
  for (HloInstruction* hlo = all_reduce->mutable_operand(0);
       hlo->opcode() == HloOpcode::kReshape && hlo->user_count() == 1;
       hlo = hlo->mutable_operand(0)) {
    reshape_chain.push_back(hlo);
  }
  return reshape_chain;
}
}
// Hoists each all-gather's reorderable reshape chain above the all-gather:
// the operand is reshaped first (with the gather dimension shrunk back to its
// per-participant size), then a fresh all-gather is built on the reshaped
// operand and replaces the last reshape of the original chain.
absl::StatusOr<bool>
CollectiveTransformationReorder::ReorderAllGatherTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
// First collect all candidates; rewriting while iterating the post-order
// instruction lists would invalidate them.
HloInstructionMap<std::vector<CollectiveTransformation>>
all_gather_to_transformations;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kAllGather) {
// Only single-operand all-gathers are handled.
if (instruction->operand_count() != 1) {
continue;
}
std::optional<std::vector<CollectiveTransformation>>
all_gather_transformations =
GetAllGatherTransformations(instruction);
if (all_gather_transformations.has_value()) {
all_gather_to_transformations[instruction] =
*std::move(all_gather_transformations);
}
}
}
}
if (all_gather_to_transformations.empty()) {
return false;
}
// Builds the pre-all-gather reshape: the transformation's output shape, but
// with the (mapped) gather dimension set to the operand's un-gathered size.
auto reshape_all_gather_operand =
[](HloInstruction* all_gather_operand,
int64_t original_all_gather_dimension,
const CollectiveTransformation& transformation) {
Shape reshaped_all_gather_operand_shape = transformation.hlo->shape();
int64_t operand_all_gather_dimension_size =
all_gather_operand->shape().dimensions(
original_all_gather_dimension);
reshaped_all_gather_operand_shape.set_dimensions(
transformation.transformed_collective_dimension,
operand_all_gather_dimension_size);
HloComputation* computation = all_gather_operand->parent();
return computation->AddInstruction(HloInstruction::CreateReshape(
reshaped_all_gather_operand_shape, all_gather_operand));
};
for (auto& [instruction, transformations] : all_gather_to_transformations) {
HloAllGatherInstruction* all_gather =
DynCast<HloAllGatherInstruction>(instruction);
int64_t all_gather_dimension = all_gather->all_gather_dimension();
int64_t original_all_gather_dimension_size =
all_gather->shape().dimensions(all_gather_dimension);
HloInstruction* all_gather_operand = instruction->mutable_operand(0);
// Thread the operand through each reshape, tracking where the gather
// dimension lands after each one.
for (const CollectiveTransformation& transformation : transformations) {
all_gather_operand = reshape_all_gather_operand(
all_gather_operand, all_gather_dimension, transformation);
all_gather_dimension = transformation.transformed_collective_dimension;
}
// The new all-gather restores the full (gathered) size on the mapped
// gather dimension and preserves all collective attributes.
Shape new_all_gather_shape = all_gather_operand->shape();
new_all_gather_shape.set_dimensions(all_gather_dimension,
original_all_gather_dimension_size);
HloComputation* computation = all_gather_operand->parent();
HloInstruction* new_all_gather =
computation->AddInstruction(HloInstruction::CreateAllGather(
new_all_gather_shape, {all_gather_operand}, all_gather_dimension,
all_gather->device_list(), all_gather->constrain_layout(),
all_gather->channel_id(), all_gather->use_global_device_ids()));
TF_RETURN_IF_ERROR(
transformations.back().hlo->ReplaceAllUsesWith(new_all_gather));
// ReplaceAllUsesWith does not update the computation root; do it here.
if (computation->root_instruction() == transformations.back().hlo) {
computation->set_root_instruction(new_all_gather);
}
}
return true;
}
// Sinks each all-reduce's feeding reshape chain below it: a new all-reduce is
// created directly on the chain's original input, the reshapes are re-applied
// on top of it, and the result replaces the original all-reduce.
absl::StatusOr<bool>
CollectiveTransformationReorder::ReorderAllReduceTransformations(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
// Collect candidates first; the rewrite below mutates the graph.
HloInstructionMap<std::vector<HloInstruction*>> all_reduce_to_transformations;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kAllReduce) {
// Skip multi-user all-reduces and computation roots.
if (instruction->user_count() != 1 ||
computation->root_instruction() == instruction) {
continue;
}
std::vector<HloInstruction*> reshapes =
GetAllReduceTransformations(instruction);
if (reshapes.empty()) {
continue;
}
all_reduce_to_transformations[instruction] = std::move(reshapes);
}
}
}
if (all_reduce_to_transformations.empty()) {
return false;
}
for (auto& [inst, reshapes] : all_reduce_to_transformations) {
HloComputation* computation = inst->parent();
HloAllReduceInstruction* all_reduce =
DynCast<HloAllReduceInstruction>(inst);
CHECK(!reshapes.empty());
// reshapes.back() is the farthest reshape from the all-reduce, so its
// operand is the original input of the whole chain.
HloInstruction* cur_operand = reshapes.back()->mutable_operand(0);
HloInstruction* new_all_reduce =
computation->AddInstruction(HloInstruction::CreateAllReduce(
cur_operand->shape(), {cur_operand}, all_reduce->to_apply(),
all_reduce->device_list(), all_reduce->constrain_layout(),
all_reduce->channel_id(), all_reduce->use_global_device_ids()));
cur_operand = new_all_reduce;
// Re-apply the reshapes in original program order (input to output).
for (int64_t i = reshapes.size() - 1; i >= 0; --i) {
cur_operand = computation->AddInstruction(
HloInstruction::CreateReshape(reshapes[i]->shape(), cur_operand));
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(all_reduce, cur_operand));
}
return true;
}
// Pass entry point: reorders reshapes around all-gathers and all-reduces,
// then cleans up instructions made dead by the rewrites.
absl::StatusOr<bool> CollectiveTransformationReorder::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(
      const bool all_gather_changed,
      ReorderAllGatherTransformations(module, execution_threads));
  TF_ASSIGN_OR_RETURN(
      const bool all_reduce_changed,
      ReorderAllReduceTransformations(module, execution_threads));
  const bool changed = all_gather_changed || all_reduce_changed;
  if (changed) {
    // The reordering leaves the original collective/reshape chains dead.
    HloDCE dce;
    TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
  }
  return changed;
}
} | #include "xla/service/collective_transformation_reorderer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
// Fixture exposing a helper that runs the pass with an empty
// execution-thread filter (i.e. all threads).
class CollectiveTransformationReordererTest : public HloTestBase {
public:
absl::StatusOr<bool> RunCollectiveTransformationReorderer(HloModule* module) {
CollectiveTransformationReorder reorderer;
return reorderer.Run(module, {});
}
};
// Reshape only splits dimensions after the gather dimension: hoistable.
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardAfterAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,4,1024] parameter(0)
all-gather = bf16[8,32,1024] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[8,32,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
// Reshape only merges dimensions before the gather dimension: hoistable,
// with the gather dimension remapped from 3 to 1.
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardBeforeAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,4,1024] parameter(0)
all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[2048,32,1024] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
// Reshape touches dimensions on both sides of the gather dimension but never
// the gather dimension itself: still hoistable.
TEST_F(CollectiveTransformationReordererTest,
ReshapeWithinShardBeforeAndAfterAllGatherDim) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,32,8,4,1024] parameter(0)
all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[2048,32,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllGather(op::Reshape(op::Parameter())));
HloInstruction* all_gather = module->entry_computation()->root_instruction();
EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}
// Reshape merges the gathered (sharded) dimension with another: not hoistable.
TEST_F(CollectiveTransformationReordererTest, ReshapeAcrossShards) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,1,8,128] parameter(0)
all-gather = bf16[8,8,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[64,8,128] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
// Reshape folds the gather dimension into the following one: rejected.
TEST_F(CollectiveTransformationReordererTest, MergeAllGatherDimensionWithNext) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,8,16,16] parameter(0)
all-gather = bf16[64,8,16,16] all-gather(param), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[512,16,16] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
// Reshape folds the gather dimension into the preceding one: rejected.
TEST_F(CollectiveTransformationReordererTest,
MergeAllGatherDimensionWithPrevious) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param = bf16[8,8,16,16] parameter(0)
all-gather = bf16[8,64,16,16] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1
ROOT reshape = bf16[512,16,16] reshape(all-gather)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
// A single reshape feeding the all-reduce is moved below it.
TEST_F(CollectiveTransformationReordererTest, AllReduceSingleReshape) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
// The rewritten module must still verify (shapes stay consistent).
TF_ASSERT_OK(HloVerifier(false,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Reshape(op::AllReduce(op::Parameter())),
op::Constant(), op::Constant(), op::Constant()));
}
// A chain of two reshapes is re-applied, in order, after the all-reduce.
TEST_F(CollectiveTransformationReordererTest, AllReduceTwoReshapes) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,3072,2] parameter(0)
reshape.1 = bf16[16384,6144] reshape(param)
reshape.2 = bf16[1,16384,6144] reshape(reshape.1)
all-reduce = bf16[1,16384,6144] all-reduce(reshape.2), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK(HloVerifier(false,
true)
.Run(module.get())
.status());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::DynamicSlice(op::Reshape(op::Reshape(op::AllReduce(op::Parameter()))),
op::Constant(), op::Constant(), op::Constant()));
}
// The feeding reshape has a second user, so it cannot be moved.
TEST_F(CollectiveTransformationReordererTest, AllReduceReshapeWithTwoUsers) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
copy = bf16[1,16384,6144] copy(reshape)
ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
// The all-reduce itself has two users, so it is skipped.
TEST_F(CollectiveTransformationReordererTest, AllReduceWithTwoUsersReshape) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
constant = s32[] constant(0)
dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
copy = bf16[1,16384,6144] copy(all-reduce)
ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
// constrain_layout=true forbids touching the all-reduce's operand.
TEST_F(CollectiveTransformationReordererTest, AllReduceConstrainLayout) {
absl::string_view hlo_string = R"(
HloModule module
add {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT s = bf16[] add(a, b)
}
ENTRY entry {
param = bf16[16384,6144] parameter(0)
reshape = bf16[1,16384,6144] reshape(param)
all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, constrain_layout=true, to_apply=add
constant = s32[] constant(0)
ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunCollectiveTransformationReorderer(module.get()));
EXPECT_FALSE(changed);
}
}
} |
1,919 | cpp | tensorflow/tensorflow | cpu_gpu_shape_verifier | third_party/xla/xla/service/cpu_gpu_shape_verifier.cc | third_party/xla/xla/service/cpu_gpu_shape_verifier_test.cc | #ifndef XLA_SERVICE_CPU_GPU_SHAPE_VERIFIER_H_
#define XLA_SERVICE_CPU_GPU_SHAPE_VERIFIER_H_
#include <memory>
#include <utility>
#include "xla/service/hlo_verifier.h"
namespace xla {
// Shape verifier with extra restrictions shared by the CPU and GPU backends:
// no sparse shapes, no custom element sizes on non-sub-byte types, and
// sub-byte (s4/u4) values only on a whitelisted set of ops.
class CpuGpuShapeVerifier : public ShapeVerifier {
public:
explicit CpuGpuShapeVerifier(const HloVerifierOpts& opts)
: ShapeVerifier(opts) {}
// Runs the CPU/GPU-specific checks, then the base-class preprocessing.
absl::Status Preprocess(HloInstruction* hlo) override;
};
// TargetVerifierMetadata that hands out CpuGpuShapeVerifier instances,
// configured with the stored verifier options.
class CpuGpuVerifierMetadata : public TargetVerifierMetadata {
public:
explicit CpuGpuVerifierMetadata(HloVerifierOpts&& opts)
: TargetVerifierMetadata(std::move(opts)) {}
std::unique_ptr<ShapeVerifier> GetVerifier() const override {
return std::make_unique<CpuGpuShapeVerifier>(GetVerifierOpts());
}
};
}
#endif
#include "xla/service/cpu_gpu_shape_verifier.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Rejects sub-byte non-pred element types (s4/u4) anywhere in `instruction`'s
// result shape, unless the opcode is one of the ops the CPU/GPU backends can
// handle for narrow types.
absl::Status VerifyS4U4Usage(HloInstruction* instruction) {
  switch (instruction->opcode()) {
    // Ops that may legitimately produce sub-byte-typed values.
    case HloOpcode::kBitcast:
    case HloOpcode::kBroadcast:
    case HloOpcode::kConstant:
    case HloOpcode::kConcatenate:
    case HloOpcode::kConvert:
    case HloOpcode::kCopy:
    case HloOpcode::kFusion:
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kParameter:
    case HloOpcode::kSlice:
    case HloOpcode::kTuple:
    case HloOpcode::kWhile:
      return absl::OkStatus();
    default:
      break;
  }
  // Any sub-byte non-pred subshape on any other op is an error.
  return ShapeUtil::ForEachSubshapeWithStatus(
      instruction->shape(), [&](const Shape& shape, const ShapeIndex&) {
        if (primitive_util::IsSubByteNonPredType(shape.element_type())) {
          return absl::InvalidArgumentError(absl::StrFormat(
              "%s is currently only supported in convert instructions, "
              "but got instruction: %s",
              primitive_util::LowercasePrimitiveTypeName(shape.element_type()),
              instruction->ToString()));
        }
        return absl::OkStatus();
      });
}
}
// Per-instruction hook: rejects sparse shapes and custom element sizes, then
// enforces the s4/u4 whitelist, then defers to the base ShapeVerifier.
absl::Status CpuGpuShapeVerifier::Preprocess(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
hlo->shape(), [&](const Shape& shape, const ShapeIndex&) {
// Layout-dependent checks only apply once a layout is assigned.
if (shape.has_layout()) {
if (LayoutUtil::IsSparseArray(shape)) {
return absl::InvalidArgumentError(absl::StrFormat(
"The XLA CPU/GPU backend does not support sparse shapes: %s",
hlo->ToString()));
}
// Sub-byte types carry their own element size; anything else must
// use the default (element_size_in_bits == 0).
if (!primitive_util::IsSubByteNonPredType(shape.element_type()) &&
shape.layout().element_size_in_bits() != 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"The XLA CPU/GPU backend does not support custom element sizes "
"on non-sub-byte-bit types: %s",
hlo->ToString()));
}
}
return absl::OkStatus();
}));
TF_RETURN_IF_ERROR(VerifyS4U4Usage(hlo));
return ShapeVerifier::Preprocess(hlo);
}
} | #include "xla/service/cpu_gpu_shape_verifier.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
// Fixture that installs a CpuGpuShapeVerifier-backed HloVerifier.
class CpuGpuShapeVerifierTest : public HloTestBase {
public:
CpuGpuShapeVerifierTest() {
HloVerifierOpts opts;
std::unique_ptr<TargetVerifierMetadata> metadata =
std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
hlo_verifier_ = std::make_unique<HloVerifier>(std::move(metadata));
}
};
// u4 on a non-whitelisted op (add) must be rejected.
TEST_F(CpuGpuShapeVerifierTest, Int4UnsupportedInstruction) {
const char* const hlo_string = R"(
HloModule Module
ENTRY main {
p0 = u4[2,5] parameter(0)
ROOT out = u4[2,5] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("u4 is currently only supported in convert instructions"));
}
// u4 on a whitelisted op (broadcast) must be accepted.
TEST_F(CpuGpuShapeVerifierTest, Int4SupportedInstruction) {
const char* const hlo_string = R"(
HloModule Module
ENTRY main {
p0 = u4[] parameter(0)
ROOT out = u4[3, 3] broadcast(p0), dimensions={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
TF_EXPECT_OK(status);
}
}
} |
1,920 | cpp | tensorflow/tensorflow | while_loop_constant_sinking | third_party/xla/xla/service/while_loop_constant_sinking.cc | third_party/xla/xla/service/while_loop_constant_sinking_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_CONSTANT_SINKING_H_
#define XLA_SERVICE_WHILE_LOOP_CONSTANT_SINKING_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that clones loop-invariant constants (and, optionally, broadcasts
// of constants) from a while loop's init tuple into the loop body and
// condition, replacing uses of the corresponding get-tuple-elements so later
// passes see the constant values directly.
class WhileLoopConstantSinking : public HloModulePass {
public:
// sink_broadcast_of_constants: also sink broadcast(constant) values.
// sink_only_scalar_constants: restrict sinking to scalar-shaped values.
explicit WhileLoopConstantSinking(bool sink_broadcast_of_constants = false,
bool sink_only_scalar_constants = false)
: sink_broadcast_of_constants_(sink_broadcast_of_constants),
sink_only_scalar_constants_(sink_only_scalar_constants) {}
~WhileLoopConstantSinking() override = default;
absl::string_view name() const override {
return "while-loop-constant-sinking";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
// Attempts the sinking for a single kWhile instruction.
absl::StatusOr<bool> TrySinkingConstantsIntoWhileLoop(
HloInstruction* while_instr);
const bool sink_broadcast_of_constants_;
const bool sink_only_scalar_constants_;
};
}
#endif
#include "xla/service/while_loop_constant_sinking.h"
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "xla/service/while_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace {
// Rewrites every use of `old_instr` to `new_instr`, EXCEPT the use at operand
// `tuple_index` of the while body's root tuple — that pass-through use must
// stay intact so the loop element remains recognizably invariant.
absl::Status ReplaceUsesWhileKeepingLoopInvariance(
    HloInstruction* old_instr, HloInstruction* new_instr,
    HloInstruction* while_body_root, int64_t tuple_index) {
  CHECK_EQ(while_body_root->opcode(), HloOpcode::kTuple);
  // Snapshot the user list first: ReplaceOperandWith mutates it while we
  // iterate.
  const std::vector<HloInstruction*> users(old_instr->users().begin(),
                                           old_instr->users().end());
  for (HloInstruction* user : users) {
    const int64_t operand_count = user->operand_count();
    for (int64_t operand_index = 0; operand_index < operand_count;
         ++operand_index) {
      const bool is_protected_root_use =
          user == while_body_root && operand_index == tuple_index;
      if (user->operand(operand_index) == old_instr && !is_protected_root_use) {
        TF_RETURN_IF_ERROR(user->ReplaceOperandWith(operand_index, new_instr));
      }
    }
  }
  return absl::OkStatus();
}
// Clones `instruction` into `computation`. Supports plain constants and
// broadcasts of constants (cloned recursively); anything else is a
// programming error.
HloInstruction* CloneHelper(const HloInstruction* instruction,
                            HloComputation* computation) {
  switch (instruction->opcode()) {
    case HloOpcode::kConstant:
      // The ".sunk" suffix keeps the clone's name distinguishable.
      return computation->AddInstruction(instruction->Clone(".sunk"));
    case HloOpcode::kBroadcast:
      return computation->AddInstruction(instruction->CloneWithNewOperands(
          instruction->shape(),
          {CloneHelper(instruction->operand(0), computation)}));
    default:
      LOG(FATAL) << "Unexpected instruction.";
  }
}
}
// For each loop-invariant GTE of `while_instr`'s body whose init value is a
// constant (or broadcast-of-constant, when enabled), clones that value into
// the body and/or condition and rewrites uses of the GTE to the clone —
// except the body root's pass-through use, which must stay to keep the
// element invariant.
absl::StatusOr<bool> WhileLoopConstantSinking::TrySinkingConstantsIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
const HloInstruction& init_value = *while_instr->operand(0);
// Only handle loops whose init is a literal tuple we can index into.
if (init_value.opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
conditional_gte_index_to_insts =
WhileUtil::GetGTEsMapForWhileConditional(*while_cond);
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
const HloInstruction& invariant_value = *init_value.operand(index);
// Only sink constants, or broadcast(constant) when the flag allows it.
if (invariant_value.opcode() != HloOpcode::kConstant &&
(!sink_broadcast_of_constants_ ||
invariant_value.opcode() != HloOpcode::kBroadcast ||
invariant_value.operand(0)->opcode() != HloOpcode::kConstant)) {
continue;
}
if (sink_only_scalar_constants_) {
if (!ShapeUtil::IsScalar(init_value.operand(index)->shape())) {
continue;
}
}
// user_count() > 1: the GTE has uses beyond its single pass-through use
// (presumably the root tuple — see ReplaceUsesWhileKeepingLoopInvariance).
if (invariant_body_gte->user_count() > 1) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_body);
TF_RETURN_IF_ERROR(ReplaceUsesWhileKeepingLoopInvariance(
invariant_body_gte, constant_instr, while_body->root_instruction(),
index));
changed = true;
}
// Also sink into the condition, where every use may be replaced.
auto it = conditional_gte_index_to_insts.find(index);
if (it == conditional_gte_index_to_insts.end()) {
continue;
}
for (HloInstruction* invariant_cond_gte : it->second) {
if (invariant_cond_gte->user_count() > 0) {
HloInstruction* constant_instr =
CloneHelper(&invariant_value, while_cond);
TF_RETURN_IF_ERROR(
invariant_cond_gte->ReplaceAllUsesWith(constant_instr));
changed = true;
}
}
}
return changed;
}
// Pass entry point: finds every kWhile in the module's non-fusion
// computations and attempts to sink constants into each one.
absl::StatusOr<bool> WhileLoopConstantSinking::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(2) << "HLO module before WhileLoopConstantSinking:";
  XLA_VLOG_LINES(2, module->ToString());
  // Gather the while instructions up front: sinking mutates the graph, so we
  // must not rewrite while iterating the computations' instruction lists.
  std::vector<HloInstruction*> while_instructions;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kWhile) {
        while_instructions.push_back(instruction);
      }
    }
  }
  bool changed = false;
  for (HloInstruction* while_instruction : while_instructions) {
    TF_ASSIGN_OR_RETURN(const bool sunk,
                        TrySinkingConstantsIntoWhileLoop(while_instruction));
    changed |= sunk;
  }
  if (changed) {
    VLOG(2) << "HLO module after WhileLoopConstantSinking:";
    XLA_VLOG_LINES(2, module->ToString());
  } else {
    VLOG(2) << "HLO module unchanged after WhileLoopConstantSinking";
  }
  return changed;
}
} | #include "xla/service/while_loop_constant_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopConstantSinkingTest = HloTestBase;
// A non-scalar constant tuple element is sunk only when the scalar-only
// restriction is off; the body's add then reads the sunk constant directly.
TEST_F(WhileLoopConstantSinkingTest, SinkOneConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
WhileLoopConstantSinking(false,
true)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
changed, WhileLoopConstantSinking(false,
false)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Constant()), _));
}
// broadcast(constant) is sunk only when sink_broadcast_of_constants is on.
TEST_F(WhileLoopConstantSinkingTest, SinkBroadcastOfConstant) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[16],f32[16]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[16],f32[16]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[] constant(1)
const_1 = f32[] constant(2)
broadcast_0 = f32[16] broadcast(const_0), dimensions={}
broadcast_1 = f32[16] broadcast(const_1), dimensions={}
while_init = tuple(broadcast_0, broadcast_1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
WhileLoopConstantSinking(false)
.Run(module.get()));
ASSERT_FALSE(changed);
TF_ASSERT_OK_AND_ASSIGN(
changed, WhileLoopConstantSinking(true)
.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Broadcast(op::Constant())), _));
}
// Verifies that sinking clones constants into the body for the *uses* while
// leaving the loop-carried tuple elements themselves intact: the pass-through
// outputs of the root still come from GTEs of the parameter, only the add's
// operands become in-body constants.
TEST_F(WhileLoopConstantSinkingTest, KeepConstantsLoopInvariant) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=1
p_body.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=2
add.0 = f32[2] add(p_body.1, p_body.2)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_body.1, p_body.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(op::Constant(), op::Constant()),
                        op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0))));
}
// Verifies that tuple-shaped constants are handled: a GTE of a tuple-shaped
// loop input is rewritten to a GTE of an in-body tuple constant, while the
// pass-through tuple element remains a GTE of the parameter.
TEST_F(WhileLoopConstantSinkingTest, TupleShapedConstants) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],(f32[2],f32[2])) parameter(0)
p_b.0 = f32[2] get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=0
p_b.1 = (f32[2],f32[2]) get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=1
p_b.1.1 = f32[2] get-tuple-element(p_b.1), index=0
ROOT root = (f32[2],(f32[2],f32[2])) tuple(p_b.1.1, p_b.1)
}
condition {
p_cond = (f32[2],(f32[2],f32[2])) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = (f32[2], f32[2]) constant(({2, 1},{3,1}))
while_init = (f32[2],(f32[2],f32[2])) tuple(const_0, const_1)
ROOT while = (f32[2],(f32[2],f32[2])) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::GetTupleElement(op::Constant(), 0),
                        op::GetTupleElement(op::Parameter(0))));
}
// Verifies behavior when the body has duplicate GTEs of the same tuple index:
// per the matcher below, only one of the add's operands ends up as an in-body
// constant (the duplicate GTE is not also replaced), and the loop-carried
// outputs still come from GTEs of the parameter.
TEST_F(WhileLoopConstantSinkingTest, DuplicateGTEs) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[2],f32[2],f32[2]) parameter(0)
p_b.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=1
p_b.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
p_b.2.dup = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2
add.0 = f32[2] add(p_b.1, p_b.2.dup)
ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_b.1, p_b.2)
}
condition {
p_cond = (f32[2],f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
const_2 = f32[2] constant({3, 1})
while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(op::Constant(), ::testing::Not(op::Constant())),
                        op::GetTupleElement(op::Parameter(0)),
                        op::GetTupleElement(op::Parameter(0))));
}
// Verifies that the pass never leaves behind dead constants in the body:
// every constant remaining in the body after the run must have at least one
// user. The outfeed keeps p_body.0 alive as a side-effecting use.
TEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
token0 = token[] after-all()
outfeed = token[] outfeed(p_body.0, token0)
ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] constant({1, 2})
const_1 = f32[2] constant({2, 1})
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition,
body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
                        op::GetTupleElement()));
  // No constant introduced (or left) by the pass may be dead.
  for (const HloInstruction* inst : while_body->instructions()) {
    if (inst->opcode() == HloOpcode::kConstant) {
      EXPECT_GT(inst->user_count(), 0);
    }
  }
}
// Verifies that constants are also sunk into the while *condition*: the
// loop bound (tuple element 1, invariant in the body) becomes an in-condition
// constant operand of the compare.
TEST_F(WhileLoopConstantSinkingTest, ConditionalSinkConstant) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[]) p_body), index=1
ROOT root = (f32[],f32[]) tuple(add, p_body.1)
}
condition {
p_cond = (f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=1
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
while_init = (f32[],f32[]) tuple(const_0, const_1)
ROOT while = (f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_condition = module->GetComputationWithName("condition");
  EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
}
// Verifies tuple-shaped constant sinking into the condition: the condition's
// GTE chain over the invariant tuple element is re-rooted at an in-condition
// tuple constant.
TEST_F(WhileLoopConstantSinkingTest, ConditionalTupleShapedConstants) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_b = (f32[],(f32[],f32[])) parameter(0)
p_b.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_b), index=0
p_b.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_b), index=1
p_b.1.0 = f32[] get-tuple-element((f32[],f32[]) p_b.1), index=0
add = f32[] add(p_b.0, p_b.1.0)
ROOT root = (f32[],(f32[],f32[])) tuple(add, p_b.1)
}
condition {
p_c = (f32[],(f32[],f32[])) parameter(0)
p_c.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_c), index=0
p_c.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_c), index=1
p_c.1.1 = f32[] get-tuple-element((f32[],f32[]) p_c.1), index=1
ROOT result = pred[] compare(p_c.0, p_c.1.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = (f32[], f32[]) constant((1, 10))
while_init = (f32[],(f32[],f32[])) tuple(const_0, const_1)
ROOT while = (f32[],(f32[],f32[])) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_condition = module->GetComputationWithName("condition");
  EXPECT_THAT(while_condition->root_instruction(),
              op::Lt(_, op::GetTupleElement(op::Constant())));
}
// Verifies that sinking into the condition does not leave dead constants:
// p_cond.2 is unused by the condition root, so no constant should be created
// for it; every constant remaining in the condition must have users.
TEST_F(WhileLoopConstantSinkingTest, ConditionalDontCreateDeadConstant) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add, p_body.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(10)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_condition = module->GetComputationWithName("condition");
  EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));
  // Any constant present in the condition must be used.
  for (const HloInstruction* inst : while_condition->instructions()) {
    if (inst->opcode() == HloOpcode::kConstant) {
      EXPECT_GT(inst->user_count(), 0);
    }
  }
}
// Verifies sinking into a condition that reads the same tuple index through
// multiple distinct GTE instructions (p_cond.2 and p_cond.2.c): both compares
// end up with an in-condition constant operand.
TEST_F(WhileLoopConstantSinkingTest, ConditionalMultipleSameIndexGTEs) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[],f32[],f32[]) parameter(0)
p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0
const = f32[] constant(1)
add.0 = f32[] add(p_body.0, const)
p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1
add.1 = f32[] add(p_body.1, const)
p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2
ROOT root = (f32[],f32[],f32[]) tuple(add.0, add.1, p_body.2)
}
condition {
p_cond = (f32[],f32[],f32[]) parameter(0)
p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0
p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.0 = pred[] compare(p_cond.0, p_cond.2), direction=LT
p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1
p_cond.2.c = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2
lt.1 = pred[] compare(p_cond.1, p_cond.2.c), direction=LT
ROOT result = pred[] and(lt.0, lt.1)
}
ENTRY entry {
const_0 = f32[] constant(0)
const_1 = f32[] constant(0)
const_2 = f32[] constant(12)
while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)
ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConstantSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_condition = module->GetComputationWithName("condition");
  EXPECT_THAT(while_condition->root_instruction(),
              op::And(op::Lt(_, op::Constant()), op::Lt(_, op::Constant())));
}
}
} |
1,921 | cpp | tensorflow/tensorflow | dump | third_party/xla/xla/service/dump.cc | third_party/xla/xla/service/dump_test.cc | #ifndef XLA_SERVICE_DUMP_H_
#define XLA_SERVICE_DUMP_H_
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/Operation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/xla.pb.h"
namespace xla {
constexpr char kBeforeOptimizationsDumpName[] = "before_optimizations";
constexpr char kAfterOptimizationsDumpName[] = "after_optimizations";
class BufferAssignment;
class HloSnapshot;
std::string TimestampFor(const HloModule& module);
std::string FilenameFor(int unique_id, absl::string_view module_name,
absl::string_view prefix, absl::string_view suffix);
std::string FilenameFor(const HloModule& module, absl::string_view prefix,
absl::string_view suffix);
void DumpToFileInDir(const HloModule& module, absl::string_view file_prefix,
absl::string_view file_suffix, absl::string_view contents);
void DumpToFileInDir(const DebugOptions& debug_options,
absl::string_view filename, absl::string_view contents);
void DumpToFileInDirOrStdout(const HloModule& module,
absl::string_view file_prefix,
absl::string_view file_suffix,
absl::string_view contents);
void DumpToFileInDirOrStdout(const DebugOptions& debug_options, int unique_id,
absl::string_view module_name,
absl::string_view file_prefix,
absl::string_view file_suffix,
absl::string_view contents);
void DumpToFileInDirOrStdout(const HloModule& module,
absl::string_view file_prefix,
mlir::Operation* op);
void DumpProtobufToFile(const tsl::protobuf::Message& proto,
const DebugOptions& debug_options,
absl::string_view filename,
absl::AnyInvocable<absl::StatusOr<std::string>(
tsl::Env*, const tsl::protobuf::Message&)>
text_formatter = nullptr);
std::string RenderGraph(absl::string_view label, const HloModule& module,
RenderedGraphFormat format,
bool show_fusion_subcomputations = true);
void DumpPerModuleProtobufToFile(const HloModule& module,
const tsl::protobuf::Message& proto,
const DebugOptions& debug_options,
absl::string_view name,
absl::AnyInvocable<absl::StatusOr<std::string>(
tsl::Env*, const tsl::protobuf::Message&)>
text_formatter = nullptr);
std::vector<std::string> DumpHloModuleIfEnabled(const HloModule& module,
absl::string_view name);
std::vector<std::string> DumpHloModuleIfEnabled(
const HloModule& module, const BufferAssignment& buffer_assn,
absl::string_view name);
std::vector<std::string> DumpHloModuleBetweenPassesIfEnabled(
absl::string_view pipeline_name, absl::string_view before_pass_name,
absl::string_view after_pass_name, const HloModule& module);
void DumpHloModuleDuringPassIfEnabled(absl::string_view pass_name,
absl::string_view step,
const HloModule& module);
void DumpHloSnapshotIfEnabled(const HloModule& module,
const HloSnapshot& snapshot);
void DumpHloSnapshotIfEnabled(const HloSnapshot& snapshot,
const DebugOptions& opts);
void DumpHloModuleMetadataIfEnabled(const std::vector<HloModule*>& modules);
bool DumpingEnabledForHloModule(absl::string_view hlo_module_name,
const DebugOptions& opts);
bool DumpingEnabledForHloPass(absl::string_view hlo_pass_name,
const DebugOptions& opts);
// Convenience overload: extracts the name and DebugOptions from `module` and
// forwards to the (name, options) overload declared above.
inline bool DumpingEnabledForHloModule(const HloModule& module) {
  return DumpingEnabledForHloModule(module.name(),
                                    module.config().debug_options());
}
bool DumpingToStdout(const DebugOptions& opts);
}
#endif
#include "xla/service/dump.h"
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Transforms/LocationSnapshot.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/util.h"
#include "tsl/lib/io/zlib_compression_options.h"
#include "tsl/lib/io/zlib_outputbuffer.h"
#include "tsl/lib/strings/proto_serialization.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
// Renders the entry computation of `module` in the requested `format`.
// Never fails: on a rendering error the returned string contains a
// human-readable description of the error instead of the graph.
std::string RenderGraph(absl::string_view label, const HloModule& module,
                        RenderedGraphFormat format,
                        bool show_fusion_subcomputations) {
  HloRenderOptions render_options;
  render_options.show_fusion_subcomputations = show_fusion_subcomputations;
  absl::StatusOr<std::string> rendered =
      RenderGraph(*module.entry_computation(), label,
                  module.config().debug_options(), format, render_options);
  if (!rendered.ok()) {
    return absl::StrFormat("Error rendering graph: %s",
                           rendered.status().ToString());
  }
  return std::move(rendered).value();
}
namespace {
using absl::StrCat;
using absl::StrFormat;
using absl::string_view;
// Normalized view of the dump-related DebugOptions flags. The constructor
// resolves all flag interactions once (default output format, "-" meaning
// stdout, regex filters, the "sponge" test-output directory) so the rest of
// this file can query plain fields and predicates.
struct CanonicalDebugOptions {
  explicit CanonicalDebugOptions(const DebugOptions& opts)
      : dump_to(opts.xla_dump_to()),
        dump_as_text(opts.xla_dump_hlo_as_text()),
        dump_as_proto(opts.xla_dump_hlo_as_proto()),
        dump_as_dot(opts.xla_dump_hlo_as_dot()),
        dump_as_html(opts.xla_dump_hlo_as_html()),
        dump_as_url(opts.xla_dump_hlo_as_url()),
        dump_fusion_visualization(opts.xla_dump_fusion_visualization()),
        dump_snapshots(opts.xla_dump_hlo_snapshots()),
        dump_include_timestamp(opts.xla_dump_include_timestamp()),
        dump_max_hlo_modules(opts.xla_dump_max_hlo_modules()),
        dump_module_metadata(opts.xla_dump_module_metadata()),
        dump_compress_protos(opts.xla_dump_compress_protos()),
        dump_hlo_metadata(!opts.xla_dump_disable_metadata()),
        dump_as_long_text(opts.xla_dump_hlo_as_long_text()),
        dump_mlir_pretty_form(opts.xla_dump_enable_mlir_pretty_form()),
        dump_large_constants(opts.xla_dump_large_constants()) {
    // If no explicit output format was chosen, default to text dumps.
    bool output_format_other_than_url_specified =
        opts.xla_dump_hlo_as_text() || opts.xla_dump_hlo_as_proto() ||
        opts.xla_dump_hlo_as_dot() || opts.xla_dump_hlo_as_html() ||
        opts.xla_dump_hlo_snapshots();
    bool output_format_specified =
        output_format_other_than_url_specified || opts.xla_dump_hlo_as_url();
    if (!output_format_specified) {
      dump_as_text = true;
    }
    // A disabled dumping master switch clears the destination entirely.
    if (!opts.xla_enable_dumping()) {
      dump_to = "";
    }
    // A format without a destination means "write to stdout" ("-" sentinel);
    // URL-only output is exempt because it is printed separately.
    if (opts.xla_dump_to().empty() && output_format_other_than_url_specified) {
      dump_to = "-";
    }
    // Module filter: explicit regex wins; otherwise dump everything if any
    // dump-related flag was set, nothing if dumping was never requested.
    if (!opts.xla_dump_hlo_module_re().empty()) {
      std::string pattern = opts.xla_dump_hlo_module_re();
      should_dump_module = [pattern](string_view module_name) {
        return RE2::PartialMatch(module_name, pattern);
      };
    } else if (!opts.xla_dump_hlo_pass_re().empty() ||
               !opts.xla_dump_to().empty() || output_format_specified) {
      should_dump_module = [](string_view) { return true; };
    } else {
      should_dump_module = [](string_view) { return false; };
    }
    // Per-pass dumping is opt-in via regex; default is off.
    if (!opts.xla_dump_hlo_pass_re().empty()) {
      std::string pattern = opts.xla_dump_hlo_pass_re();
      should_dump_pass = [pattern](string_view pass_name) {
        return RE2::PartialMatch(pass_name, pattern);
      };
    } else {
      should_dump_pass = [](string_view) { return false; };
    }
    // Pipeline filter defaults to "all pipelines" when no regex is given.
    if (!opts.xla_dump_hlo_pipeline_re().empty()) {
      std::string pattern = opts.xla_dump_hlo_pipeline_re();
      should_dump_pipeline = [pattern](string_view pipeline_name) {
        return RE2::PartialMatch(pipeline_name, pattern);
      };
    } else {
      should_dump_pipeline = [](string_view) { return true; };
    }
    // "sponge"/"test_undeclared_outputs_dir" redirect into the test output
    // dir; if that env var is unset, disable all dumping rather than guess.
    std::string dump_to_lower = absl::AsciiStrToLower(dump_to);
    if (dump_to_lower == "sponge" ||
        dump_to_lower == "test_undeclared_outputs_dir") {
      if (!tsl::io::GetTestUndeclaredOutputsDir(&dump_to)) {
        LOG(ERROR) << "--xla_dump_to=" << opts.xla_dump_to()
                   << ", but environment variable TEST_UNDECLARED_OUTPUTS_DIR "
                      "is not set, so cannot dump anywhere.";
        should_dump_module = [](string_view) { return false; };
        should_dump_pass = [](string_view) { return false; };
        should_dump_pipeline = [](string_view) { return false; };
      }
    }
  }
  // True when the resolved destination is stdout (the "-" sentinel).
  bool dumping_to_stdout() const { return dump_to == "-"; }
  // Destination directory, "-" for stdout, or "" when dumping is disabled.
  std::string dump_to;
  // Predicates resolved from the regex flags above.
  std::function<bool(string_view module_name)> should_dump_module;
  std::function<bool(string_view pass_name)> should_dump_pass;
  std::function<bool(string_view pipeline_name)> should_dump_pipeline;
  bool dump_as_text;
  bool dump_as_proto;
  bool dump_as_dot;
  bool dump_as_html;
  bool dump_as_url;
  bool dump_fusion_visualization;
  bool dump_snapshots;
  bool dump_include_timestamp;
  int64_t dump_max_hlo_modules;
  bool dump_module_metadata;
  bool dump_compress_protos;
  bool dump_hlo_metadata;
  bool dump_as_long_text;
  bool dump_mlir_pretty_form;
  bool dump_large_constants;
};
// FIFO queue of lazily-evaluated string producers. Lets callers describe a
// dump as a sequence of chunks that are only materialized when written out.
class DataProducer {
 public:
  // Enqueues a producer whose string will be emitted after all earlier ones.
  void Append(std::function<std::string()> produce_func) {
    produce_funcs_.push(std::move(produce_func));
  }

  // Dequeues and returns the oldest producer; returns nullptr when empty.
  std::function<std::string()> Next() {
    if (produce_funcs_.empty()) {
      return nullptr;
    }
    std::function<std::string()> front = std::move(produce_funcs_.front());
    produce_funcs_.pop();
    return front;
  }

 private:
  std::queue<std::function<std::string()>> produce_funcs_;
};
// Streams the output of `data_producer` into `fname`, optionally gzip
// compressing. Producers are invoked one at a time, so a large dump never
// has to be materialized as a single string.
static absl::Status WriteStringToFile(tsl::Env* env, const std::string& fname,
                                      DataProducer& data_producer,
                                      bool compressed) {
  std::unique_ptr<tsl::WritableFile> file;
  TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file));
  if (compressed) {
    auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
    tsl::io::ZlibOutputBuffer gz_file(file.get(), gz_opts.input_buffer_size,
                                      gz_opts.output_buffer_size, gz_opts);
    TF_RETURN_IF_ERROR(gz_file.Init());
    while (auto next_producer = data_producer.Next()) {
      TF_RETURN_IF_ERROR(gz_file.Append(next_producer()));
    }
    // Close() flushes the compressed stream into the underlying file.
    return gz_file.Close();
  } else {
    while (auto next_producer = data_producer.Next()) {
      TF_RETURN_IF_ERROR(file->Append(next_producer()));
    }
    return file->Close();
  }
}
// Writes `data` to `fname`; when `compressed` is set, gzip-compresses the
// payload via a ZlibOutputBuffer instead of writing it verbatim.
static absl::Status WriteStringToFile(tsl::Env* env, const std::string& fname,
                                      absl::string_view data, bool compressed) {
  if (!compressed) {
    return tsl::WriteStringToFile(env, fname, data);
  }
  std::unique_ptr<tsl::WritableFile> file;
  TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file));
  auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
  tsl::io::ZlibOutputBuffer gz_file(file.get(), gz_opts.input_buffer_size,
                                    gz_opts.output_buffer_size, gz_opts);
  TF_RETURN_IF_ERROR(gz_file.Init());
  TF_RETURN_IF_ERROR(gz_file.Append(data));
  return gz_file.Close();
}
// Computes the full path under opts.dump_to for `filename`, creating the
// dump directory if needed and enforcing the dump_max_hlo_modules quota.
// Returns std::nullopt when dumping should be skipped (stdout destination,
// no destination, directory creation failure, or quota exceeded).
static std::optional<std::string> GetDumpFilePath(
    string_view filename, const CanonicalDebugOptions& opts) {
  // Callers wanting stdout must go through the *OrStdout variants.
  if (opts.dumping_to_stdout()) {
    LOG(ERROR) << "Refusing to write " << filename
               << " to stdout. Pass --xla_dump_to=<path> to write to a file.";
    return std::nullopt;
  }
  if (opts.dump_to.empty()) {
    return std::nullopt;
  }
  const std::string& dir = opts.dump_to;
  VLOG(1) << "Dumping " << filename << " to " << dir;
  tsl::Env* env = tsl::Env::Default();
  // Create the directory if missing; re-check IsDirectory afterwards since a
  // concurrent creator may have raced with us.
  if (!env->IsDirectory(dir).ok()) {
    auto status = env->RecursivelyCreateDir(dir);
    if (!status.ok() && !env->IsDirectory(dir).ok()) {
      LOG(ERROR) << "Could not create directory " << dir
                 << " for dumping XLA debug data: " << status;
      return std::nullopt;
    }
  }
  // Enforce the per-directory module quota: count distinct module ids already
  // on disk and refuse dumps for *new* module ids once the limit is reached.
  if (opts.dump_max_hlo_modules > 0) {
    std::vector<std::string> matches;
    auto pattern = tsl::io::JoinPath(dir, "*module_*.*");
    auto status = env->GetMatchingPaths(pattern, &matches);
    if (!status.ok()) {
      LOG(ERROR) << "Could not get matching paths for pattern " << pattern
                 << ": " << status;
    }
    static const LazyRE2 module_id_regex = {R"(.*module_(\d+)\..*)"};
    absl::flat_hash_set<int64_t> dumped_module_ids;
    for (const std::string& match : matches) {
      int64_t dumped_module_id;
      if (RE2::FullMatch(match, *module_id_regex, &dumped_module_id)) {
        dumped_module_ids.insert(dumped_module_id);
      }
    }
    if (dumped_module_ids.size() >= opts.dump_max_hlo_modules) {
      int64_t module_id;
      // Modules already represented on disk may keep dumping; only new
      // module ids are rejected.
      if (RE2::FullMatch(filename, *module_id_regex, &module_id) &&
          !dumped_module_ids.contains(module_id)) {
        LOG(ERROR) << "Have already dumped " << dumped_module_ids.size()
                   << " modules, more than the limit of "
                   << opts.dump_max_hlo_modules;
        return std::nullopt;
      }
    }
  }
  return tsl::io::JoinPath(dir, SanitizeFileName(std::string(filename)));
}
// Writes `contents` into the dump directory as `filename` (optionally gzip
// compressed). Returns the written path, or std::nullopt when the dump was
// skipped or the write failed.
static std::optional<std::string> DumpToFileInDirImpl(
    string_view filename, string_view contents,
    const CanonicalDebugOptions& opts, bool compress = false) {
  std::optional<std::string> path = GetDumpFilePath(filename, opts);
  if (path.has_value()) {
    absl::Status status =
        WriteStringToFile(tsl::Env::Default(), *path, contents, compress);
    if (status.ok()) {
      return path;
    }
    LOG(ERROR) << "Could not write XLA debug data to " << *path << ": "
               << status;
  }
  return std::nullopt;
}
// Streaming variant: drains `data_producer` into the dump file. Returns the
// written path, or std::nullopt when the dump was skipped or the write
// failed.
static std::optional<std::string> DumpToFileInDirImpl(
    string_view filename, DataProducer& data_producer,
    const CanonicalDebugOptions& opts, bool compress = false) {
  std::optional<std::string> path = GetDumpFilePath(filename, opts);
  if (!path.has_value()) {
    return std::nullopt;
  }
  absl::Status status =
      WriteStringToFile(tsl::Env::Default(), *path, data_producer, compress);
  if (!status.ok()) {
    LOG(ERROR) << "Could not write XLA debug data to " << *path << ": "
               << status;
    return std::nullopt;
  }
  return path;
}
// Serializes stdout dumps so output from concurrent compilations does not
// interleave.
static absl::Mutex stdout_dump_mutex(absl::kConstInit);

// Writes `contents` either to stdout (bracketed with Begin/End markers,
// under the stdout mutex) or to a file in the dump directory. Returns the
// file path when a file was written, std::nullopt otherwise.
static std::optional<std::string> DumpToFileInDirOrStdoutImpl(
    string_view filename, string_view contents,
    const CanonicalDebugOptions& opts) {
  if (opts.dumping_to_stdout()) {
    absl::MutexLock lock(&stdout_dump_mutex);
    std::cout << "*** Begin " << filename << " ***\n"
              << contents << "\n*** End " << filename << " ***" << std::endl;
    return std::nullopt;
  }
  return DumpToFileInDirImpl(filename, contents, opts);
}
// Streaming variant of the stdout-or-file dump: drains `data_producer`
// either to stdout (under the stdout mutex, with Begin/End markers) or into
// a file. Returns the written file path, or std::nullopt for stdout/skips.
static std::optional<std::string> DumpToFileInDirOrStdoutImpl(
    string_view filename, DataProducer& data_producer,
    const CanonicalDebugOptions& opts) {
  if (!opts.dumping_to_stdout()) {
    return DumpToFileInDirImpl(filename, data_producer, opts);
  }
  absl::MutexLock lock(&stdout_dump_mutex);
  std::cout << "*** Begin " << filename << " ***\n";
  while (auto producer = data_producer.Next()) {
    std::cout << producer();
  }
  std::cout << "\n*** End " << filename << " ***" << std::endl;
  return std::nullopt;
}
static bool IsTrivial(const HloComputation& computation) {
const HloInstruction* root = computation.root_instruction();
return absl::c_all_of(root->operands(),
[&](const HloInstruction* op) {
return op->opcode() == HloOpcode::kParameter;
}) &&
root->opcode() != HloOpcode::kFusion;
}
// Dumps `module` (and optionally its buffer assignment) in every format
// enabled in `opts`, naming files "<prefix>.module_<id>.<name>.<suffix>.*".
// Returns the paths of all files actually written (stdout output yields no
// path).
static std::vector<std::string> DumpHloModuleImpl(
    const HloModule& module, const BufferAssignment* buffer_assn,
    string_view prefix, string_view suffix, const CanonicalDebugOptions& opts) {
  // Annotate the profiler so dump time is attributable to this module.
  tsl::profiler::ScopedAnnotation annotation([&] {
    return absl::StrFormat("XlaDumpHloModule:#module=%s,program_id=%d#",
                           module.name(), module.unique_id());
  });
  std::string filename = FilenameFor(module, prefix, suffix);
  std::vector<std::optional<std::string>> file_paths;
  // Text dump (plus a buffer-assignment companion file when available).
  if (opts.dump_as_text) {
    auto print_options = opts.dump_as_long_text
                             ? HloPrintOptions::Default()
                             : HloPrintOptions::ShortParsable();
    print_options.set_print_large_constants(opts.dump_large_constants);
    print_options.set_print_control_dependencies(true);
    print_options.set_print_operand_index_annotation_interval(5);
    print_options.set_print_backend_config(true);
    print_options.set_print_metadata(opts.dump_hlo_metadata);
    print_options.set_print_name_after_closing_brace(true);
    file_paths.push_back(DumpToFileInDirOrStdoutImpl(
        StrCat(filename, ".txt"), module.ToString(print_options), opts));
    if (buffer_assn) {
      // Stream the (potentially large) buffer assignment lazily.
      DataProducer data_producer;
      data_producer.Append([&] { return buffer_assn->ToString(); });
      data_producer.Append([&] { return "\n\n"; });
      data_producer.Append(
          [&] { return buffer_assn->hlo_live_range().ToString(); });
      file_paths.push_back(DumpToFileInDirOrStdoutImpl(
          StrCat(filename, "-buffer-assignment.txt"), data_producer, opts));
    }
  }
  // Binary HloProto dump, optionally gzip-compressed.
  if (opts.dump_as_proto) {
    HloProto module_proto =
        buffer_assn ? MakeHloProto(module, *buffer_assn) : MakeHloProto(module);
    std::string pb;
    if (!tsl::SerializeToStringDeterministic(module_proto, &pb)) {
      pb = "Failed to serialize HLO module proto.";
    }
    file_paths.push_back(DumpToFileInDirImpl(
        StrCat(filename, opts.dump_compress_protos ? ".hlo.pb.gz" : ".hlo.pb"),
        pb, opts, opts.dump_compress_protos));
  }
  // Graphviz dot rendering of the entry computation.
  if (opts.dump_as_dot) {
    file_paths.push_back(DumpToFileInDirImpl(
        StrFormat("%s.dot", filename),
        RenderGraph(filename, module, RenderedGraphFormat::kDot), opts));
  }
  // HTML rendering; the after-optimizations dump additionally gets a
  // top-level view with fusion subcomputations collapsed.
  if (opts.dump_as_html) {
    file_paths.push_back(DumpToFileInDirImpl(
        StrFormat("%s.html", filename),
        RenderGraph(filename, module, RenderedGraphFormat::kHtml), opts));
    if (absl::StrContains(filename, kAfterOptimizationsDumpName)) {
      file_paths.push_back(DumpToFileInDirImpl(
          StrFormat("%s.top_level.html", filename),
          RenderGraph(filename, module, RenderedGraphFormat::kHtml, false),
          opts));
    }
  }
  // Interactive fusion-explorer pages, one per non-trivial computation.
  if (opts.dump_fusion_visualization) {
    for (const HloComputation* computation :
         module.MakeNonfusionComputations()) {
      if (IsTrivial(*computation)) {
        VLOG(1) << "Skipping computation " << computation->name()
                << " as trivial";
        continue;
      }
      absl::StatusOr<std::string> rendered_graph =
          WrapFusionExplorer(*computation);
      if (!rendered_graph.ok()) {
        VLOG(1) << "Skipping fusion visualization"
                << " for computation " << computation->name()
                << " due to: " << rendered_graph.status();
        continue;
      }
      file_paths.push_back(DumpToFileInDirImpl(
          FilenameFor(module, computation->name(), "_fusion.html"),
          *rendered_graph, opts));
    }
  }
  // URL rendering always goes to stdout; the URL is also saved to a .url
  // file unless stdout is the only destination.
  if (opts.dump_as_url) {
    std::string url = RenderGraph(filename, module, RenderedGraphFormat::kUrl);
    std::cout << filename << " --> " << url << std::endl;
    if (!opts.dumping_to_stdout()) {
      file_paths.push_back(
          DumpToFileInDirImpl(StrFormat("%s.url", filename), url, opts));
    }
  }
  // Collapse the per-format optionals into the list of files written.
  std::vector<std::string> dumped_file_paths;
  for (const std::optional<std::string>& path : file_paths) {
    if (path.has_value()) {
      dumped_file_paths.push_back(*path);
    }
  }
  if (!dumped_file_paths.empty()) {
    LOG_FIRST_N(INFO, 1) << "HloModule dump enabled with path prefix: "
                         << prefix << ", suffix: " << suffix;
  }
  return dumped_file_paths;
}
// Dumps `metadata` as a textproto, at most once per canonical module id;
// `dumped_module_ids` records which ids have already been written.
static void DumpHloModuleMetadata(
    const HloModuleMetadataProto& metadata, const CanonicalDebugOptions& opts,
    absl::flat_hash_set<int64_t>* dumped_module_ids) {
  // insert().second is false when this id was already dumped.
  if (!dumped_module_ids->insert(metadata.canonical_module_id()).second) {
    return;
  }
  std::string filename = absl::StrFormat("module_%04d.metadata.textproto",
                                         metadata.canonical_module_id());
  std::string content;
  if (tsl::protobuf::TextFormat::PrintToString(metadata, &content)) {
    DumpToFileInDirImpl(filename, content, opts);
  } else {
    LOG(ERROR) << "Failed to convert HloModuleMetadataProto to text.";
  }
}
// Guards the two per-module bookkeeping maps below.
static absl::Mutex mu(absl::kConstInit);
// module unique_id -> number of dumps requested so far (monotonic counter
// used to order per-execution dumps). Leaked on purpose: never destroyed.
static auto& module_id_to_step_number ABSL_GUARDED_BY(mu) =
    *new absl::flat_hash_map<int64_t, int64_t>();
// module unique_id -> timestamp (micros) recorded at the first dump request,
// so all files for one module share a stable timestamp.
static auto& module_id_to_timestamp ABSL_GUARDED_BY(mu) =
    *new absl::flat_hash_map<int64_t, uint64_t>();

// Returns the next step number for `module`, starting at 0.
int64_t StepNumberForModule(const HloModule& module) {
  absl::MutexLock lock(&mu);
  return module_id_to_step_number[module.unique_id()]++;
}
}
// Returns a stable per-module timestamp string, or "" when timestamps are
// disabled. The first call records NowMicros(); subsequent calls for the
// same module reuse that value via try_emplace.
std::string TimestampFor(const HloModule& module) {
  if (!module.config().debug_options().xla_dump_include_timestamp()) {
    return "";
  }
  absl::MutexLock lock(&mu);
  auto timestamp_emplace = module_id_to_timestamp.try_emplace(
      module.unique_id(), tsl::Env::Default()->NowMicros());
  return std::to_string(timestamp_emplace.first->second);
}
// Builds "<prefix>.module_<id>.<module_name>.<suffix>" (prefix/name parts
// omitted when empty). If the result exceeds 255 characters — a common
// filesystem limit on file-name length — retries without the module name.
std::string FilenameFor(int unique_id, string_view module_name,
                        string_view prefix, string_view suffix) {
  std::string filename;
  if (!prefix.empty()) {
    absl::StrAppend(&filename, prefix, ".");
  }
  absl::StrAppendFormat(&filename, "module_%04d", unique_id);
  if (!module_name.empty()) {
    absl::StrAppend(&filename, ".", module_name);
  }
  absl::StrAppend(&filename, ".", suffix);
  // Drop the module name rather than produce an unusable filename.
  if (!module_name.empty() && filename.size() > 255) {
    return FilenameFor(unique_id, "", prefix, suffix);
  }
  return filename;
}
// Convenience overload taking the module directly.
std::string FilenameFor(const HloModule& module, string_view prefix,
                        string_view suffix) {
  return FilenameFor(module.unique_id(), module.name(), prefix, suffix);
}
// Dumps `contents` to the dump directory configured in the module's debug
// options, using the standard per-module filename scheme.
void DumpToFileInDir(const HloModule& module, string_view file_prefix,
                     string_view file_suffix, string_view contents) {
  DumpToFileInDir(module.config().debug_options(),
                  FilenameFor(module, file_prefix, file_suffix), contents);
}
void DumpToFileInDir(const DebugOptions& debug_options,
absl::string_view filename, absl::string_view conten | #include "xla/service/dump.h"
#include <memory>
#include <string>
#include "absl/strings/match.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// With xla_dump_large_constants disabled, a dumped module's large constant
// literal is elided and rendered as "{...}" in the text dump.
TEST(DumpHloIfEnabled, LargeConstantElided) {
  HloModuleConfig config;
  DebugOptions options = config.debug_options();
  auto env = tsl::Env::Default();
  std::string dump_dir;
  EXPECT_TRUE(env->LocalTempFilename(&dump_dir));
  options.set_xla_dump_to(dump_dir);
  options.set_xla_dump_hlo_as_text(true);
  options.set_xla_dump_large_constants(false);
  config.set_debug_options(options);
  const char* kModuleStr = R"(
  HloModule m
  test {
    p0 = s32[11] parameter(0)
    c = s32[11] constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
    ROOT x = s32[11] multiply(p0, c)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m,
                          ParseAndReturnUnverifiedModule(kModuleStr, config));
  std::string dump_name = "dump";
  auto paths = DumpHloModuleIfEnabled(*m, dump_name);
  EXPECT_EQ(paths.size(), 1);
  std::string data;
  EXPECT_TRUE(ReadFileToString(env, paths[0], &data).ok());
  // The constant's elements should be replaced by the "{...}" placeholder.
  EXPECT_TRUE(absl::StrContains(data, "{...}"));
}
// With xla_dump_large_constants enabled, the constant literal is printed in
// full (no "{...}" placeholder appears in the text dump).
TEST(DumpHloIfEnabled, LargeConstantPrinted) {
  HloModuleConfig config;
  DebugOptions options = config.debug_options();
  auto env = tsl::Env::Default();
  std::string dump_dir;
  EXPECT_TRUE(env->LocalTempFilename(&dump_dir));
  options.set_xla_dump_to(dump_dir);
  options.set_xla_dump_hlo_as_text(true);
  options.set_xla_dump_large_constants(true);
  config.set_debug_options(options);
  const char* kModuleStr = R"(
  HloModule m
  test {
    p0 = s32[11] parameter(0)
    c = s32[11] constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
    ROOT x = s32[11] multiply(p0, c)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m,
                          ParseAndReturnUnverifiedModule(kModuleStr, config));
  std::string dump_name = "dump";
  auto paths = DumpHloModuleIfEnabled(*m, dump_name);
  EXPECT_EQ(paths.size(), 1);
  std::string data;
  EXPECT_TRUE(ReadFileToString(env, paths[0], &data).ok());
  EXPECT_TRUE(!absl::StrContains(data, "{...}"));
}
}
} |
1,922 | cpp | tensorflow/tensorflow | hlo_element_type_converter | third_party/xla/xla/service/hlo_element_type_converter.cc | third_party/xla/xla/service/hlo_element_type_converter_test.cc | #ifndef XLA_SERVICE_HLO_ELEMENT_TYPE_CONVERTER_H_
#define XLA_SERVICE_HLO_ELEMENT_TYPE_CONVERTER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// A pass that eliminates `eliminate_type` from a module by rewriting eligible
// instructions to operate on `replace_with_type` instead, inserting converts
// at the boundaries (see the implementation for the set of skipped opcodes).
class HloElementTypeConverter : public HloModulePass {
 public:
  // `eliminate_type` is the element type to remove; `replace_with_type` is
  // its substitute. If the two are equal, Run() is a no-op.
  HloElementTypeConverter(PrimitiveType eliminate_type,
                          PrimitiveType replace_with_type);
  absl::string_view name() const override { return "element_type_converter"; }
  using HloPassInterface::Run;
  // Returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  PrimitiveType eliminate_type_;
  PrimitiveType replace_with_type_;
};
}
#endif
#include "xla/service/hlo_element_type_converter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Returns `hlo` itself when it already has element type `type`; otherwise
// inserts and returns a convert of `hlo` to `type` in the same computation.
HloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) {
  if (hlo->shape().element_type() == type) {
    return hlo;
  }
  const Shape converted_shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
  HloInstruction* converted = hlo->parent()->AddInstruction(
      HloInstruction::CreateConvert(converted_shape, hlo));
  CHECK_EQ(converted->shape().element_type(), type);
  return converted;
}
// Returns true iff any operand of `hlo` has element type `type`.
bool HasOperandType(HloInstruction* hlo, PrimitiveType type) {
  const auto& inputs = hlo->operands();
  for (size_t i = 0; i < inputs.size(); ++i) {
    if (inputs[i]->shape().element_type() == type) {
      return true;
    }
  }
  return false;
}
// Returns `shape` (a flat, non-nested tuple) with every leaf of element type
// `from_type` changed to `to_type`; other leaves are left unchanged.
Shape GetConvertedTupleShape(const Shape& shape, PrimitiveType from_type,
                             PrimitiveType to_type) {
  std::vector<Shape> new_tuple_subshapes;
  const int64_t n = ShapeUtil::TupleElementCount(shape);
  new_tuple_subshapes.reserve(n);
  for (int64_t i = 0; i < n; ++i) {
    Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);
    // Nested tuples are not supported by this helper.
    CHECK(!subshape.IsTuple());
    if (subshape.element_type() == from_type) {
      subshape = ShapeUtil::ChangeElementType(subshape, to_type);
    }
    new_tuple_subshapes.push_back(subshape);
  }
  return ShapeUtil::MakeTupleShape(new_tuple_subshapes);
}
// Rebuilds tuple-shaped `hlo` so its shape matches `to_shape`: extracts each
// element via get-tuple-element, inserts a convert where the element types
// differ, and re-tuples the results. Returns the new tuple instruction.
HloInstruction* ConvertTupleElements(HloInstruction* hlo,
                                     const Shape& to_shape) {
  const Shape& shape = hlo->shape();
  HloComputation* computation = hlo->parent();
  std::vector<HloInstruction*> tuple_elements;
  for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
    const Shape& ele_shape = ShapeUtil::GetTupleElementShape(shape, i);
    HloInstruction* element = computation->AddInstruction(
        HloInstruction::CreateGetTupleElement(ele_shape, hlo, i));
    const Shape& to_ele_shape = ShapeUtil::GetTupleElementShape(to_shape, i);
    // Nested tuples are not supported by this helper.
    CHECK(!ele_shape.IsTuple());
    if (ele_shape.element_type() != to_ele_shape.element_type()) {
      element = computation->AddInstruction(
          HloInstruction::CreateConvert(to_ele_shape, element));
    }
    tuple_elements.push_back(element);
  }
  return computation->AddInstruction(
      HloInstruction::CreateTuple(tuple_elements));
}
}
// Constructs a converter that rewrites `eliminate_type` to `replace_with_type`.
HloElementTypeConverter::HloElementTypeConverter(
    PrimitiveType eliminate_type, PrimitiveType replace_with_type)
    : eliminate_type_(eliminate_type), replace_with_type_(replace_with_type) {}
// Walks each computation in post-order and, for every eligible instruction
// that produces or consumes `eliminate_type_`, clones it with operands
// converted to `replace_with_type_` and converts the result back, so users
// keep seeing the original element type. Returns true iff anything changed.
absl::StatusOr<bool> HloElementTypeConverter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      3, "HloElementTypeConverter::Run(), before:\n" + module->ToString());
  // Nothing to do when the source and target types coincide.
  if (eliminate_type_ == replace_with_type_) {
    return false;
  }
  HloCloneContext context(module);
  bool changed = false;
  for (auto* computation : module->computations(execution_threads)) {
    for (auto* hlo : computation->MakeInstructionPostOrder()) {
      const auto opcode = hlo->opcode();
      // Skip ops whose element types are fixed by their context (parameter,
      // constant, infeed/outfeed) or that pass types through transparently
      // (tuple, get-tuple-element, converts).
      if (opcode == HloOpcode::kParameter || opcode == HloOpcode::kConstant ||
          opcode == HloOpcode::kTuple || opcode == HloOpcode::kConvert ||
          opcode == HloOpcode::kBitcastConvert ||
          opcode == HloOpcode::kGetTupleElement ||
          opcode == HloOpcode::kInfeed || opcode == HloOpcode::kOutfeed) {
        continue;
      }
      // Custom calls are opaque to this pass.
      if (opcode == HloOpcode::kCustomCall) {
        continue;
      }
      // Ops with called computations are skipped: rewriting them would also
      // require rewriting their subcomputations.
      if (opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall ||
          opcode == HloOpcode::kAllReduce ||
          opcode == HloOpcode::kReduceScatter ||
          opcode == HloOpcode::kAllReduceStart ||
          opcode == HloOpcode::kFusion || opcode == HloOpcode::kMap ||
          opcode == HloOpcode::kReduce || opcode == HloOpcode::kReduceWindow ||
          opcode == HloOpcode::kScatter ||
          opcode == HloOpcode::kSelectAndScatter ||
          opcode == HloOpcode::kSort || opcode == HloOpcode::kConditional) {
        continue;
      }
      TF_RET_CHECK(hlo->called_computations().empty()) << hlo->ToString();
      bool nullary = hlo->operands().empty();
      bool wrong_element_type = hlo->shape().element_type() == eliminate_type_;
      bool should_eliminate_type = (nullary && wrong_element_type) ||
                                   HasOperandType(hlo, eliminate_type_);
      if (!should_eliminate_type) {
        // If none of the operands or the output use eliminate_type_, the
        // output must not use it either (operands were handled earlier in
        // post-order).
        TF_RET_CHECK(hlo->shape().element_type() != eliminate_type_);
        continue;
      }
      // Convert all eliminate_type_ operands to replace_with_type_.
      std::vector<HloInstruction*> new_operands;
      const auto& operands = hlo->operands();
      new_operands.reserve(operands.size());
      for (HloInstruction* operand : operands) {
        if (operand->shape().element_type() == eliminate_type_) {
          operand = ToElementType(operand, replace_with_type_);
        }
        new_operands.push_back(operand);
      }
      // Clone the instruction with the new operands; the result shape
      // handling depends on whether the output itself uses eliminate_type_.
      HloInstruction* new_hlo;
      if (hlo->shape().element_type() == eliminate_type_) {
        // Array output of eliminate_type_: produce replace_with_type_ and
        // convert the result back.
        Shape shape =
            ShapeUtil::ChangeElementType(hlo->shape(), replace_with_type_);
        new_hlo = computation->AddInstruction(
            hlo->CloneWithNewOperands(shape, new_operands, &context));
        TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
        new_hlo = ToElementType(new_hlo, eliminate_type_);
      } else if (hlo->shape().IsTuple()) {
        // Tuple output: convert affected leaves and rebuild the tuple with
        // the original leaf types.
        Shape old_shape = hlo->shape();
        Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_,
                                                 replace_with_type_);
        new_hlo = computation->AddInstruction(
            hlo->CloneWithNewOperands(new_shape, new_operands, &context));
        TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
        new_hlo = ConvertTupleElements(new_hlo, old_shape);
      } else {
        // Output type unaffected; only operands were converted.
        new_hlo = computation->AddInstruction(
            hlo->CloneWithNewOperands(hlo->shape(), new_operands, &context));
        TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
      }
      TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_hlo));
      TF_RETURN_IF_ERROR(hlo->DropAllControlDeps());
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(hlo));
      changed = true;
    }
  }
  XLA_VLOG_LINES(
      2, "HloElementTypeConverter::Run(), after:\n" + module->ToString());
  return changed;
}
} | #include "xla/service/hlo_element_type_converter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Not;
using ::testing::ResultOf;
using HloElementTypeConverterTest = HloTestBase;
// Custom calls are opaque to the pass, so a BF16 custom call is left alone.
TEST_F(HloElementTypeConverterTest, CustomCallsNotConverted) {
  const std::string& hlo_string = R"(
    HloModule custom_call
    ENTRY CustomCall {
      constant = bf16[1]{0} constant({12345})
      ROOT custom-call = bf16[1,2,3]{0,2,1} custom-call(constant),
            custom_call_target="foo"
    }
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_FALSE(converted);
}
// Infeed/outfeed types are fixed by the host interface, so they are skipped.
TEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) {
  const std::string& hlo_string = R"(
    HloModule InfeedOutfeed
    ENTRY RoundTrip16MiBR1.v2 {
      token0 = token[] after-all()
      infeed = (bf16[4]{0}, token[]) infeed(token0)
      ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0
      outfeed = token[] outfeed(infeed.data, token0)
    }
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_FALSE(converted);
}
// A BF16 add feeding a nested tuple is rewritten to F32 with converts on its
// operands and result; the tuple structure itself is untouched.
TEST_F(HloElementTypeConverterTest, OperationsInNestedTuplesConverted) {
  const std::string& hlo_string = R"(
    HloModule NestedTuples
    ENTRY NestedTuples.v5 {
      constant.2 = f32[2]{0} constant({1, 2})
      constant.3 = bf16[2]{0} constant({42, 42})
      add = bf16[2]{0} add(constant.2, constant.3)
      tuple = (f32[2]{0}, bf16[2]{0}) tuple(constant.2, add)
      constant.5 = bf16[2]{0} constant({22, 44})
      ROOT tuple.1 = ((f32[2]{0}, bf16[2]{0}), bf16[2]{0}) tuple(tuple, constant.5)
    }
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_TRUE(converted);
  const HloInstruction* bf16_op =
      module->entry_computation()->root_instruction()->operand(0)->operand(1);
  EXPECT_THAT(bf16_op, op::Convert(op::Add(op::Constant(), op::Convert())));
}
// A tuple-producing op (batch-norm-grad) gets its tuple leaves converted:
// the rewritten op is followed by per-leaf get-tuple-element + convert.
TEST_F(HloElementTypeConverterTest, BatchNormGradBF16Converted) {
  const std::string& hlo_string = R"(
    HloModule BatchNormGrad
    ENTRY BatchNormGrad.v6 {
      constant.4 = bf16[2,2,2,1]{3,2,1,0} constant({ {
        { {0}, {0} }, { {0}, {0} } }, { { {0},
        {0} }, { {0}, {0} } } })
      constant.5 = bf16[2]{0} constant({1, 1})
      constant.6 = bf16[2]{0} constant({0, 0})
      constant.7 = bf16[2]{0} constant({1, 1})
      constant.8 = bf16[2,2,2,1]{3,2,1,0} constant({ {
        { {1}, {2} }, { {3}, {4} } }, { {
        {5}, {6} }, { {7}, {8} } } })
      ROOT batch-norm-grad = (bf16[2,2,2,1]{3,2,1,0}, bf16[2]{0}, bf16[2]{0})
        batch-norm-grad(constant.4, constant.5, constant.6, constant.7,
        constant.8), epsilon=0, feature_index=2
    }
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_TRUE(converted);
  const HloInstruction* tuple_instr =
      module->entry_computation()->root_instruction();
  ::testing::Matcher<const ::xla::HloInstruction*> batch_norm =
      op::BatchNormGrad();
  EXPECT_THAT(tuple_instr,
              op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)),
                        op::Convert(op::GetTupleElement(batch_norm, 1)),
                        op::Convert(op::GetTupleElement(batch_norm, 2))));
}
// After conversion no rng instruction with BF16 output remains in the module.
TEST_F(HloElementTypeConverterTest, RngIsRemoved) {
  const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
  constant.3 = bf16[] constant(0)
  constant.4 = bf16[] constant(1)
  ROOT rng = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
}
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_TRUE(converted);
  // Predicate matching any rng instruction that still produces BF16.
  HloPredicate is_bf16_rng = [](const HloInstruction* inst) {
    return inst->shape().element_type() == BF16 &&
           inst->opcode() == HloOpcode::kRng;
  };
  EXPECT_THAT(module->entry_computation()->instructions(),
              Not(Contains(ResultOf(is_bf16_rng, Eq(true)))));
}
// Control dependencies between two rng instructions must survive conversion:
// the rewritten rng0 still precedes the rewritten rng1.
TEST_F(HloElementTypeConverterTest, RngCtrlDep) {
  const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
  constant.3 = bf16[] constant(0)
  constant.4 = bf16[] constant(1)
  rng0 = bf16[1,2000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
  ROOT rng1 = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), control-predecessors={%rng0}, distribution=rng_uniform
}
  )";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter type_converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
  EXPECT_TRUE(converted);
  // Locate the rewritten rng instructions by their distinguishing dimension.
  // Initialize to nullptr so a missing rng yields a clean assertion failure
  // instead of dereferencing an uninitialized pointer below.
  HloInstruction* rng0 = nullptr;
  HloInstruction* rng1 = nullptr;
  for (auto* inst : module->entry_computation()->instructions()) {
    if (inst->opcode() == HloOpcode::kRng) {
      const Shape& shape = inst->shape();
      ASSERT_EQ(shape.dimensions_size(), 3);
      ASSERT_TRUE(shape.dimensions(1) == 2000 || shape.dimensions(1) == 1000);
      if (shape.dimensions(1) == 2000) {
        rng0 = inst;
      } else {
        rng1 = inst;
      }
    }
  }
  ASSERT_NE(rng0, nullptr);
  ASSERT_NE(rng1, nullptr);
  EXPECT_THAT(rng0->control_successors(), ElementsAre(rng1));
  EXPECT_THAT(rng1->control_predecessors(), ElementsAre(rng0));
}
// bitcast-convert reinterprets bits, so the pass must not touch it even when
// its input is the eliminated type.
TEST_F(HloElementTypeConverterTest, BitcastConvertIsUnmodified) {
  const std::string& hlo_string = R"(
  HloModule test
  ENTRY test {
    p = bf16[] parameter(0)
    ROOT c = u16[] bitcast-convert(p)
  })";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  HloElementTypeConverter converter(BF16, F32);
  TF_ASSERT_OK_AND_ASSIGN(bool converted, RunHloPass(&converter, module.get()));
  EXPECT_FALSE(converted);
}
}
} |
1,923 | cpp | tensorflow/tensorflow | layout_normalization | third_party/xla/xla/service/layout_normalization.cc | third_party/xla/xla/service/layout_normalization_test.cc | #ifndef XLA_SERVICE_LAYOUT_NORMALIZATION_H_
#define XLA_SERVICE_LAYOUT_NORMALIZATION_H_
#include <functional>
#include <optional>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Optional backend hook invoked for each custom call during normalization.
// Returning an instruction replaces the custom call; returning nullopt falls
// back to the pass's default handling.
using CustomCallTransformer =
    std::function<absl::StatusOr<std::optional<HloInstruction*>>(
        HloCustomCallInstruction*)>;
// Pass that rewrites supported ops into a layout-normalized form, inserting
// bitcasts between the original and normalized shapes (see the visitor in the
// .cc file for per-op handling).
class LayoutNormalization : public HloModulePass {
 public:
  // `custom_call_transformer` may be null, in which case custom calls get the
  // default (bitcast-sandwich) treatment.
  explicit LayoutNormalization(
      const CustomCallTransformer& custom_call_transformer = nullptr)
      : custom_call_transformer_(custom_call_transformer) {}
  absl::string_view name() const override { return "layout_normalization"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  CustomCallTransformer custom_call_transformer_;
};
}
#endif
#include "xla/service/layout_normalization.h"
#include <algorithm>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class LayoutNormalizationVisitor : public DfsHloRewriteVisitor {
public:
explicit LayoutNormalizationVisitor(
const CustomCallTransformer& custom_call_transformer = nullptr)
: custom_call_transformer_(custom_call_transformer) {}
  // Rewrites a non-tuple constant in place to its normalized shape and gives
  // existing users a bitcast back to the original shape.
  absl::Status HandleConstant(HloInstruction* hlo) override {
    Literal& literal = *Cast<HloConstantInstruction>(hlo)->mutable_literal();
    if (literal.shape().IsTuple()) {
      return absl::OkStatus();
    }
    const Shape& shape = hlo->shape();
    Shape normalized_shape = Normalize(shape);
    *literal.mutable_shape_do_not_use() = normalized_shape;
    // Reset element_size_in_bits on the literal's layout to the default (0).
    literal.mutable_shape_do_not_use()
        ->mutable_layout()
        ->set_element_size_in_bits(0);
    HloInstruction* bc_to_orig = MakeBitcastHlo(hlo, shape);
    *hlo->mutable_shape() = normalized_shape;
    // Users keep seeing the original shape via the bitcast.
    TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWithDifferentShape(bc_to_orig));
    MarkAsChanged();
    return absl::OkStatus();
  }
  // Rewrites a slice to operate on the normalized input, permuting its
  // start/limit/stride attributes by the layout's transpose permutation.
  absl::Status HandleSlice(HloInstruction* hlo) override {
    HloInstruction* operand = hlo->mutable_operand(0);
    const Shape& s = hlo->shape();
    const Shape& operand_shape = operand->shape();
    // Slice must be layout-preserving for this rewrite.
    TF_RET_CHECK(s.layout() == operand_shape.layout());
    TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
                        GetNormalizedInput(operand));
    Shape normalized = Normalize(operand_shape);
    std::vector<int64_t> layout_as_permutation =
        ToTransposeDimensions(hlo->shape().layout());
    // Permutes a per-dimension slice attribute into normalized order.
    auto normalize_slice_attr = [&](absl::Span<int64_t const> input) {
      return Permute(input, layout_as_permutation);
    };
    TF_ASSIGN_OR_RETURN(HloInstruction * normalized_slice,
                        MakeSliceHlo(normalized_input,
                                     normalize_slice_attr(hlo->slice_starts()),
                                     normalize_slice_attr(hlo->slice_limits()),
                                     normalize_slice_attr(hlo->slice_strides()),
                                     &hlo->metadata()));
    *normalized_slice->mutable_shape()->mutable_layout() =
        normalized_input->shape().layout();
    SetVisited(*normalized_slice);
    HloInstruction* bc_to_orig = MakeBitcastHlo(normalized_slice, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Fallback for unhandled ops: sandwiches `hlo` between a bitcast to the
  // normalized shape and a bitcast back, so downstream visitors see a
  // normalized producer. Tuples/tokens and dead instructions are untouched.
  absl::Status DefaultAction(HloInstruction* hlo) override {
    if (!hlo->user_count()) {
      return absl::OkStatus();
    }
    auto users = hlo->users();
    auto shape = hlo->shape();
    if (shape.IsTuple() || shape.IsToken()) {
      return absl::OkStatus();
    }
    auto normalized_shape = Normalize(shape);
    auto bc_to_normalized = MakeBitcastHlo(hlo, normalized_shape);
    SetVisited(*bc_to_normalized);
    auto bc_to_orig = MakeBitcastHlo(bc_to_normalized, shape);
    // Only the pre-existing users are redirected, not the new bitcasts.
    TF_RETURN_IF_ERROR(hlo->ReplaceUsesWith(users, bc_to_orig));
    MarkAsChanged();
    return absl::OkStatus();
  }
  // Rewrites a concatenate over normalized inputs, remapping the concat
  // dimension through the inverse layout permutation.
  absl::Status HandleConcatenate(HloInstruction* hlo) override {
    const Shape& s = hlo->shape();
    int64_t orig_concat_dim = hlo->dimensions(0);
    std::vector<HloInstruction*> normalized_inputs;
    for (HloInstruction* operand : hlo->mutable_operands()) {
      TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
      normalized_inputs.push_back(normalized_input);
    }
    auto normalized_shape = Normalize(s);
    auto layout_as_permutation = ToTransposeDimensions(s.layout());
    int64_t normalized_concat_dim =
        InversePermutation(layout_as_permutation)[orig_concat_dim];
    auto normalized_concat =
        hlo->AddInstruction(HloInstruction::CreateConcatenate(
            normalized_shape, normalized_inputs, normalized_concat_dim));
    SetVisited(*normalized_concat);
    auto bc_to_orig = MakeBitcastHlo(normalized_concat, hlo->shape());
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a non-variadic reduce-window over the normalized input,
  // permuting the window dimensions to match. Tuple-shaped (variadic)
  // reduce-windows are left alone.
  absl::Status HandleReduceWindow(HloInstruction* hlo) override {
    if (hlo->shape().IsTuple()) {
      return absl::OkStatus();
    }
    HloInstruction* operand = hlo->mutable_operand(0);
    // The op must be layout-preserving for this rewrite.
    TF_RET_CHECK(hlo->shape().layout() == operand->shape().layout());
    TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
                        GetNormalizedInput(operand));
    std::vector<int64_t> layout_as_permutation =
        ToTransposeDimensions(hlo->shape().layout());
    // Reorder the per-dimension window configs into normalized order.
    std::vector<WindowDimension> window_dimensions;
    for (const WindowDimension& d : hlo->window().dimensions()) {
      window_dimensions.push_back(d);
    }
    window_dimensions = Permute(window_dimensions, layout_as_permutation);
    Window new_window;
    for (const WindowDimension& d : window_dimensions) {
      *new_window.add_dimensions() = d;
    }
    TF_ASSIGN_OR_RETURN(
        HloInstruction * rw,
        MakeReduceWindowHlo(normalized_input, hlo->mutable_operand(1),
                            new_window, hlo->called_computations()[0],
                            &hlo->metadata()));
    SetVisited(*rw);
    HloInstruction* bc_to_orig = MakeBitcastHlo(rw, hlo->shape());
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a broadcast over the normalized input, remapping its broadcast
  // dimensions through the operand's layout permutation and the inverse of
  // the output's layout permutation.
  absl::Status HandleBroadcast(HloInstruction* hlo) override {
    VLOG(3) << "Input broadcast: " << hlo->ToString();
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
    auto normalized_shape = Normalize(s);
    std::vector<int64_t> layout_as_permutation =
        ToTransposeDimensions(operand->shape().layout());
    std::vector<int64_t> orig_output_layout_as_permutation =
        ToTransposeDimensions(s.layout());
    std::vector<int64_t> br_dimensions;
    // An empty dimension list (scalar broadcast) stays empty.
    if (!hlo->dimensions().empty()) {
      br_dimensions.reserve(hlo->dimensions().size());
      auto inverse_perm = InversePermutation(orig_output_layout_as_permutation);
      for (int64_t dim :
           ComposePermutations(hlo->dimensions(), layout_as_permutation)) {
        br_dimensions.push_back(inverse_perm[dim]);
      }
    }
    auto normalized_broadcast = MakeBroadcastHlo(
        normalized_input, br_dimensions, normalized_shape, &hlo->metadata());
    SetVisited(*normalized_broadcast);
    VLOG(3) << "Generated broadcast: " << normalized_broadcast->ToString();
    auto bc_to_orig = MakeBitcastHlo(normalized_broadcast, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites an iota in the normalized shape, remapping the iota dimension
  // through the inverse of the output layout permutation.
  absl::Status HandleIota(HloInstruction* hlo) override {
    VLOG(3) << "Input iota: " << hlo->ToString();
    auto s = hlo->shape();
    auto normalized_shape = Normalize(s);
    std::vector<int64_t> orig_output_layout_as_permutation =
        ToTransposeDimensions(s.layout());
    int64_t new_iota_dimension = InversePermutation(
        orig_output_layout_as_permutation)[hlo->dimensions()[0]];
    auto normalized_iota = hlo->AddInstruction(
        HloInstruction::CreateIota(normalized_shape, new_iota_dimension));
    SetVisited(*normalized_iota);
    VLOG(3) << "Generated iota: " << normalized_iota->ToString();
    auto bc_to_orig = MakeBitcastHlo(normalized_iota, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
absl::Status HandleBitcastConvert(HloInstruction* hlo) override {
if (hlo->shape().rank() == hlo->operand(0)->shape().rank()) {
return HandleElementwiseUnary(hlo);
}
return DefaultAction(hlo);
}
  // Rewrites a layout-preserving elementwise unary op (including convert,
  // reduce-precision and rank-preserving bitcast-convert) over the
  // normalized input, then bitcasts back to the original shape.
  absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    auto operand_shape = operand->shape();
    // Element size may differ (e.g. convert), but layouts must match.
    TF_RET_CHECK(
        Layout::Equal().IgnoreElementSize()(s.layout(), operand_shape.layout()))
        << "Unexpected non-layout preserving elementwise unary: "
        << hlo->ToString();
    TF_ASSIGN_OR_RETURN(auto normalized_input, GetNormalizedInput(operand));
    PrimitiveType to_element_type = s.element_type();
    HloInstruction* new_unary;
    // Convert-like ops need their element type/precision re-specified; all
    // other unaries go through the generic builder.
    if (hlo->opcode() == HloOpcode::kConvert) {
      new_unary =
          MakeConvertToHlo(normalized_input, to_element_type, &hlo->metadata());
    } else if (hlo->opcode() == HloOpcode::kReducePrecision) {
      new_unary =
          MakeReducePrecisionHlo(normalized_input, hlo->exponent_bits(),
                                 hlo->mantissa_bits(), &hlo->metadata());
    } else if (hlo->opcode() == HloOpcode::kBitcastConvert) {
      new_unary = MakeBitcastConvertToHlo(normalized_input, to_element_type,
                                          &hlo->metadata());
    } else {
      TF_ASSIGN_OR_RETURN(
          new_unary,
          MakeUnaryHlo(hlo->opcode(), normalized_input, &hlo->metadata()));
    }
    // The builders may return the input unchanged (no-op convert); only mark
    // genuinely new instructions as visited.
    if (normalized_input != new_unary) {
      SetVisited(*new_unary);
    }
    auto bc_to_orig = MakeBitcastHlo(new_unary, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a layout-preserving elementwise binary op (including compare)
  // over the normalized inputs, then bitcasts back to the original shape.
  absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
    auto s = hlo->shape();
    auto a = hlo->mutable_operand(0);
    auto b = hlo->mutable_operand(1);
    TF_RET_CHECK(a->shape().layout() == s.layout());
    TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(a));
    TF_ASSIGN_OR_RETURN(auto b0, GetNormalizedInput(b));
    HloInstruction* new_binary;
    // Compare carries a direction and needs its dedicated builder.
    if (hlo->opcode() == HloOpcode::kCompare) {
      TF_ASSIGN_OR_RETURN(new_binary,
                          MakeCompareHlo(hlo->comparison_direction(), a0, b0,
                                         &hlo->metadata()));
    } else {
      TF_ASSIGN_OR_RETURN(
          new_binary, MakeBinaryHlo(hlo->opcode(), a0, b0, &hlo->metadata()));
    }
    SetVisited(*new_binary);
    auto bc_to_orig = MakeBitcastHlo(new_binary, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a bitcast-reshape as a reshape between normalized shapes,
  // followed by a bitcast back to the original shape.
  absl::Status HandleReshape(HloInstruction* hlo) override {
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    // Only bitcast-equivalent reshapes are expected here.
    TF_RET_CHECK(ShapeUtil::ReshapeIsBitcast(s, operand->shape()));
    TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
    auto normalized_reshape_s = Normalize(s);
    TF_ASSIGN_OR_RETURN(auto new_reshape,
                        MakeReshapeHlo(normalized_reshape_s, a0));
    SetVisited(*new_reshape);
    auto bc_to_orig = MakeBitcastHlo(new_reshape, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a (previously simplified) scatter over normalized operands,
  // indices and updates, remapping every field of the scatter dimension
  // numbers through the corresponding layout permutations.
  absl::Status HandleScatter(HloInstruction* hlo) override {
    auto* scatter = Cast<HloScatterInstruction>(hlo);
    std::vector<HloInstruction*> normalized_operands;
    normalized_operands.reserve(scatter->scatter_operand_count());
    Shape operand_shape = scatter->scatter_operands().front()->shape();
    for (HloInstruction* operand : scatter->scatter_operands()) {
      if (operand->shape().layout() != operand_shape.layout()) {
        return FailedPrecondition(
            "All scatter operands must have the same layout");
      }
      TF_ASSIGN_OR_RETURN(auto normalized_operand, GetNormalizedInput(operand));
      normalized_operands.push_back(normalized_operand);
    }
    std::vector<HloInstruction*> normalized_updates;
    normalized_updates.reserve(scatter->scatter_operand_count());
    Shape update_shape = scatter->scatter_updates().front()->shape();
    for (HloInstruction* operand : scatter->scatter_updates()) {
      if (operand->shape().layout() != update_shape.layout()) {
        return FailedPrecondition(
            "All scatter updates must have the same layout");
      }
      TF_ASSIGN_OR_RETURN(auto normalized_update, GetNormalizedInput(operand));
      normalized_updates.push_back(normalized_update);
    }
    const auto& dims = scatter->scatter_dimension_numbers();
    // This rewrite requires the single-scatter-dimension form produced by
    // ScatterSimplifier.
    if (scatter->scatter_updates().front()->shape().rank() -
            dims.update_window_dims_size() >
        1) {
      return FailedPrecondition(
          "There should be just a single scatter dimension. Make sure to run "
          "ScatterSimplifier before LayoutNormalization");
    }
    TF_ASSIGN_OR_RETURN(auto normalized_indices,
                        GetNormalizedInput(scatter->scatter_indices()));
    // Permutations used to remap the scatter dimension numbers into the
    // normalized dimension order of each input.
    auto indices_permutation = InversePermutation(
        ToTransposeDimensions(scatter->scatter_indices()->shape().layout()));
    auto layout_permutation =
        ToTransposeDimensions(scatter->scatter_operands()[0]->shape().layout());
    auto operand_permutation = InversePermutation(layout_permutation);
    auto update_permutation = InversePermutation(
        ToTransposeDimensions(scatter->scatter_updates()[0]->shape().layout()));
    ScatterDimensionNumbers normalized_dims;
    normalized_dims.set_index_vector_dim(
        indices_permutation[dims.index_vector_dim()]);
    for (int64_t dim : dims.scatter_dims_to_operand_dims()) {
      normalized_dims.add_scatter_dims_to_operand_dims(
          operand_permutation[dim]);
    }
    std::vector<int64_t> normalized_update_window_dims;
    normalized_update_window_dims.reserve(dims.update_window_dims_size());
    for (int64_t dim : dims.update_window_dims()) {
      normalized_update_window_dims.push_back(update_permutation[dim]);
    }
    // Build a per-operand-dimension map: -1 marks an inserted window dim,
    // any other value is the corresponding (already remapped) update window
    // dim; then permute it into normalized operand order and split it back
    // into inserted vs. update window dims.
    std::vector<int64_t> window_dimensions(operand_permutation.size());
    for (int64_t i = 0, j = 0, k = 0; i < window_dimensions.size(); ++i) {
      if (j < dims.inserted_window_dims_size() &&
          dims.inserted_window_dims(j) == i) {
        window_dimensions[i] = -1;
        ++j;
      } else {
        window_dimensions[i] = normalized_update_window_dims[k];
        ++k;
      }
    }
    std::vector<int64_t> permuted_window_dimensions =
        ComposePermutations(window_dimensions, layout_permutation);
    for (int64_t i = 0; i < permuted_window_dimensions.size(); ++i) {
      if (permuted_window_dimensions[i] == -1) {
        normalized_dims.add_inserted_window_dims(i);
      } else {
        normalized_dims.add_update_window_dims(permuted_window_dimensions[i]);
      }
    }
    auto normalized_shape = normalized_operands.front()->shape();
    if (scatter->shape().IsTuple()) {
      std::vector<Shape> tuple_shapes;
      tuple_shapes.reserve(normalized_operands.size());
      for (HloInstruction* operand : normalized_operands) {
        tuple_shapes.push_back(operand->shape());
      }
      normalized_shape = ShapeUtil::MakeTupleShape(tuple_shapes);
    }
    auto normalized_scatter = hlo->AddInstruction(HloInstruction::CreateScatter(
        normalized_shape, normalized_operands, normalized_indices,
        normalized_updates, scatter->to_apply(), normalized_dims,
        scatter->indices_are_sorted(), scatter->unique_indices()));
    SetVisited(*normalized_scatter);
    auto bc_to_orig = MakeBitcastHlo(normalized_scatter, scatter->shape());
    TF_RETURN_IF_ERROR(ReplaceInstruction(scatter, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a transpose: a bitcast-equivalent transpose collapses into a
  // single bitcast of the normalized input; a physical transpose is rebuilt
  // with its dimensions composed with both layouts' permutations.
  absl::Status HandleTranspose(HloInstruction* hlo) override {
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    auto operand_s = operand->shape();
    TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
    auto normalized_shape = Normalize(s);
    VLOG(3) << "Input transpose: " << hlo->ToString();
    if (!ShapeUtil::TransposeIsBitcast(s, operand_s, hlo->dimensions())) {
      // Physical transpose: dims = inv(operand layout) . dims . output layout.
      auto l0_perm =
          InversePermutation(ToTransposeDimensions(operand_s.layout()));
      auto l_perm = ToTransposeDimensions(s.layout());
      auto t = ComposePermutations(l0_perm, hlo->dimensions());
      auto dimensions = ComposePermutations(t, l_perm);
      auto normalized_transpose = hlo->AddInstruction(
          HloInstruction::CreateTranspose(normalized_shape, a0, dimensions));
      SetVisited(*normalized_transpose);
      VLOG(3) << "Generated normalized physical transpose: "
              << normalized_transpose->ToString();
      auto bc_to_orig = MakeBitcastHlo(normalized_transpose, s);
      TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    } else {
      // Bitcast-equivalent transpose: no data movement needed.
      auto bc_to_orig = MakeBitcastHlo(a0, s, &hlo->metadata());
      TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    }
    return absl::OkStatus();
  }
  // Rewrites a copy (which may change layout) as an explicit transpose
  // between the normalized operand and the normalized output, composed from
  // the inverse operand layout and the output layout permutations.
  absl::Status HandleCopy(HloInstruction* hlo) override {
    VLOG(3) << "Processing copy: " << hlo->ToString();
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
    auto s_normalized = Normalize(s);
    auto l0_perm =
        InversePermutation(ToTransposeDimensions(operand->shape().layout()));
    auto l_perm = ToTransposeDimensions(s.layout());
    auto dimensions = ComposePermutations(l0_perm, l_perm);
    auto t = hlo->AddInstruction(
        HloInstruction::CreateTranspose(s_normalized, a0, dimensions));
    SetVisited(*t);
    auto bc_to_orig = MakeBitcastHlo(t, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
  // Rewrites a reverse over the normalized input, remapping the reversed
  // dimensions through the inverse layout permutation (sorted, since the
  // order of reverse dimensions is irrelevant).
  absl::Status HandleReverse(HloInstruction* hlo) override {
    auto s = hlo->shape();
    auto operand = hlo->mutable_operand(0);
    TF_ASSIGN_OR_RETURN(auto a0, GetNormalizedInput(operand));
    std::vector<int64_t> layout_as_permutation =
        ToTransposeDimensions(hlo->shape().layout());
    std::vector<int64_t> new_dimensions;
    new_dimensions.reserve(hlo->dimensions().size());
    auto inverse_perm = InversePermutation(layout_as_permutation);
    for (int64_t dim : hlo->dimensions()) {
      new_dimensions.push_back(inverse_perm[dim]);
    }
    absl::c_sort(new_dimensions);
    auto normalized_reverse = hlo->AddInstruction(
        HloInstruction::CreateReverse(a0->shape(), a0, new_dimensions));
    SetVisited(*normalized_reverse);
    auto bc_to_orig = MakeBitcastHlo(normalized_reverse, s);
    TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
    return absl::OkStatus();
  }
// Normalizes a pad: each dimension's padding config is moved to the position
// that dimension occupies in the normalized shape (via the inverse of the
// output layout permutation). The padding value operand is scalar and passes
// through unchanged.
absl::Status HandlePad(HloInstruction* hlo) override {
  auto s = hlo->shape();
  auto operand = hlo->mutable_operand(0);
  auto padded_by = hlo->mutable_operand(1);
  auto padded_config = hlo->padding_config();
  TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
                      GetNormalizedInput(operand));
  auto s_normalized = Normalize(s);
  auto layout_as_permutation = ToTransposeDimensions(s.layout());
  // Pre-size the new config; entries are filled by the permuting loop below.
  PaddingConfig new_padding;
  new_padding.mutable_dimensions()->Reserve(s_normalized.dimensions_size());
  for (int dim = 0; dim < s_normalized.dimensions_size(); dim++) {
    new_padding.add_dimensions();
  }
  auto inverse_perm = InversePermutation(layout_as_permutation);
  for (int dim = 0; dim < s.dimensions_size(); dim++) {
    int tr_dim = static_cast<int>(inverse_perm[dim]);
    *new_padding.mutable_dimensions(tr_dim) = padded_config.dimensions(dim);
  }
  auto padded_normalized = hlo->AddInstruction(HloInstruction::CreatePad(
      s_normalized, normalized_input, padded_by, new_padding));
  SetVisited(*padded_normalized);
  auto bc_to_orig = MakeBitcastHlo(padded_normalized, s);
  TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
  return absl::OkStatus();
}
// Custom calls are opaque; delegate normalization to the user-supplied
// transformer if one was configured. If the transformer declines (returns
// nullopt), fall back to the default action.
absl::Status HandleCustomCall(HloInstruction* hlo) override {
  if (custom_call_transformer_) {
    TF_ASSIGN_OR_RETURN(
        std::optional<HloInstruction*> transformed_custom_call,
        custom_call_transformer_(Cast<HloCustomCallInstruction>(hlo)));
    if (transformed_custom_call) {
      // The transformer's operand is already normalized; mark it visited so
      // it is not processed again.
      SetVisited(*(*transformed_custom_call)->operand(0));
      TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, *transformed_custom_call));
      return absl::OkStatus();
    }
  }
  return DefaultAction(hlo);
}
// Select shares the generic three-operand normalization path with clamp.
absl::Status HandleSelect(HloInstruction* hlo) override {
  return HandleTernary(hlo);
}
// Normalizes a dynamic-slice: permutes the scalar start-index operands and
// the slice sizes into normalized dimension order, then bitcasts the result
// back to the original shape. Requires the output and input layouts to match.
absl::Status HandleDynamicSlice(HloInstruction* hlo) override {
  const Shape& s = hlo->shape();
  HloInstruction* operand = hlo->mutable_operand(0);
  const Shape& operand_shape = operand->shape();
  TF_RET_CHECK(s.layout() == operand_shape.layout());
  TF_ASSIGN_OR_RETURN(HloInstruction * normalized_input,
                      GetNormalizedInput(operand));
  Shape normalized = Normalize(operand_shape);
  std::vector<int64_t> layout_as_permutation =
      ToTransposeDimensions(hlo->shape().layout());
  // Start indices begin at operand 1 for dynamic-slice.
  std::vector<HloInstruction*> new_start_indices =
      GetNewStartIdxs(hlo, 1, layout_as_permutation);
  auto normalize_slice_attr = [&](absl::Span<int64_t const> input) {
    return Permute(input, layout_as_permutation);
  };
  TF_ASSIGN_OR_RETURN(
      HloInstruction * normalized_dynamic_slice,
      MakeDynamicSliceHlo(normalized_input, new_start_indices,
                          normalize_slice_attr(hlo->dynamic_slice_sizes()),
                          &hlo->metadata()));
  // Force the helper-created instruction to carry the normalized layout.
  *normalized_dynamic_slice->mutable_shape()->mutable_layout() =
      normalized_input->shape().layout();
  SetVisited(*normalized_dynamic_slice);
  HloInstruction* bc_to_orig = MakeBitcastHlo(normalized_dynamic_slice, s);
  TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
  return absl::OkStatus();
}
// Normalizes a dynamic-update-slice: both the operand and the update are
// normalized, and the start-index operands are permuted to match. Requires
// the output and input layouts to match.
absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override {
  const Shape& s = hlo->shape();
  HloInstruction* operand = hlo->mutable_operand(0);
  HloInstruction* update = hlo->mutable_operand(1);
  const Shape& operand_shape = operand->shape();
  TF_RET_CHECK(s.layout() == operand_shape.layout());
  std::vector<int64_t> layout_as_permutation =
      ToTransposeDimensions(hlo->shape().layout());
  TF_ASSIGN_OR_RETURN(HloInstruction * new_operand,
                      GetNormalizedInput(operand));
  TF_ASSIGN_OR_RETURN(HloInstruction * new_update,
                      GetNormalizedInput(update));
  // Start indices begin at operand 2 for dynamic-update-slice.
  std::vector<HloInstruction*> new_start_indices =
      GetNewStartIdxs(hlo, 2, layout_as_permutation);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * new_dus,
      MakeDynamicUpdateSliceHlo(new_operand, new_update, new_start_indices,
                                &hlo->metadata()));
  // Force the helper-created instruction to carry the normalized layout.
  *new_dus->mutable_shape()->mutable_layout() = new_operand->shape().layout();
  SetVisited(*new_dus);
  HloInstruction* bc_to_orig = MakeBitcastHlo(new_dus, s);
  TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, bc_to_orig));
  return absl::OkStatus();
}
// Clamp shares the generic three-operand normalization path with select.
absl::Status HandleClamp(HloInstruction* hlo) override {
  return HandleTernary(hlo);
}
private:
absl::Status HandleTernary(HloInstruction* hlo) {
Shape s = hlo->shape();
HloOpcode opcode = hlo->opcode();
TF_RET_CHECK(opcode == HloOpcode::kClamp || opcode == HloOpcode::kSelect);
HloInstruction* p = hlo->mutable_operand(0);
HloInstruction* i1 = hlo->mutable_operand(1);
HloInstruction* i2 = hlo->mutable_operand(2);
TF_RET_CHECK(p->shape().layout() == s.layout());
TF_RET_CHECK(i1->shape().layout() == s.layout());
TF_RET_CHECK(i2->shape().layout() == s.layout());
TF_ASSIGN_OR_RETURN(HloInstruction * p_0, GetNormalizedInput(p));
TF_ASSIGN_OR_RETURN(HloInstruction * i1_0, GetNormalizedInput(i1));
TF_ASSIGN_OR_RETURN(HloInstruction * i2_0, GetNormalizedInput(i2));
TF_ASSIGN_OR_RETURN(Shape new_shape, ShapeInference::InferTernaryOpShape(
opcode, p_0, i1_0, i2_0));
HloInstruction* normalized = hlo->parent()->AddInstruction(
HloInstruction::CreateTernary(new_shape, opcode, p_0, i1_0, i2_0));
hlo->SetupDerivedInstruction(normalized);
SetVisited(*normalized);
HloInstruction* bc_to_orig = MakeBitcastHlo(normalized, s);
TF_RET | #include "xla/service/layout_normalization.h"
#include <functional>
#include <optional>
#include <utility>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Test fixture: runs the LayoutNormalization pass on an HLO string and
// FileCheck-verifies the rewritten module against `expected` (nullopt means
// the pass is expected to make no changes). `after_pass_checks`, if given,
// runs additional validation on the rewritten module.
class LayoutNormalizationTest : public HloTestBase {
 public:
  void CheckLayoutNormalization(
      absl::string_view hlo, std::optional<absl::string_view> expected,
      std::function<void(HloModule*)> after_pass_checks = nullptr) {
    RunAndFilecheckHloRewrite(hlo, LayoutNormalization{}, expected,
                              after_pass_checks);
  }
};
// Baseline: a unary op whose input/output share a non-default layout.
// NOTE(review): the FileCheck expectation body is empty in this snapshot, so
// only the rewrite's success is verified — confirm against upstream source.
TEST_F(LayoutNormalizationTest, TestDefault) {
  const char* hlo = R"(
HloModule module
ENTRY main {
  p = f32[5,4]{0,1} parameter(0)
  ROOT o = f32[5,4]{0,1} abs(p)
}
)";
  CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestUnary) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,4]{0,1} parameter(0)
a = f32[5,4]{0,1} abs(p)
ROOT b = f32[5,4]{0,1} sqrt(a)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestUnaryDegenerateDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,1,4,1]{0,1,2,3} parameter(0)
ROOT o = f32[5,1,4,1]{0,1,2,3} abs(p)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, TestBinary) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[5,4]{0,1} parameter(0)
b = f32[5,4]{0,1} parameter(1)
c = add(a, b)
ROOT out = sqrt(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Reshape) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[5,4]{0,1} parameter(0)
ROOT b = f32[5,2,2]{0,2,1} reshape(a)
})";
CheckLayoutNormalization(hlo, R"(
)");
}
// Logical transpose whose output layout undoes the dimension swap.
// NOTE(review): the FileCheck expectation body is empty in this snapshot, so
// only the rewrite's success is verified — confirm against upstream source.
TEST_F(LayoutNormalizationTest, Transpose) {
  const char* hlo = R"(
HloModule module
ENTRY main {
  a = f32[5,4]{1,0} parameter(0)
  t = f32[4,5]{0,1} transpose(a), dimensions={1,0}
  ROOT out = abs(t)
}
)";
  CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PhysicalTranspose) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f64[3,4,5]{0,1,2} parameter(0)
t = f32[5,4,3]{2,0,1} transpose(p), dimensions={2,1,0}
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PhysicalTransposeDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,4,5,1]{0,1,2,3} parameter(0)
t = f32[5,1,4,3]{3,2,0,1} transpose(p), dimensions={2,3,1,0}
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Copy) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,4,5]{0,1,2} parameter(0)
t = f32[3,4,5]{2,1,0} copy(p)
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, CopyDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[3,1,4,1,5]{0,1,2,3,4} parameter(0)
t = f32[3,1,4,1,5]{4,3,2,1,0} copy(p)
ROOT out = abs(t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Broadcast) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,5]{0,1} parameter(0)
b = f32[4,3,2,5]{0,1,2,3} broadcast(a), dimensions={0,3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastOperandLayoutNotInverseOfItself) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,3,5]{0,2,1} parameter(0)
b = f32[4,3,2,5]{0,1,2,3} broadcast(a), dimensions={0,1,3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastCustomOutputLayout) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3]{1,0} parameter(0)
b = f32[2,4,3]{1,2,0} broadcast(a), dimensions={0,2}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastUnsortedDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3]{1,0} parameter(0)
b = f32[3,4,2]{2,1,0} broadcast(a), dimensions={2,0}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastCustomOutputLayoutWithDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[9]{0} parameter(0)
b = f32[2,1,4,9]{2,0,1,3} broadcast(a), dimensions={3}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BroadcastWithDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,4,5]{0,1,2} parameter(0)
b = f32[1,4,3,1,2,5,1]{0,1,2,3,4,5,6} broadcast(a), dimensions={0,1,5}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, IotaCustomOutputLayout) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,4,3]{1,2,0} iota(), iota_dimension=2
ROOT out = abs(a)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Concatenate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[4,5]{0,1} parameter(0)
b = f32[4,5]{0,1} parameter(1)
c = f32[8,5]{0,1} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,4,5]{0,1,2} parameter(0)
b = f32[1,4,5]{0,1,2} parameter(1)
c = f32[2,4,5]{0,1,2} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOneDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5]{0,1} parameter(0)
b = f32[2,5]{0,1} parameter(1)
c = f32[3,5]{0,1} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOneDegenerateDimOfMany) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5,1,4]{0,1,3,2} parameter(0)
b = f32[1,5,1,4]{0,1,3,2} parameter(1)
c = f32[2,5,1,4]{0,1,3,2} concatenate(a, b), dimensions={0}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConcatenateOtherDegenerateDim) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,5]{0,1} parameter(0)
b = f32[1,5]{0,1} parameter(1)
c = f32[1,10]{0,1} concatenate(a, b), dimensions={1}
ROOT out = abs(c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Reverse) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[2,3,5]{0,2,1} parameter(0)
b = f32[2,3,5]{0,2,1} reverse(a), dimensions={0,1}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ReverseDegenerateDimensions) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5]{0,2,1} parameter(0)
b = f32[1,3,5]{1,2,0} reverse(a), dimensions={0,1}
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Pad) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5,7]{0,2,1,3} parameter(0)
z = f32[] constant(0)
b = f32[1,13,15,7]{0,2,1,3} pad(a, z), padding=0_0x5_5x5_5x0_0
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PadDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5]{0,2,1} parameter(0)
z = f32[] constant(0)
b = f32[11,13,15]{0,2,1} pad(a, z), padding=5_5x5_5x5_5
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, PadOtherDimDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
a = f32[1,3,5,1]{0,2,1,3} parameter(0)
z = f32[] constant(0)
b = f32[11,13,7,1]{0,2,1,3} pad(a, z), padding=5_5x5_5x1_1x0_0
ROOT out = abs(b)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ReduceWindow) {
const char* hlo = R"(
HloModule R2Window
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2Window {
operand = f32[256,384]{0,1} parameter(0)
constant = f32[] constant(1)
ROOT reduce-window = f32[256,384]{0,1} reduce-window(operand, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Constant) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[5,4]{0,1} parameter(0)
c = f32[5,4]{0,1} constant({...})
ROOT o = f32[5,4]{0,1} add(p, c)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, ConstantAvoidRevisitOfUser) {
const char* hlo = R"(
HloModule module
ENTRY main {
c = f32[5,4]{0,1} constant({...})
s = f32[5,4]{0,1} sine(c)
t = f32[5,4]{0,1} tanh(s)
ROOT o = f32[5,4]{0,1} add(s, t)
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Slice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{1,3,2,0} parameter(0)
ROOT converted = f32[1,4,6,6]{1,3,2,0} slice(input), slice={[0:1],[0:4],[0:6],[0:6]}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Select) {
const char* hlo = R"(
HloModule module
ENTRY main {
p0 = f32[1,17,9,9]{1,3,2,0} parameter(0)
p1 = f32[1,17,9,9]{1,3,2,0} parameter(1)
b = pred[1,17,9,9]{1,3,2,0} parameter(2)
ROOT out = f32[1,17,9,9]{1,3,2,0} select(b, p0, p1), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicSlice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[3,4,32]{1,0,2} parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT out = f32[1,4,32]{1,0,2} dynamic-slice(input, p1, p2, p3), dynamic_slice_sizes={1,4,32}, metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicSliceHasDegenerate) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,4,32]{1,0,2} parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT out = f32[1,4,32]{1,0,2} dynamic-slice(input, p1, p2, p3), dynamic_slice_sizes={1,4,32}, metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicUpdateSlice) {
const char* hlo = R"(
HloModule m
ENTRY main {
to_update = f32[3,1,32]{1,0,2} parameter(0)
updates = f32[1,1,32]{1,0,2} parameter(1)
p0 = s32[] parameter(2)
p1 = s32[] parameter(3)
p2 = s32[] parameter(4)
ROOT out = f32[3,1,32]{1,0,2} dynamic-update-slice(to_update, updates, p0, p1, p2), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, DynamicUpdateSliceNonDeg) {
const char* hlo = R"(
HloModule m
ENTRY main {
to_update = f32[5,3,32]{1,0,2} parameter(0)
updates = f32[1,1,32]{1,0,2} parameter(1)
p0 = s32[] parameter(2)
p1 = s32[] parameter(3)
p2 = s32[] parameter(4)
ROOT out = f32[5,3,32]{1,0,2} dynamic-update-slice(to_update, updates, p0, p1, p2), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Clamp) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = f32[64,1,32]{1,0,2} parameter(0)
p1 = f32[64,1,32]{1,0,2} parameter(1)
p2 = f32[64,1,32]{1,0,2} parameter(2)
ROOT out = f32[64,1,32]{1,0,2} clamp(f32[64,1,32]{1,0,2} p0, f32[64,1,32]{1,0,2} p1, f32[64,1,32]{1,0,2} p2), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BitcastConvertToBiggerType) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = u32[4,2]{0,1} parameter(0)
ROOT out = u64[4]{0} bitcast-convert(u32[4,2]{0,1} p0), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, BitcastConvertToSmallerType) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = u64[4]{0} parameter(0)
ROOT out = u32[4,2]{0,1} bitcast-convert(u64[4]{0} p0), metadata={op_name="test"}
}
)";
CheckLayoutNormalization(hlo, R"(
)");
}
TEST_F(LayoutNormalizationTest, Scatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
ROOT maximum.13 = s16[] maximum(Arg_0.11, Arg_1.12)
}
ENTRY main.17 {
p0 = s16[3,2,2,14,16]{0,1,4,3,2} parameter(0)
p1 = s32[2,11]{0,1} parameter(1)
p2 = s16[11,3,5]{2,0,1} parameter(2)
ROOT scatter = s16[3,2,2,14,16]{0,1,4,3,2} scatter(p0, p1, p2), update_window_dims={1,2}, inserted_window_dims={1,2,3}, scatter_dims_to_operand_dims={4,0}, index_vector_dim=0, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
TEST_F(LayoutNormalizationTest, SimplifiedScatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
ROOT maximum.13 = s16[] maximum(Arg_0.11, Arg_1.12)
}
ENTRY main.17 {
p0 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(0)
p1 = s32[528,2]{1,0} parameter(1)
p2 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(2)
ROOT scatter = s16[16,3,2,2,14]{0,4,3,2,1} scatter(p0, p1, p2), update_window_dims={1,2,3,4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
TEST_F(LayoutNormalizationTest, VariadicScatter) {
const char* hlo = R"(
HloModule simplified_scatter
region_0.10 {
Arg_0.11 = s16[] parameter(0)
Arg_1.12 = s16[] parameter(1)
Arg_2.13 = s16[] parameter(2)
Arg_3.14 = s16[] parameter(3)
maximum.15 = s16[] maximum(Arg_0.11, Arg_1.12)
maximum.16 = s16[] maximum(Arg_2.13, Arg_3.14)
ROOT res = (s16[], s16[]) tuple(maximum.15, maximum.16)
}
ENTRY main.17 {
p0 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(0)
p1 = s16[16,3,2,2,14]{0,4,3,2,1} parameter(1)
p2 = s32[528,2]{1,0} parameter(2)
p3 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(3)
p4 = s16[528,5,3,1,1,1]{1,2,0,5,4,3} parameter(4)
ROOT scatter = (s16[16,3,2,2,14]{0,4,3,2,1}, s16[16,3,2,2,14]{0,4,3,2,1}) scatter(p0, p1, p2, p3, p4), update_window_dims={1,2,3,4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, to_apply=region_0.10
}
)";
CheckLayoutNormalization(
hlo, R"(
)",
[](HloModule* module) {
TF_CHECK_OK(ScatterSimplifier().Run(module).status());
});
}
}
} |
1,924 | cpp | tensorflow/tensorflow | generic_transfer_manager | third_party/xla/xla/service/generic_transfer_manager.cc | third_party/xla/xla/service/generic_transfer_manager_test.cc | #ifndef XLA_SERVICE_GENERIC_TRANSFER_MANAGER_H_
#define XLA_SERVICE_GENERIC_TRANSFER_MANAGER_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
namespace xla {
// A platform-agnostic TransferManager that moves literals between host and
// device with plain stream memcpys. Backends with simple linear memory can
// use it directly; others subclass and override the Transfer* hooks.
class GenericTransferManager : public TransferManager {
 public:
  // Metadata accepted by TransferLiteralFromDevice. When
  // `callback_is_host_callback_safe` is true the `done` callback is enqueued
  // on the stream instead of blocking the caller.
  struct LiteralFromDeviceMetadata : public TransferManager::TransferMetadata {
    bool callback_is_host_callback_safe = false;
  };

  // `pointer_size` is the device pointer width, used to size tuple index
  // tables (one pointer per tuple element).
  GenericTransferManager(se::Platform::Id platform_id, size_t pointer_size);

  se::Platform::Id PlatformId() const override;

  void TransferLiteralFromDevice(
      se::Stream* stream, const ShapedBuffer& device_buffer,
      MutableBorrowingLiteral literal, std::function<void(absl::Status)> done,
      const TransferMetadata* transfer_metadata) override;

  absl::Status TransferLiteralToDeviceAsync(
      se::Stream* stream, const LiteralSlice& literal,
      const ShapedBuffer& device_buffer,
      const TransferMetadata* transfer_metadata) override;

  // Infeed/outfeed are unimplemented in the generic manager.
  absl::Status TransferLiteralToInfeed(se::StreamExecutor* executor,
                                       const LiteralSlice& literal) override;
  absl::Status TransferLiteralFromOutfeed(
      se::StreamExecutor* executor, MutableBorrowingLiteral literal) override;

  absl::Status ResetDevices(
      absl::Span<se::StreamExecutor* const> executors) override;

  int64_t GetByteSizeRequirement(const Shape& shape) const override;

  absl::Status WriteSingleTupleIndexTable(
      se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
      const Shape& shape, se::DeviceMemoryBase* region) override;

  Shape HostShapeToDeviceShape(const Shape& host_shape) const override;

 private:
  // Raw byte-copy hooks; overridable for backends with custom DMA paths.
  virtual absl::Status TransferBufferFromDevice(
      se::Stream* stream, const se::DeviceMemoryBase& source, int64_t size,
      void* destination);

  virtual absl::Status TransferBufferToDevice(
      se::Stream* stream, int64_t size, const void* source,
      se::DeviceMemoryBase* destination);

  // Packed transfer hooks for sub-byte element types (e.g. s4/u4): data is
  // stored packed on device and unpacked/packed on the host side.
  virtual absl::Status TransferIntNArrayFromDevice(
      se::Stream* stream, const se::DeviceMemoryBase& source,
      PrimitiveType element_type, int64_t num_elements, void* destination);

  virtual absl::Status TransferIntNArrayToDevice(
      se::Stream* stream, PrimitiveType element_type, int64_t num_elements,
      const void* source, se::DeviceMemoryBase* destination);

  const se::Platform::Id platform_id_;
  // Device pointer width in bytes; see GetByteSizeRequirement.
  const size_t pointer_size_;

  GenericTransferManager(const GenericTransferManager&) = delete;
  GenericTransferManager& operator=(const GenericTransferManager&) = delete;
};
}
#endif
#include "xla/service/generic_transfer_manager.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
// See header: `pointer_size` is the device pointer width used when sizing
// tuple index tables.
GenericTransferManager::GenericTransferManager(se::Platform::Id platform_id,
                                               size_t pointer_size)
    : platform_id_(platform_id), pointer_size_(pointer_size) {}

se::Platform::Id GenericTransferManager::PlatformId() const {
  return platform_id_;
}
// Writes the index table for one tuple level: an array of device pointers,
// one per tuple element, copied into `region`. The pointer vector is kept
// alive via a shared_ptr captured by an empty host callback that runs only
// after the async copy has completed.
absl::Status GenericTransferManager::WriteSingleTupleIndexTable(
    se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
    const Shape& shape, se::DeviceMemoryBase* region) {
  TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape));
  auto element_pointers = std::make_shared<std::vector<const void*>>();
  element_pointers->reserve(elements.size());
  for (const se::DeviceMemoryBase& element : elements) {
    element_pointers->push_back(element.opaque());
  }
  TF_RETURN_IF_ERROR(TransferBufferToDevice(
      stream, GetByteSizeRequirement(shape), element_pointers->data(), region));
  // Deliberately empty body: the capture keeps `element_pointers` alive
  // until prior work on the stream (the copy above) is done.
  TF_RETURN_IF_ERROR(
      stream->DoHostCallback([element_pointers{std::move(element_pointers)}]() {
      }));
  return absl::OkStatus();
}
// Copies `device_buffer` into `literal` on the host and invokes `done` with
// the final status. Sub-byte non-pred element types are transferred packed
// and unpacked on the host when PackSubbyteTypes() is true. If
// `transfer_metadata` marks the callback as host-callback safe, `done` is
// enqueued on the stream; otherwise the call blocks until the stream drains.
void GenericTransferManager::TransferLiteralFromDevice(
    se::Stream* stream, const ShapedBuffer& device_buffer,
    MutableBorrowingLiteral literal, std::function<void(absl::Status)> done,
    const TransferMetadata* transfer_metadata) {
  VLOG(2) << "transferring literal from device ordinal "
          << stream->parent()->device_ordinal()
          << "; device buffer: " << device_buffer;
  absl::Status status = [&]() -> absl::Status {
    TF_RET_CHECK(stream->parent()->device_ordinal() ==
                 device_buffer.device_ordinal());
    // Enqueue one transfer per array subshape of the device buffer.
    TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
        device_buffer.on_device_shape(),
        [&](const Shape& subshape, const ShapeIndex& index) -> absl::Status {
          if (subshape.IsArray()) {
            if (PackSubbyteTypes() &&
                primitive_util::IsSubByteNonPredType(subshape.element_type())) {
              if (!subshape.is_static()) {
                // Name the actual element type (s4/u4/...) instead of
                // hard-coding "Int4", matching the message produced by
                // TransferLiteralToDeviceAsync for the mirror case.
                return absl::UnimplementedError(absl::StrFormat(
                    "%s outputs with dynamic shapes are unsupported",
                    primitive_util::LowercasePrimitiveTypeName(
                        subshape.element_type())));
              }
              return TransferIntNArrayFromDevice(
                  stream, device_buffer.buffer(index), subshape.element_type(),
                  ShapeUtil::ElementsIn(subshape), literal.untyped_data(index));
            } else {
              TF_RETURN_IF_ERROR(TransferBufferFromDevice(
                  stream, device_buffer.buffer(index),
                  GetByteSizeRequirement(
                      ShapeUtil::GetSubshape(literal.shape(), index)),
                  literal.untyped_data(index)));
            }
          }
          return absl::OkStatus();
        }));
    return absl::OkStatus();
  }();
  if (!status.ok()) {
    done(status);
    return;
  }
  if ((transfer_metadata != nullptr) &&
      tensorflow::down_cast<const LiteralFromDeviceMetadata*>(transfer_metadata)
          ->callback_is_host_callback_safe) {
    // Distinct name (was `status`, shadowing the outer variable): the
    // callback fires once prior work on the stream completes.
    absl::Status callback_status =
        stream->DoHostCallback([done = std::move(done), stream] {
          done(stream->ok() ? absl::OkStatus()
                            : Internal("`TransferLiteralFromDevice` failed"));
        });
    if (!callback_status.ok()) {
      LOG(ERROR) << "`DoHostCallback` failed: " << callback_status;
    }
  } else {
    done(stream->BlockHostUntilDone());
  }
}
// Asynchronously copies `literal` into `device_buffer`, one transfer per
// array subshape. Subliterals whose layout differs from the device layout
// are relaid out into a temporary that a host callback keeps alive until the
// copy completes.
absl::Status GenericTransferManager::TransferLiteralToDeviceAsync(
    se::Stream* stream, const LiteralSlice& literal,
    const ShapedBuffer& device_buffer,
    const TransferMetadata* /*transfer_metadata*/) {
  const Shape& shape = literal.shape();
  VLOG(2) << "transferring literal shape to device: "
          << ShapeUtil::HumanString(shape)
          << "; device buffer: " << device_buffer;
  TF_RET_CHECK(
      ShapeUtil::Compatible(literal.shape(), device_buffer.on_device_shape()));
  TF_RET_CHECK(stream->parent()->device_ordinal() ==
               device_buffer.device_ordinal());
  TF_RETURN_IF_ERROR(WriteTupleIndexTablesAsync(stream, device_buffer));
  return ShapeUtil::ForEachSubshapeWithStatus(
      device_buffer.on_device_shape(),
      [&](const Shape& device_subshape,
          const ShapeIndex& index) -> absl::Status {
        if (device_subshape.IsArray()) {
          int64_t size = GetByteSizeRequirement(device_subshape);
          se::DeviceMemoryBase device_memory = device_buffer.buffer(index);
          TF_RET_CHECK(size == device_memory.size());
          // Copies `source` into `device_memory`, packing sub-byte types
          // first when the backend stores them packed.
          auto TransferBuffer = [&](const void* source) {
            if (PackSubbyteTypes() && primitive_util::IsSubByteNonPredType(
                                          device_subshape.element_type())) {
              if (!device_subshape.is_static()) {
                return absl::UnimplementedError(absl::StrCat(
                    primitive_util::LowercasePrimitiveTypeName(
                        device_subshape.element_type()),
                    " inputs with dynamic shapes are unsupported"));
              }
              return TransferIntNArrayToDevice(
                  stream, device_subshape.element_type(),
                  ShapeUtil::ElementsIn(device_subshape), source,
                  &device_memory);
            } else {
              return TransferBufferToDevice(stream, size, source,
                                            &device_memory);
            }
          };
          LiteralSlice subliteral(literal, index);
          if (device_subshape.layout() == subliteral.shape().layout()) {
            // Layouts match: transfer host memory directly.
            return TransferBuffer(subliteral.untyped_data());
          } else {
            // Relayout into device order; the empty host callback keeps the
            // temporary alive until the async copy completes.
            auto relaid_out = std::make_shared<Literal>(
                subliteral.Relayout(device_subshape.layout()));
            TF_RETURN_IF_ERROR(TransferBuffer(relaid_out->untyped_data()));
            TF_RETURN_IF_ERROR(stream->DoHostCallback(
                [keep_alive = std::move(relaid_out)] {}));
          }
        }
        return absl::OkStatus();
      });
}
// Infeed/outfeed and device reset are backend-specific; the generic manager
// does not implement them.
absl::Status GenericTransferManager::TransferLiteralToInfeed(
    se::StreamExecutor* executor, const LiteralSlice& literal) {
  return Unimplemented("Generic transfer to Infeed");
}

absl::Status GenericTransferManager::TransferLiteralFromOutfeed(
    se::StreamExecutor* executor, MutableBorrowingLiteral literal) {
  return Unimplemented("Generic transfer from Outfeed");
}

absl::Status GenericTransferManager::ResetDevices(
    absl::Span<se::StreamExecutor* const>
    /*executors*/) {
  return Unimplemented(
      "Device reset is not yet supported on this platform (b/30481585)");
}
// Copies `size` bytes from device memory `source` into host `destination`,
// failing fast if the device allocation is smaller than the request.
absl::Status GenericTransferManager::TransferBufferFromDevice(
    se::Stream* stream, const se::DeviceMemoryBase& source, int64_t size,
    void* destination) {
  const bool source_fits = source.size() >= size;
  if (source_fits) {
    return stream->Memcpy(destination, source, size);
  }
  return absl::FailedPreconditionError(absl::StrFormat(
      "Source allocation on device not large enough for data transfer: "
      "%d < %d",
      source.size(), size));
}
// Copies `size` bytes from host `source` into device memory `destination`,
// failing fast if the device allocation is smaller than the request.
absl::Status GenericTransferManager::TransferBufferToDevice(
    se::Stream* stream, int64_t size, const void* source,
    se::DeviceMemoryBase* destination) {
  if (destination->size() < size) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "Destination allocation on device not large enough for data transfer: "
        "%d < %d",
        destination->size(), size));
  }
  return stream->Memcpy(destination, source, size);
}
// Transfers a packed sub-byte array from device and unpacks it into
// `destination` (one byte per element) via a host callback that runs after
// the copy completes. Assumes `bit_width` divides 8 — TODO confirm for all
// sub-byte types routed here.
absl::Status GenericTransferManager::TransferIntNArrayFromDevice(
    se::Stream* stream, const se::DeviceMemoryBase& source,
    PrimitiveType element_type, int64_t num_elements, void* destination) {
  int bit_width = primitive_util::BitWidth(element_type);
  int64_t elements_per_byte = 8 / bit_width;
  int64_t packed_size = CeilOfRatio(num_elements, elements_per_byte);
  auto packed_dst_data = std::make_unique<std::vector<char>>(packed_size);
  TF_RETURN_IF_ERROR(TransferBufferFromDevice(stream, source, packed_size,
                                              packed_dst_data->data()));
  // Unpack on the host once the async copy has finished; the capture also
  // keeps the packed buffer alive until then.
  TF_RETURN_IF_ERROR(
      stream->DoHostCallback([destination, bit_width, num_elements,
                              packed_dst_data = std::move(packed_dst_data)]() {
        UnpackIntN(
            bit_width, *packed_dst_data,
            absl::MakeSpan(static_cast<char*>(destination), num_elements));
      }));
  return absl::OkStatus();
}
// Packs a byte-per-element host array into sub-byte form and transfers it to
// device. The trailing empty host callback keeps the packed buffer alive
// until the async copy completes.
absl::Status GenericTransferManager::TransferIntNArrayToDevice(
    se::Stream* stream, PrimitiveType element_type, int64_t num_elements,
    const void* source, se::DeviceMemoryBase* destination) {
  int bit_width = primitive_util::BitWidth(element_type);
  int64_t elements_per_byte = 8 / bit_width;
  auto packed_src_data = std::make_unique<std::vector<char>>(
      CeilOfRatio(num_elements, elements_per_byte));
  PackIntN(bit_width,
           absl::MakeSpan(static_cast<const char*>(source), num_elements),
           absl::MakeSpan(*packed_src_data));
  TF_RETURN_IF_ERROR(TransferBufferToDevice(
      stream, packed_src_data->size(), packed_src_data->data(), destination));
  return stream->DoHostCallback([keep_alive = std::move(packed_src_data)] {});
}
// Returns the on-device byte size of `shape`. Tuples are sized via the
// configured pointer width; non-tuple dynamic shapes additionally reserve
// one int32_t per dimension for the runtime size metadata.
int64_t GenericTransferManager::GetByteSizeRequirement(
    const Shape& shape) const {
  int64_t byte_size = ShapeUtil::ByteSizeOf(shape, pointer_size_);
  // Only non-tuple dynamic shapes carry per-dimension size metadata.
  if (!shape.IsTuple() && !shape.is_static()) {
    byte_size += sizeof(int32_t) * shape.dimensions_size();
  }
  return byte_size;
}
// Maps a host shape to its device representation. When sub-byte packing is
// enabled, the layout records the element's bit width so downstream code
// knows the data is stored packed.
Shape GenericTransferManager::HostShapeToDeviceShape(
    const Shape& host_shape) const {
  Shape device_shape = TransferManager::HostShapeToDeviceShape(host_shape);
  if (PackSubbyteTypes() &&
      primitive_util::IsSubByteNonPredType(device_shape.element_type())) {
    device_shape.mutable_layout()->set_element_size_in_bits(
        primitive_util::BitWidth(device_shape.element_type()));
  }
  return device_shape;
}
} | #include "xla/service/generic_transfer_manager.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/host/host_platform_id.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tests/literal_test_util.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Test-only transfer manager that lets a test toggle sub-byte packing on or
// off at runtime via the public `pack_subbyte_types_` flag.
class PackingTransferManager : public GenericTransferManager {
 public:
  using GenericTransferManager::GenericTransferManager;
  // Controls the value reported by PackSubbyteTypes(); mutable by tests.
  bool pack_subbyte_types_ = true;
 private:
  bool PackSubbyteTypes() const override { return pack_subbyte_types_; }
};
// Fixture that wires a PackingTransferManager to the host platform's
// stream executor so tests can allocate device buffers and run transfers.
class GenericTransferManagerTest : public ::testing::Test {
 public:
  GenericTransferManagerTest()
      : transfer_manager_(se::host::kHostPlatformId,
                          sizeof(void*)) {}
  // Resolves the host platform, creates a stream on device 0, and builds an
  // allocator bound to that executor.
  void SetUp() override {
    TF_ASSERT_OK_AND_ASSIGN(
        se::Platform * platform,
        se::PlatformManager::PlatformWithId(se::host::kHostPlatformId));
    TF_ASSERT_OK_AND_ASSIGN(stream_executor_, platform->ExecutorForDevice(0));
    TF_ASSERT_OK_AND_ASSIGN(stream_, stream_executor_->CreateStream());
    allocator_ =
        std::make_unique<se::StreamExecutorMemoryAllocator>(stream_executor_);
  }
  // Allocates a scoped device buffer for `shape` on device 0; CHECK-fails
  // (via .value()) if allocation does not succeed.
  ScopedShapedBuffer AllocateBuffer(const Shape& shape) {
    auto buffer =
        transfer_manager_.AllocateScopedShapedBuffer(shape, allocator_.get(),
                                                     0);
    return std::move(buffer.value());
  }
  PackingTransferManager transfer_manager_;
  se::StreamExecutor* stream_executor_;
  std::unique_ptr<se::Stream> stream_;
  std::unique_ptr<se::DeviceMemoryAllocator> allocator_;
};
// A 2x2 u16 literal transferred to the device must appear row-major in the
// device buffer (host platform memory is directly inspectable).
TEST_F(GenericTransferManagerTest, TransferLiteralToDevice) {
  ScopedShapedBuffer buffer = AllocateBuffer(ShapeUtil::MakeShape(U16, {2, 2}));
  Literal literal = LiteralUtil::CreateR2<uint16_t>({{1, 2}, {3, 4}});
  TF_ASSERT_OK(transfer_manager_.TransferLiteralToDevice(stream_.get(), literal,
                                                         buffer));
  se::DeviceMemoryBase device_mem = buffer.buffers().element({});
  // On the host platform the "device" memory is ordinary host memory.
  uint16_t* device_ptr = static_cast<uint16_t*>(device_mem.opaque());
  std::vector<uint16_t> expected = {1, 2, 3, 4};
  EXPECT_EQ(absl::Span<uint16_t>(device_ptr, expected.size()), expected);
}
// Matches when `arg` and `expected` have the same size and every pair of
// elements compares equal after AND-ing both sides with `mask`. Used to
// ignore don't-care bits (e.g. the unused nibble of unpacked int4 values).
MATCHER_P2(MaskedValuesEqual, mask, expected, "") {
  if (arg.size() != expected.size()) {
    *result_listener << "argument sizes do not match";
    return false;
  }
  for (size_t idx = 0; idx < expected.size(); ++idx) {
    const auto masked_actual = arg[idx] & mask;
    const auto masked_expected = expected[idx] & mask;
    if (masked_actual == masked_expected) {
      continue;
    }
    *result_listener << "mismatch at position " << idx << ", " << masked_actual
                     << " vs " << masked_expected;
    return false;
  }
  return true;
}
// Transfers a 2x2 s4 literal both with and without sub-byte packing and
// checks the resulting device bytes: packed mode stores two nibbles per
// byte (2 bytes total), unpacked mode stores one element per byte.
TEST_F(GenericTransferManagerTest, TransferLiteralToDeviceInt4) {
  Literal literal =
      LiteralUtil::CreateR2<s4>({{s4{1}, s4{-2}}, {s4{-3}, s4{4}}});
  for (bool pack : {false, true}) {
    SCOPED_TRACE(absl::StrCat("pack=", pack));
    transfer_manager_.pack_subbyte_types_ = pack;
    ScopedShapedBuffer buffer =
        AllocateBuffer(ShapeUtil::MakeShape(S4, {2, 2}));
    TF_ASSERT_OK(transfer_manager_.TransferLiteralToDevice(stream_.get(),
                                                           literal, buffer));
    se::DeviceMemoryBase device_mem = buffer.buffers().element({});
    // Packed: 4 nibbles -> 2 bytes; unpacked: 1 byte per element.
    ASSERT_EQ(device_mem.size(), pack ? 2 : 4);
    int8_t* device_ptr = static_cast<int8_t*>(device_mem.opaque());
    std::vector<int8_t> expected =
        pack ? std::vector<int8_t>{static_cast<int8_t>(0x1e),
                                   static_cast<int8_t>(0xd4)}
             : std::vector<int8_t>{1, -2, -3, 4};
    // Unpacked bytes only define the low nibble, so mask with 0x0F there.
    EXPECT_THAT(absl::Span<int8_t>(device_ptr, expected.size()),
                MaskedValuesEqual(pack ? 0xFF : 0x0F, expected));
  }
}
// Writes u16 values directly into the device buffer and verifies that the
// literal read back through the base-class TransferLiteralFromDevice
// reproduces them.
TEST_F(GenericTransferManagerTest, TransferLiteralFromDevice) {
  ScopedShapedBuffer buffer = AllocateBuffer(ShapeUtil::MakeShape(U16, {2, 2}));
  se::DeviceMemoryBase device_mem = buffer.buffers().element({});
  uint16_t* device_ptr = static_cast<uint16_t*>(device_mem.opaque());
  // Seed the buffer with 1..4 in row-major order.
  for (int i = 0; i < 4; i++) {
    device_ptr[i] = i + 1;
  }
  TF_ASSERT_OK_AND_ASSIGN(
      Literal literal,
      transfer_manager_.TransferManager::TransferLiteralFromDevice(
          stream_.get(), buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(
      literal, LiteralUtil::CreateR2<uint16_t>({{1, 2}, {3, 4}})));
}
// Seeds the device buffer with s4 data (packed or unpacked) and verifies
// that reading it back yields the expected 2x2 s4 literal in both modes.
TEST_F(GenericTransferManagerTest, TransferLiteralFromDeviceInt4) {
  for (bool pack : {false, true}) {
    SCOPED_TRACE(absl::StrCat("pack=", pack));
    transfer_manager_.pack_subbyte_types_ = pack;
    ScopedShapedBuffer buffer =
        AllocateBuffer(ShapeUtil::MakeShape(S4, {2, 2}));
    se::DeviceMemoryBase device_mem = buffer.buffers().element({});
    uint8_t* device_ptr = static_cast<uint8_t*>(device_mem.opaque());
    if (pack) {
      // Two nibbles per byte: 0x1e = {1, -2}, 0xd4 = {-3, 4}.
      ASSERT_EQ(device_mem.size(), 2);
      device_ptr[0] = 0x1e;
      device_ptr[1] = 0xd4;
    } else {
      // One element per byte.
      ASSERT_EQ(device_mem.size(), 4);
      device_ptr[0] = 1;
      device_ptr[1] = -2;
      device_ptr[2] = -3;
      device_ptr[3] = 4;
    }
    TF_ASSERT_OK_AND_ASSIGN(
        Literal literal,
        transfer_manager_.TransferManager::TransferLiteralFromDevice(
            stream_.get(), buffer));
    EXPECT_TRUE(LiteralTestUtil::Equal(
        literal,
        LiteralUtil::CreateR2<s4>({{s4{1}, s4{-2}}, {s4{-3}, s4{4}}})));
  }
}
}
} |
1,925 | cpp | tensorflow/tensorflow | conditional_code_motion | third_party/xla/xla/service/conditional_code_motion.cc | third_party/xla/xla/service/conditional_code_motion_test.cc | #ifndef XLA_SERVICE_CONDITIONAL_CODE_MOTION_H_
#define XLA_SERVICE_CONDITIONAL_CODE_MOTION_H_
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace conditional_opt {
// A Boundary identifies a frontier position relative to a conditional: a
// set of instructions (one per branch when inside the conditional, exactly
// one when outside) that are candidates for being moved across the
// conditional boundary. Equality and hashing are defined on the operand
// list only, so boundaries can key hash maps/sets.
class Boundary {
 public:
  // Where the boundary's instructions live relative to the conditional.
  enum class Position {
    kInsideBranch,
    kOutsideBranchUser,
    kOutsideBranchOperand,
    kUndefined
  };
  Boundary() : position_(Position::kUndefined) {}
  explicit Boundary(Position p) : position_(p) {}
  std::vector<HloInstruction*>& mutable_operands() { return operands_; }
  const std::vector<HloInstruction*>& operands() const { return operands_; }
  bool IsInsideBranch() const { return position_ == Position::kInsideBranch; }
  bool IsOutsideBranchUser() const {
    return position_ == Position::kOutsideBranchUser;
  }
  bool IsOutsideBranchOperand() const {
    return position_ == Position::kOutsideBranchOperand;
  }
  Position GetPosition() const { return position_; }
  bool IsEmpty() const { return operands_.empty(); }
  // Debug string: each operand's ToString() joined with ';'.
  std::string ToString() const {
    std::string res;
    for (HloInstruction* op : operands_) {
      res += op->ToString() + ";";
    }
    return res;
  }
  // NOTE: equality/hash ignore position_ on purpose; only the instruction
  // list identifies a boundary.
  bool operator==(const Boundary& that) const {
    return absl::c_equal(operands_, that.operands_);
  }
  template <typename H>
  friend H AbslHashValue(H h, const Boundary& boundary) {
    return H::combine(std::move(h), boundary.operands_);
  }
 private:
  std::vector<HloInstruction*> operands_;
  Position position_;
};
// HLO pass that moves instructions into or out of conditional branches to
// reduce duplicated work and memory pressure. The search behavior can be
// tuned via a packed integer "search config" (see MakeSearchConfig) or a
// per-conditional textual configuration.
//
// Fix: the second VLOG in MakeSearchConfig logged "flig config" — corrected
// to "flip config" to match the "flip stride" message above it.
class ConditionalCodeMotion : public HloModulePass {
 public:
  // If the search_config is non-zero, conditional code motion is done
  // partially based on the modified decisions in the search configuration;
  // pursue_full_conditional_code_motion is then disabled.
  explicit ConditionalCodeMotion(bool is_layout_sensitive,
                                 bool pursue_full_conditional_code_motion,
                                 int64_t search_config = 0,
                                 int64_t memory_increase_allowance = 5000)
      : is_layout_sensitive_(is_layout_sensitive),
        pursue_full_conditional_code_motion_(
            pursue_full_conditional_code_motion && search_config == 0),
        search_config_index_(0),
        memory_increase_allowance_(memory_increase_allowance) {
    search_config_.push_back(search_config);
    if (search_config != 0) {
      search_config_map_[0] = search_config_;
    }
  }
  // String form: per-conditional configuration; disables full code motion
  // unless the string is empty.
  explicit ConditionalCodeMotion(bool is_layout_sensitive,
                                 bool pursue_full_conditional_code_motion,
                                 std::string search_config,
                                 int64_t memory_increase_allowance = 5000)
      : is_layout_sensitive_(is_layout_sensitive),
        pursue_full_conditional_code_motion_(
            pursue_full_conditional_code_motion && search_config.empty()),
        search_config_index_(-1),
        memory_increase_allowance_(memory_increase_allowance) {
    ParseSearchConfiguration(search_config);
  }
  // Parses the textual search configuration into search_config_map_.
  void ParseSearchConfiguration(const std::string& search_config);
  // Bit-field layout of the packed search config: 16-bit fields for the
  // first decision to flip (kStartPos), the max number of flips (kMaxPos)
  // and the stride between flips (kStridePos).
  static constexpr int kMaxPos = 16;
  static constexpr int kStartPos = 0;
  static constexpr int kStridePos = 32;
  static constexpr int kValueMask = 0xffff;
  // Packs (start, max, stride) into a single int64_t search config.
  static int64_t MakeSearchConfig(int64_t start, int64_t max, int64_t stride) {
    const int64_t config =
        (max << kMaxPos) + (start << kStartPos) + (stride << kStridePos);
    VLOG(2) << "flip stride = " << flip_stride(config) << "\n";
    VLOG(2) << "flip config = " << config << "\n";
    return config;
  }
  // Extracts the 16-bit "start" field.
  static int16_t flip_start(int64_t search_config) {
    return (search_config >> kStartPos) & kValueMask;
  }
  // Extracts the 16-bit "stride" field.
  static int16_t flip_stride(int64_t search_config) {
    return (search_config >> kStridePos) & kValueMask;
  }
  // Returns the current max-flip count and decrements it in place (stops
  // at zero).
  static int16_t DecrementMaxFlip(int64_t* search_config) {
    const int16_t max_flip = ((*search_config) >> kMaxPos) & kValueMask;
    // Decrement flip count so we can stop if it reaches the threshold.
    if (max_flip > 0) {
      *search_config -= (1 << kMaxPos);
    }
    return max_flip;
  }
  absl::string_view name() const override { return "conditional-code-motion"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Optimization decision for a boundary: which direction to move it (if
  // any) and the estimated benefit.
  class Decision {
   public:
    enum class Direction : uint8_t {
      kMoveOutOfBranch,
      kMoveIntoBranch,
      kNoChange
    };

   public:
    Decision(Direction direction, int benefit)
        : direction_(direction), benefit_(benefit) {}
    Direction GetDirection() const { return direction_; }
    int GetBenefit() const { return benefit_; }

   private:
    Direction direction_;
    int benefit_;
  };
  // Decides whether to move cur_boundary and, if so, in which direction;
  // appends to to_move / new_boundaries and updates visited_count.
  // Virtual so tests/subclasses can override the policy.
  virtual Decision ConsiderCodeMotion(
      HloInstruction* conditional, const Boundary& cur_boundary,
      std::vector<Boundary>& to_move, std::vector<Boundary>& new_boundaries,
      absl::flat_hash_map<HloInstruction*, int>& visited_count);

 private:
  const bool is_layout_sensitive_;
  const bool pursue_full_conditional_code_motion_;
  // Per-conditional packed search configs; index 0 is the default.
  std::vector<int64_t> search_config_;
  int64_t search_config_index_;
  absl::flat_hash_map<int64_t, std::vector<int64_t>> search_config_map_;
  std::vector<std::vector<int64_t>> move_config_, reuse_config_;
  // Maximum memory growth (bytes) the pass may introduce.
  int64_t memory_increase_allowance_ = 5000;
  int64_t memory_increase_ = 0;
  absl::StatusOr<bool> MoveInstructionOut(
      HloInstruction* conditional, std::vector<Boundary>& to_move_out,
      std::vector<Boundary>& new_boundaries);
  absl::StatusOr<bool> MoveUserInstructionsIn(
      HloInstruction* conditional, std::vector<Boundary>& to_move_in);
  absl::StatusOr<bool> MoveOperandInstructionsIn(
      HloInstruction* conditional, std::vector<Boundary>& to_move_in);
  void SetDefaultMoveConfig();
};
}
}
#endif
#include "xla/service/conditional_code_motion.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace conditional_opt {
// Recursively reconstructs a (possibly nested) tuple out of fresh
// get-tuple-element + tuple instructions so that each tuple value is
// produced by an explicit kTuple whose operands can be rewritten
// independently. Returns the (possibly new) instruction standing in for
// `tuple`; non-tuple inputs are returned unchanged.
HloInstruction* CloneNestedTuples(HloInstruction* tuple) {
  if (!tuple->shape().IsTuple()) {
    return tuple;
  }
  // Partition users: gte_users[i] holds the single GTE user for index i (if
  // any); all other users — including the root — go to tuple_users.
  std::vector<HloInstruction*> tuple_users, gte_users;
  for (int i = 0; i < tuple->shape().tuple_shapes_size(); ++i) {
    gte_users.push_back(nullptr);
  }
  for (auto* tuple_user : tuple->users()) {
    VLOG(2) << "tuple_user: " << tuple_user->ToString() << "\n";
    if (tuple_user->opcode() != HloOpcode::kGetTupleElement ||
        tuple_user == tuple->parent()->root_instruction()) {
      tuple_users.push_back(tuple_user);
    } else {
      gte_users[tuple_user->tuple_index()] = tuple_user;
    }
  }
  // Clone only if the tuple is used by a non-GTE user, is dead, or is the
  // computation root; otherwise recurse into the existing GTE users below.
  if (!tuple_users.empty() || tuple->user_count() == 0 ||
      tuple == tuple->parent()->root_instruction()) {
    VLOG(5) << "CLONING: " << tuple->ToString() << "\n";
    int64_t tuple_size = tuple->shape().tuple_shapes_size();
    std::vector<HloInstruction*> operands;
    operands.reserve(tuple_size);
    for (int64_t j = 0; j < tuple_size; ++j) {
      // Reuse an existing GTE for index j or create a new one, then recurse
      // so nested tuples are rebuilt too.
      HloInstruction* gte =
          (gte_users[j] == nullptr)
              ? tuple->parent()->AddInstruction(
                    HloInstruction::CreateGetTupleElement(
                        tuple->shape().tuple_shapes(j), tuple, j))
              : gte_users[j];
      CHECK_NE(gte, nullptr);
      operands.push_back(CloneNestedTuples(gte));
    }
    HloInstruction* new_tuple =
        tuple->parent()->AddInstruction(HloInstruction::CreateTuple(operands));
    VLOG(2) << "new_tuple: " << new_tuple->ToString() << "\n";
    if (tuple == tuple->parent()->root_instruction()) {
      tuple->parent()->set_root_instruction(new_tuple,
                                            true);
    } else {
      // Redirect only the non-GTE users; the GTEs now feed new_tuple.
      for (auto tuple_user : tuple_users) {
        TF_CHECK_OK(tuple->ReplaceUseWithDifferentShape(tuple_user, new_tuple));
      }
    }
    return new_tuple;
  }
  // All users are per-index GTEs: recurse into each so nested tuples below
  // them are rebuilt, but keep this tuple itself.
  for (auto gte_user : gte_users) {
    if (gte_user != nullptr) {
      auto gte = CloneNestedTuples(gte_user);
      CHECK_NE(gte, nullptr);
    }
  }
  return tuple;
}
// FIFO worklist of Boundary objects with duplicate suppression: every
// boundary handed out by PopNextBoundary is recorded in visited_ and is
// never returned again, even if re-added to the worklist.
class BoundaryVisitor {
 public:
  // Start visiting from the conditional instruction itself, positioned
  // inside the branch.
  explicit BoundaryVisitor(HloInstruction* conditional) {
    Boundary b(Boundary::Position::kInsideBranch);
    b.mutable_operands().push_back(conditional);
    worklist_.push_back(b);
  }
  BoundaryVisitor() {}
  // Pops and returns the next unvisited boundary, marking it visited.
  // Precondition (CHECKed): the worklist is non-empty.
  Boundary PopNextBoundary() {
    CHECK(!worklist_.empty());
    Boundary b = worklist_.front();
    worklist_.pop_front();
    // Skip over entries that were already visited on a previous pop.
    while (!worklist_.empty() && ContainsKey(visited_, b)) {
      b = worklist_.front();
      worklist_.pop_front();
    }
    visited_.insert(b);
    return b;
  }
  // Enqueue a boundary; it must carry at least one instruction.
  void AddToWorkList(const Boundary& b) {
    CHECK(!b.operands().empty());
    worklist_.push_back(b);
  }
  // Returns true iff an unvisited boundary remains; prunes visited entries
  // from the front of the worklist as a side effect.
  bool HasNextBoundary() {
    while (!worklist_.empty()) {
      Boundary b = worklist_.front();
      if (!ContainsKey(visited_, b)) {
        break;
      }
      worklist_.pop_front();
    }
    return !worklist_.empty();
  }
 private:
  std::deque<Boundary> worklist_;
  absl::flat_hash_set<Boundary> visited_;
};
template <class OpCollection>
int64_t CountNonLeafOps(const OpCollection& ops) {
absl::flat_hash_set<HloInstruction*> op_set;
for (auto op : ops) {
if (!op_set.contains(op) && op->opcode() != HloOpcode::kConstant) {
op_set.insert(op);
}
}
return op_set.size();
}
// Estimates the amount of reuse that `user` carries for the result of
// `op`, used to weigh moving instructions across a conditional boundary.
// Returns 0 for cheap pass-throughs, 10 when the producer is a
// conditional, and -10 otherwise.
int64_t ReusesCarriedBy(HloOpcode op, HloOpcode user) {
  // A get-tuple-element user never adds reuse.
  if (user == HloOpcode::kGetTupleElement) {
    return 0;
  }
  // Converting the result of a convolution or dot is treated as free.
  if (user == HloOpcode::kConvert &&
      (op == HloOpcode::kConvolution || op == HloOpcode::kDot)) {
    return 0;
  }
  switch (op) {
    case HloOpcode::kParameter:
    case HloOpcode::kConstant:
    case HloOpcode::kGetTupleElement:
      return 0;
    case HloOpcode::kConditional:
      return 10;
    default:
      return -10;
  }
}
// Decides whether an instruction with opcode `op`, whose operand has
// opcode `child_op`, is worth hoisting across a conditional boundary.
bool WorthHoisting(HloOpcode op, HloOpcode child_op) {
  switch (op) {
    case HloOpcode::kConvert:
      // Convert is only worth hoisting on top of these producers.
      return child_op == HloOpcode::kAllReduce ||
             child_op == HloOpcode::kReshape ||
             child_op == HloOpcode::kGetTupleElement;
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kTuple:
      // Tuple plumbing is cheap to hoist unless it sits on a parameter.
      return child_op != HloOpcode::kParameter;
    case HloOpcode::kAllReduce:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kReduce:
    case HloOpcode::kConstant:
    case HloOpcode::kReshape:
    case HloOpcode::kBroadcast:
      return true;
    default:
      // Any elementwise op is worth hoisting; everything else is not.
      return HloInstruction::IsOpElementwise(op);
  }
}
// Returns true iff all instructions in `instructions` (one per branch) are
// identical to each other, comparing operand shapes exactly or just for
// compatibility depending on `is_layout_sensitive`. Returns false for an
// empty list.
bool InstructionWithinBranchIdentical(
    const std::vector<HloInstruction*>& instructions,
    bool is_layout_sensitive) {
  // Operand equality: shape equality (layout-sensitive) or compatibility.
  auto eq_operand = [&](const HloInstruction* a, const HloInstruction* b) {
    bool eq_operands = is_layout_sensitive
                           ? ShapeUtil::Equal(a->shape(), b->shape())
                           : ShapeUtil::Compatible(a->shape(), b->shape());
    return eq_operands;
  };
  auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
    return *a == *b;
  };
  if (instructions.empty()) {
    return false;
  }
  if (instructions[0]->IsCrossModuleAllReduce()) {
    return std::all_of(
        instructions.begin(), instructions.end(),
        [&](HloInstruction* instruction) {
          if (!instruction->IsCrossModuleAllReduce()) {
            return false;
          }
          // Channel ids of cross-module all-reduces legitimately differ
          // per branch; temporarily unify them so Identical() can match,
          // then restore the original id.
          auto old_channel_id = instruction->channel_id();
          instruction->set_channel_id(instructions[0]->channel_id());
          bool eq_instructions = instructions[0]->Identical(
              *instruction, eq_operand, eq_computations, is_layout_sensitive);
          instruction->set_channel_id(old_channel_id);
          return eq_instructions;
        });
  }
  return std::all_of(instructions.begin(), instructions.end(),
                     [&](HloInstruction* instruction) {
                       return instructions[0]->Identical(
                           *instruction, eq_operand, eq_computations,
                           is_layout_sensitive);
                     });
}
// Hoists the (identical-per-branch) instructions of `boundary` out of the
// conditional by cloning branch 0's instruction in the parent computation,
// wiring its operands to the already-hoisted versions recorded in
// `hoisted_boundaries`. Records the new outside-branch boundary in
// `hoisted_boundaries` for use by later boundaries.
void CopyOutOfConditional(
    Boundary& boundary, HloInstruction* conditional,
    absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
  CHECK(boundary.IsInsideBranch());
  absl::InlinedVector<HloInstruction*, 4> new_operands;
  // Branch 0's instruction is the template for the hoisted clone; the
  // branches are assumed identical at this boundary.
  const HloInstruction* branch0_inst = boundary.operands()[0];
  for (int i = 0; i < branch0_inst->operands().size(); ++i) {
    // Build the boundary for operand i across all branches and look up its
    // already-hoisted counterpart (operands are hoisted before users).
    Boundary operand_boundary(boundary.GetPosition());
    for (HloInstruction* operand : boundary.operands()) {
      operand_boundary.mutable_operands().push_back(operand->operands()[i]);
    }
    VLOG(2) << "Looking for: " << operand_boundary.ToString();
    auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
    CHECK(hoisted_boundaries_it != hoisted_boundaries.end());
    Boundary hoisted_boundary = hoisted_boundaries_it->second;
    CHECK(hoisted_boundary.IsOutsideBranchUser());
    CHECK_EQ(hoisted_boundary.operands().size(), 1);
    new_operands.push_back(hoisted_boundary.operands()[0]);
  }
  HloInstruction* new_instruction = conditional->parent()->AddInstruction(
      branch0_inst->CloneWithNewOperands(branch0_inst->shape(), new_operands));
  VLOG(2) << "new instruction:" << new_instruction->ToString();
  // Remember the hoisted instruction so users of this boundary find it.
  Boundary hoisted_boundary(Boundary::Position::kOutsideBranchUser);
  hoisted_boundary.mutable_operands().push_back(new_instruction);
  hoisted_boundaries[boundary] = hoisted_boundary;
}
// Sinks the single outside instruction of `boundary` into every branch of
// `conditional`, cloning it once per branch. Operands are resolved from
// `hoisted_boundaries` if already sunk, otherwise they must be constants
// (cloned per branch) or get-tuple-elements of the conditional (replaced
// by the branch root's corresponding tuple operand). Records the new
// inside-branch boundary in `hoisted_boundaries`.
void CopyIntoConditional(
    Boundary& boundary, HloInstruction* conditional,
    absl::flat_hash_map<Boundary, Boundary>& hoisted_boundaries) {
  CHECK(boundary.IsOutsideBranchUser() || boundary.IsOutsideBranchOperand());
  CHECK_EQ(boundary.operands().size(), 1);
  int num_branches = conditional->branch_count();
  // new_operands[j] collects the operand list for the clone in branch j.
  std::vector<absl::InlinedVector<HloInstruction*, 4>> new_operands(
      num_branches);
  HloInstruction* op = boundary.operands()[0];
  for (HloInstruction* operand : op->operands()) {
    Boundary operand_boundary(boundary.GetPosition());
    operand_boundary.mutable_operands().push_back(operand);
    VLOG(2) << "Looking for: " << operand_boundary.ToString();
    auto hoisted_boundaries_it = hoisted_boundaries.find(operand_boundary);
    if (hoisted_boundaries_it != hoisted_boundaries.end()) {
      // Operand was already sunk: reuse its per-branch clones.
      Boundary hoisted_boundary = hoisted_boundaries_it->second;
      CHECK(hoisted_boundary.IsInsideBranch());
      CHECK_EQ(hoisted_boundary.operands().size(), num_branches);
      for (int j = 0; j < num_branches; ++j) {
        new_operands[j].push_back(hoisted_boundary.operands()[j]);
      }
    } else {
      for (int j = 0; j < num_branches; ++j) {
        switch (operand->opcode()) {
          case HloOpcode::kConstant: {
            // Constants are simply duplicated into each branch.
            auto new_operand =
                conditional->branch_computation(j)->AddInstruction(
                    operand->Clone());
            VLOG(2) << "new instruction:" << new_operand->ToString();
            new_operands[j].push_back(new_operand);
            break;
          }
          case HloOpcode::kGetTupleElement: {
            // GTE(conditional, i) inside branch j is just operand i of that
            // branch's root tuple.
            auto gte = Cast<HloGetTupleElementInstruction>(operand);
            int64_t index = gte->tuple_index();
            HloInstruction* root =
                conditional->branch_computation(j)->root_instruction();
            CHECK(root->opcode() == HloOpcode::kTuple &&
                  index < root->operand_count())
                << root->ToString() << " " << gte->ToString();
            auto new_operand = root->mutable_operand(index);
            VLOG(2) << "new instruction:" << new_operand->ToString();
            new_operands[j].push_back(new_operand);
            break;
          }
          default:
            LOG(FATAL) << "Unexpected out-of-boundary instruction:"
                       << operand->ToString() << "\n";
        }
      }
    }
  }
  // Clone the instruction into each branch with its per-branch operands and
  // record the resulting inside-branch boundary.
  Boundary hoisted_boundary(Boundary::Position::kInsideBranch);
  for (int j = 0; j < num_branches; ++j) {
    HloInstruction* new_instruction =
        conditional->branch_computation(j)->AddInstruction(
            op->CloneWithNewOperands(op->shape(), new_operands[j]));
    VLOG(2) << "new instruction:" << new_instruction->ToString();
    hoisted_boundary.mutable_operands().push_back(new_instruction);
  }
  hoisted_boundaries[boundary] = hoisted_boundary;
}
// Finds the root-tuple operand indices whose value is a "special" convert:
// a convert that appears at the same index in every branch root, with
// matching shapes, is used exactly once by the root, and neither feeds nor
// consumes another convert. Such converts can be moved out of the
// conditional as a unit.
absl::flat_hash_set<int64_t> FindSpecialConverts(HloInstruction* old_root,
                                                 int branch_count,
                                                 HloInstruction* conditional,
                                                 bool is_layout_sensitive) {
  absl::flat_hash_set<int64_t> special_convert;
  // A convert is disqualified if it is adjacent to another convert (either
  // as a user or as a producer): chained converts are not hoisted.
  auto convert_invalid =
      [](const HloInstruction* convert_set_candidate) -> bool {
    bool invalid_user = absl::c_any_of(
        convert_set_candidate->users(), [](const HloInstruction* user) -> bool {
          return (user->opcode() == HloOpcode::kConvert);
        });
    bool invalid_producer =
        absl::c_any_of(convert_set_candidate->operands(),
                       [](const HloInstruction* operand) -> bool {
                         return (operand->opcode() == HloOpcode::kConvert);
                       });
    return (invalid_user || invalid_producer);
  };
  for (int64_t operand_num = 0; operand_num < old_root->operand_count();
       ++operand_num) {
    if (old_root->operand(operand_num)->opcode() != HloOpcode::kConvert) {
      continue;
    }
    // `replica` stays true only if every other branch mirrors branch 0's
    // convert at this index.
    bool replica = true;
    HloInstruction* special_convert_candidate =
        old_root->mutable_operand(operand_num);
    // Reject converts the root consumes more than once: hoisting would
    // change the tuple's aliasing.
    auto repeated =
        absl::c_count_if(old_root->operands(),
                         [&](const HloInstruction* operand) -> bool {
                           return (special_convert_candidate == operand);
                         }) > 1;
    if (convert_invalid(special_convert_candidate) || repeated) {
      continue;
    }
    for (int others = 1; others < branch_count; ++others) {
      HloInstruction* others_root =
          conditional->branch_computation(others)->root_instruction();
      const HloInstruction* other_convert = others_root->operand(operand_num);
      if (other_convert->opcode() != HloOpcode::kConvert ||
          convert_invalid(other_convert)) {
        replica = false;
        break;
      }
      // Both the convert's result shape and its input shape must match
      // branch 0's (exactly or compatibly, per is_layout_sensitive).
      bool eq_shape =
          is_layout_sensitive
              ? ShapeUtil::Equal(other_convert->shape(),
                                 special_convert_candidate->shape()) &&
                    ShapeUtil::Equal(
                        other_convert->operand(0)->shape(),
                        special_convert_candidate->operand(0)->shape())
              : ShapeUtil::Compatible(other_convert->shape(),
                                      special_convert_candidate->shape()) &&
                    ShapeUtil::Compatible(
                        other_convert->operand(0)->shape(),
                        special_convert_candidate->operand(0)->shape());
      if (!eq_shape) {
        replica = false;
        break;
      }
      // Same single-use requirement applies in the other branch's root.
      auto repeated =
          absl::c_count_if(others_root->operands(),
                           [&](const HloInstruction* operand) -> bool {
                             return (special_convert_candidate == operand);
                           }) > 1;
      if (repeated) {
        replica = false;
        break;
      }
    }
    if (replica) {
      special_convert.insert(operand_num);
    }
  }
  return special_convert;
}
// Rewrites users of `conditional` so that its tuple result is consumed via
// a fresh tuple of per-element get-tuple-elements. If the conditional was
// the computation root, the new tuple becomes the root; otherwise all
// users other than the freshly created GTEs are redirected to the new
// tuple.
absl::Status RestructureConditionalInstruction(HloComputation* computation,
                                               HloInstruction* conditional) {
  HloInstruction* old_root = computation->root_instruction();
  // Create one GTE per element of the conditional's result tuple.
  std::vector<HloInstruction*> new_operands;
  int cur_index = 0;
  for (; cur_index < ShapeUtil::TupleElementCount(conditional->shape());
       ++cur_index) {
    new_operands.push_back(
        computation->AddInstruction(HloInstruction::CreateGetTupleElement(
            ShapeUtil::GetTupleElementShape(conditional->shape(), cur_index),
            conditional, cur_index)));
  }
  HloInstruction* new_tuple =
      computation->AddInstruction(HloInstruction::CreateTuple(new_operands));
  if (old_root == conditional) {
    computation->set_root_instruction(new_tuple);
  } else {
    // Redirect every pre-existing user of the conditional to the new tuple,
    // skipping the GTEs created above (they must keep reading the
    // conditional directly).
    std::vector<HloInstruction*> new_tuple_users;
    for (auto conditional_user : conditional->users()) {
      auto is_new_gte = absl::c_find_if(
          new_operands,
          [&](HloInstruction* instr) { return instr == conditional_user; });
      if (is_new_gte == new_operands.end()) {
        new_tuple_users.push_back(conditional_user);
      }
    }
    for (auto new_tuple_user : new_tuple_users) {
      TF_RETURN_IF_ERROR(
          conditional->ReplaceUseWith(new_tuple_user, new_tuple));
    }
  }
  VLOG(2) << "computation after root restructure:\n" << computation->ToString();
  return absl::OkStatus();
}
absl::StatusOr<bool> ConvertSpecialMove(HloInstruction* conditional,
bool is_layout_sensitive) {
int branch_count = conditional->branch_count();
if (branch_count <= 0) {
return false;
}
for (int branch_num = 0; branch_num < branch_count; ++branch_num) {
HloInstruction* branch_root =
conditional->branch_computation(branch_num)->root_instruction();
if (branch_root->opcode() != HloOpcode::kTuple) {
return false;
}
}
HloInstruction* old_root =
conditional->branch_computation(0)->root_instruction();
VLOG(2) << "BEFORE :" << conditional->GetModule()->ToString();
auto find_gte = [](const HloInstruction* conditional_result,
int64_t index) -> HloInstruction* {
for (HloInstruction* instr : conditional_result->users()) {
if (instr->opcode() != HloOpcode::kGetTupleElement) {
return nullptr;
}
if (instr->tuple_index() == index) {
return instr;
}
}
return nullptr;
}; | #include "xla/service/conditional_code_motion.h"
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace xla {
namespace conditional_opt {
using ConditionalCodeMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
// The conditional's branches both end in (convert, f32) tuples; only the
// convert is movable. After the pass, the entry root must combine a
// hoisted convert with a get-tuple-element for the remaining output.
TEST_F(ConditionalCodeMotionTest, MoveSubsetTupleOut) {
  absl::string_view hlo_string =
      R"(
HloModule RemoveDotOpOut
on_true {
  %arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
  %reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
  %convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
  ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.2894, %reshape.8493)
}
on_false {
  %arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
  %reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
  %add = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
  %convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
  ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(%convert.3604, %add)
}
ENTRY main {
  pred.1 = pred[] parameter(0)
  arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
  arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
  conditional = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
  get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
  get-first-index.2 = f32[2,512,364]{2,1,0} get-tuple-element(conditional), index=1
  ROOT result = (bf16[2,512,364]{2,1,0}, f32[2,512,364]{2,1,0}) tuple(get-first-index, get-first-index.2)
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(true, true);
  ASSERT_TRUE(pass.Run(&*module).value());
  // The convert was hoisted out; the other element stays behind a GTE.
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Tuple(op::Convert(), op::GetTupleElement())));
}
// A conditional whose false branch ends in a while loop must not be
// transformed: the pass's analysis should bail out and report no change.
TEST_F(ConditionalCodeMotionTest, VerifyConditionalAnalysisWithWhileTuple) {
  absl::string_view hlo_string =
      R"(
HloModule RemoveDotOpOut
body {
  %p_body = (f32[2], bf16[2], s32[]) parameter(0)
  %val = f32[2] get-tuple-element(p_body), index=0
  %val2 = bf16[2] get-tuple-element(p_body), index=1
  %const = s32[] constant(-1)
  ROOT root = (f32[2], bf16[2], s32[]) tuple(%val, %val2, %const)
}
condition {
  %p_cond = (f32[2], bf16[2], s32[]) parameter(0)
  %gte = s32[] get-tuple-element(%p_cond), index=2
  %const = s32[] constant(42)
  ROOT result = pred[] compare(%gte, %const), direction=EQ
}
on_true {
  %arg_tuple.1 = f32[2] parameter(0)
  %const = s32[] constant(42)
  %add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
  %convert.2894 = bf16[2] convert(f32[2] %add.8493)
  ROOT %tuple.1 = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
}
on_false {
  %arg_tuple.1 = f32[2] parameter(0)
  %const = s32[] constant(42)
  %add.8493 = f32[2] add(f32[2] %arg_tuple.1, f32[2] %arg_tuple.1)
  %convert.2894 = bf16[2] convert(f32[2] %add.8493)
  %while_init = (f32[2], bf16[2], s32[]) tuple(%add.8493, %convert.2894, %const)
  ROOT while = (f32[2], bf16[2], s32[]) while(%while_init), condition=condition, body=body
}
ENTRY main {
  pred.1 = pred[] parameter(0)
  arg_tuple.11 = f32[2] parameter(1)
  ROOT conditional = (f32[2], bf16[2], s32[]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(true, true);
  // No transformation is expected for this module.
  ASSERT_FALSE(pass.Run(&*module).value());
}
// When the conditional itself is the entry root and each branch ends in a
// single-convert tuple, the convert should be hoisted out so the new root
// is a tuple wrapping the hoisted convert.
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditionalRoot) {
  absl::string_view hlo_string =
      R"(
HloModule RemoveDotOpOut
on_true {
  %arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
  %reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
  %add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
  %convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
  ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
  %arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
  %reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
  %add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
  %sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
  %convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
  ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
  pred.1 = pred[] parameter(0)
  arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
  arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
  ROOT conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(true, true);
  ASSERT_TRUE(pass.Run(&*module).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
// Same as MoveConvertOutConditionalRoot, but the conditional's result is
// consumed through a get-tuple-element before being re-tupled; the convert
// should still be hoisted into the entry computation's root tuple.
TEST_F(ConditionalCodeMotionTest, MoveConvertOutConditional) {
  absl::string_view hlo_string =
      R"(
HloModule RemoveDotOpOut
on_true {
  %arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
  %reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
  %add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
  %convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
  ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
  %arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
  %get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
  %reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
  %add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
  %sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
  %convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
  ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
  pred.1 = pred[] parameter(0)
  arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
  arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
  conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
  get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
  ROOT result = (bf16[2,512,364]{2,1,0}) tuple(get-first-index)
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  ConditionalCodeMotion pass(true, true);
  ASSERT_TRUE(pass.Run(&*module).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, AllOf(op::Tuple(op::Convert())));
}
// The conditional instruction itself is an operand of the entry root (in
// addition to the get-tuple-element user), so the pass may not change the
// conditional's result shape. Expects Run to report no change.
TEST_F(ConditionalCodeMotionTest, ConditionalShapeNotMutable) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.8493, f32[2,512,364]{2,1,0} %reshape.8493)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %add.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%add.8493 = f32[2,512,364]{2,1,0} add(f32[2,512,364]{2,1,0} %reshape.9717, f32[2,512,364]{2,1,0} %reshape.9717)
%sub.8493 = f32[2,512,364]{2,1,0} subtract(f32[2,512,364]{2,1,0} %add.8493, f32[2,512,364]{2,1,0} %reshape.9717)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
ROOT result = (bf16[2,512,364]{2,1,0}, (bf16[2,512,364]{2,1,0})) tuple(get-first-index, conditional)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
// No transformation is expected: the conditional's shape must stay intact.
ASSERT_FALSE(pass.Run(&*module).value());
}
// The reshape+convert chains in both branches are hoisted out entirely,
// leaving each branch with a single instruction. The entry root becomes an
// add whose operands are the hoisted convert(reshape(gte(conditional)))
// chains.
TEST_F(ConditionalCodeMotionTest, MoveConvertOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveDotOpOut
on_true {
%arg_tuple.1 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%reshape.8493 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.1)
%convert.2894 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.8493)
ROOT %tuple.1 = ( bf16[2,512,364]{2,1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (f32[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = f32[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%reshape.9717 = f32[2,512,364]{2,1,0} reshape(f32[93184,4]{1,0} %get-tuple-element.3)
%convert.3604 = bf16[2,512,364]{2,1,0} convert(f32[2,512,364]{2,1,0} %reshape.9717), metadata={op_type="Cast" op_name="gradients/Cast_125_grad/Cast"}
ROOT %tuple.2 = (bf16[2,512,364]{2,1,0}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (f32[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (f32[93184,4]{1,0}) parameter(2)
conditional = (bf16[2,512,364]{2,1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = bf16[2,512,364]{2,1,0} get-tuple-element(conditional), index=0
add.1 = bf16[2,512,364]{2,1,0} add(bf16[2,512,364]{2,1,0} get-first-index, bf16[2,512,364]{2,1,0} get-first-index)
ROOT result = (bf16[2,512,364]{2,1,0}) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
CHECK_NE(conditional, nullptr);
// After hoisting, each branch should be reduced to a single instruction.
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 1);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 1);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional()))),
op::Convert(op::Reshape(op::GetTupleElement(op::Conditional())))))));
}
// Instructions whose operands are shared with other (non-movable) users must
// stay inside the branches. Verifies the expected post-pass instruction
// counts per branch and that only the multiply's constant factor is hoisted
// to the entry computation.
TEST_F(ConditionalCodeMotionTest, UserShareOperandCannotBeMoved) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.4, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
get-second-index = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-first-index, get-second-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
// Branch bodies shrink only partially: shared-operand users stay inside.
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 9);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 11);
// The pass may order the branch-root tuple elements either way, so locate
// the add/subtract outputs by opcode rather than by fixed index.
std::optional<int> on_false_sub_idx;
std::optional<int> on_false_add_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kAdd) {
on_false_add_idx = i;
} else if (root_operand->opcode() == HloOpcode::kSubtract) {
on_false_sub_idx = i;
}
}
ASSERT_TRUE(on_false_add_idx.has_value());
ASSERT_TRUE(on_false_sub_idx.has_value());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root,
AllOf(op::Add(
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_sub_idx),
op::Constant()),
op::GetTupleElement(op::Conditional(), *on_false_add_idx))));
}
// Regression test: both branch computations receive the SAME tuple operand,
// so boundary values alias across branches. Checks that the constant factor
// remains a branch-root element and that the hoisted multiply in the entry
// computation consumes the correct get-tuple-element indices.
TEST_F(ConditionalCodeMotionTest, ConditionalBoundaryAliasingBug) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.1), index=1
cos = f32[] cosine(get-tuple-element.2)
multiply.1 = f32[] multiply(get-tuple-element.1, cos)
ROOT res.1 = (f32[], f32[]) tuple(multiply.1, cos)
}
on_false {
arg_tuple.1 = (f32[], f32[]) parameter(0)
get-tuple-element.3 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.6 = f32[] constant(3)
multiply.2 = f32[] multiply(get-tuple-element.3, constant.6)
constant.2 = f32[] constant(0)
ROOT res.2 = (f32[], f32[]) tuple(multiply.2, constant.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
param.2 = f32[] parameter(1)
param.3 = f32[] parameter(2)
tuple = (f32[], f32[]) tuple(param.2, param.3)
conditional = (f32[], f32[])
conditional(pred.1, tuple, tuple), true_computation=on_true,
false_computation=on_false
get-tuple-element.3 = f32[] get-tuple-element(conditional), index=0
get-tuple-element.4 = f32[] get-tuple-element(conditional), index=1
ROOT result = f32[] add(get-tuple-element.3, get-tuple-element.4)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
const HloComputation* on_false = conditional->branch_computation(1);
// Locate the gte/constant outputs of the false branch by opcode, since the
// pass does not guarantee their tuple positions.
std::optional<int> on_false_gte_idx;
std::optional<int> on_false_const_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
} else if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
}
}
ASSERT_TRUE(on_false_gte_idx.has_value());
ASSERT_TRUE(on_false_const_idx.has_value());
// The multiply's constant factor (3.0) stays in the branch output.
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root->operand(0),
op::Multiply(
op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::GetTupleElement(op::Conditional(), *on_false_const_idx)));
}
// When adds are hoisted out, the conditional's root tuple elements change:
// the true branch forwards its parameter twice, and the false branch outputs
// a constant plus a get-tuple-element. Verifies the rewritten branch roots
// and the hoisted add expression in the entry computation.
TEST_F(ConditionalCodeMotionTest, ConditionalRootElementChanged) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(get-tuple-element.1, constant.2)
add.3 = f32[] add(add.1, add.2)
ROOT tuple.3 = (f32[]) tuple(add.3)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.3 = f32[] constant(1)
constant.4 = f32[] constant(2)
add.4 = f32[] add(constant.4, constant.3)
add.5 = f32[] add(get-tuple-element.2, constant.4)
add.6 = f32[] add(add.4, add.5)
ROOT tuple.4 = (f32[]) tuple(add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
conditional = (f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
get-first-index = f32[] get-tuple-element(conditional), index=0
ROOT result = f32[] add(get-first-index, get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
// True branch is reduced to parameter + two gte forwards in its root tuple.
const HloComputation* on_true = conditional->branch_computation(0);
EXPECT_EQ(on_true->instruction_count(), 3);
EXPECT_THAT(on_true->root_instruction(),
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 0)));
const HloComputation* on_false = conditional->branch_computation(1);
EXPECT_EQ(on_false->instruction_count(), 4);
// The constant/gte tuple positions are not guaranteed; find them by opcode.
std::optional<int> on_false_const_idx;
std::optional<int> on_false_gte_idx;
for (int i = 0; i < on_false->root_instruction()->operand_count(); ++i) {
const HloInstruction* root_operand =
on_false->root_instruction()->operand(i);
if (root_operand->opcode() == HloOpcode::kConstant) {
on_false_const_idx = i;
} else if (root_operand->opcode() == HloOpcode::kGetTupleElement) {
on_false_gte_idx = i;
}
}
ASSERT_TRUE(on_false_const_idx.has_value());
ASSERT_TRUE(on_false_gte_idx.has_value());
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0)));
EXPECT_THAT(on_false->root_instruction()->operand(*on_false_gte_idx),
op::GetTupleElement(op::Parameter(0), 0));
HloInstruction* root = module->entry_computation()->root_instruction();
// The hoisted computation of the former get-first-index value.
auto get_first_index_matcher = op::Add(
op::Add(op::GetTupleElement(op::Conditional(), *on_false_const_idx),
op::Constant(LiteralUtil::CreateR0<float>(1.0))),
op::Add(op::GetTupleElement(op::Conditional(), *on_false_gte_idx),
op::Constant(LiteralUtil::CreateR0<float>(2.0))));
EXPECT_THAT(root, op::Add(get_first_index_matcher, get_first_index_matcher));
}
// The conditional is the ROOT of the entry computation, so there is no user
// to hoist branch code into. Expects the pass to report no change.
TEST_F(ConditionalCodeMotionTest, ConditionalIsRootInstruction) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
on_true {
arg_tuple.1 = (f32[]) parameter(0)
get-tuple-element.1 = f32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = f32[] constant(1)
constant.2 = f32[] constant(2)
constant.3 = f32[] constant(3)
constant.4 = f32[] constant(4)
constant.5 = f32[] constant(5)
add.1 = f32[] add(get-tuple-element.1, constant.1)
add.2 = f32[] add(add.1, constant.2)
add.3 = f32[] add(add.1, constant.3)
add.4 = f32[] add(add.3, constant.5)
multiply.1 = f32[] multiply(add.2, constant.4)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.1, add.4)
}
on_false {
arg_tuple.2 = (f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(arg_tuple.2), index=0
constant.6 = f32[] constant(1)
constant.7 = f32[] constant(2)
constant.8 = f32[] constant(3)
constant.9 = f32[] constant(4)
constant.10 = f32[] constant(5)
add.4 = f32[] add(get-tuple-element.2, constant.6)
sub.1 = f32[] subtract(add.4, constant.7)
add.5 = f32[] add(add.4, constant.8)
add.6 = f32[] add(add.5, constant.10)
multiply.2 = f32[] multiply(sub.1, constant.9)
ROOT tuple.6 = (f32[], f32[]) tuple(multiply.2, add.6)
}
ENTRY main {
pred.1 = pred[] parameter(0)
tuple.1 = (f32[]) parameter(1)
tuple.2 = (f32[]) parameter(2)
ROOT conditional = (f32[], f32[])
conditional(pred.1, tuple.1, tuple.2), true_computation=on_true,
false_computation=on_false
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
// The two branches produce results with different layouts ({1,0} vs {0,1}),
// so the all-reduce/convert chain cannot be hoisted out of the conditional.
// Expects the pass to report no change.
TEST_F(ConditionalCodeMotionTest, LayoutMisMatchCannotMovedOut) {
absl::string_view hlo_string =
R"(
HloModule LayoutMisMatchCannotMovedOut
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
%arg_tuple.1 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.1 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.1), index=0
%all-reduce.1 = bf16[93184,4]{1,0}
all-reduce(bf16[93184,4]{1,0} %get-tuple-element.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64
%convert.2894 = f32[93184,4]{1,0} convert(bf16[93184, 4]{1,0} %all-reduce.1)
ROOT %tuple.1 = (f32[93184,4]{1,0}) tuple(%convert.2894)
}
on_false {
%arg_tuple.2 = (bf16[93184,4]{1,0}) parameter(0)
%get-tuple-element.3 = bf16[93184,4]{1,0} get-tuple-element(%arg_tuple.2), index=0
%copy.1 = bf16[93184,4]{0,1} copy(bf16[93184,4]{1,0} %get-tuple-element.3)
%all-reduce.2 = bf16[93184,4]{0, 1}
all-reduce(bf16[93184,4]{0, 1} %copy.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181
%convert.3604 = f32[93184,4]{0,1} convert(bf16[93184,4]{0,1} %all-reduce.2)
ROOT %tuple.2 = (f32[93184,4]{0,1}) tuple(%convert.3604)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.11 = (bf16[93184,4]{1,0}) parameter(1)
arg_tuple.22 = (bf16[93184,4]{1,0}) parameter(2)
conditional = (f32[93184,4]{1,0}) conditional(pred.1, arg_tuple.11, arg_tuple.22), true_computation=on_true, false_computation=on_false
get-first-index = f32[93184,4]{1,0} get-tuple-element(conditional), index=0
ROOT result = (f32[93184,4]{1,0}) tuple(get-first-index)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_FALSE(pass.Run(&*module).value());
}
// Cross-module all-reduce + convert chains at the end of both branches are
// hoisted out. After the pass, each branch keeps 5 instructions, the
// conditional's result element type changes from f32 to bf16 (the hoisted
// convert happens outside), and the entry root computes
// tuple(add(convert(all-reduce(gte)), convert(all-reduce(gte)))).
TEST_F(ConditionalCodeMotionTest, MoveCrossModuleAllReduceOut) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.1 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.1),
channel_id=188, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.64, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.1 = f32[3,3,128,128] convert(bf16[3,3,128,128] %all-reduce.1),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.1 = (f32[3,3,128,128]) tuple(convert.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
all-reduce.2 = bf16[3,3,128,128]
all-reduce(bf16[3,3,128,128] %convolution.2),
channel_id=485, replica_groups={{0,1}}, use_global_device_ids=true,
to_apply=%add.181, metadata={op_type="Conv2DBackpropFilter"
op_name="gradients/resnet50/conv2d_22/Conv2D_grad/Conv2DBackpropFilter"}
convert.2 = f32[3,3,128,128]
convert(bf16[3,3,128,128] %all-reduce.2),
metadata={op_type="Cast" op_name="Cast_15"}
ROOT tuple.2 = (f32[3,3,128,128]) tuple(convert.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional = (f32[3,3,128,128])
conditional(pred.1, arg_tuple.3, arg_tuple.4), true_computation=on_true,
false_computation=on_false
get-first-index = f32[3,3,128,128]
get-tuple-element(conditional), index=0
add.1 = f32[3,3,128,128] add(f32[3,3,128,128] get-first-index, f32[3,3,128,128] get-first-index)
ROOT result = (f32[3,3,128,128]) tuple(add.1)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
ConditionalCodeMotion pass(true, true);
ASSERT_TRUE(pass.Run(&*module).value());
const HloInstruction* conditional =
FindInstruction(module.get(), "conditional");
CHECK(conditional != nullptr);
const HloComputation* on_true = conditional->branch_computation(0);
ASSERT_EQ(on_true->instruction_count(), 5);
const HloComputation* on_false = conditional->branch_computation(1);
ASSERT_EQ(on_false->instruction_count(), 5);
// The conditional now yields the pre-convert bf16 value.
ASSERT_TRUE(ShapeUtil::Compatible(
conditional->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(
BF16, {3, 3, 128, 128})})));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
AllOf(op::Tuple(op::Add(
op::Convert(op::AllReduce(op::GetTupleElement(op::Conditional()))),
op::Convert(
op::AllReduce(op::GetTupleElement(op::Conditional())))))));
}
TEST_F(ConditionalCodeMotionTest, DoNotMoveAllReduceIn) {
absl::string_view hlo_string =
R"(
HloModule RemoveIdenticalInstruction
%add.64 (x.139: bf16[], y.139: bf16[]) -> bf16[] {
%x.139 = bf16[]{:T(512)} parameter(0)
%y.139 = bf16[]{:T(512)} parameter(1)
ROOT %add.44073 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.139, bf16[]{:T(512)} %y.139)
}
%add.181 (x.256: bf16[], y.256: bf16[]) -> bf16[] {
%x.256 = bf16[]{:T(512)} parameter(0)
%y.256 = bf16[]{:T(512)} parameter(1)
ROOT %add.44842 = bf16[]{:T(512)} add(bf16[]{:T(512)} %x.256, bf16[]{:T(512)} %y.256)
}
on_true {
arg_tuple.1 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(0)
get-tuple-element.11 = bf16[2,54,168,128] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.12 = bf16[2,52,168,128] get-tuple-element(arg_tuple.1), index=1
convolution.1 = bf16[3,3,128,128] convolution(bf16[2,54,168,128]
get-tuple-element.11, bf16[2,52,168,128]
get-tuple-element.12), window={size=52x168 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.1 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.1, bf16[3,3,128,128] convolution.1)
ROOT tuple.1 = (bf16[3,3,128,128]) tuple(add.1)
}
on_false {
arg_tuple.2 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(0)
get-tuple-element.21 = bf16[2,86,104,128]
get-tuple-element(arg_tuple.2), index=0
get-tuple-element.22 = bf16[2,84,104,128]
get-tuple-element(arg_tuple.2), index=1
convolution.2 = bf16[3,3,128,128]
convolution(bf16[2,86,104,128] get-tuple-element.21, bf16[2,84,104,128]
get-tuple-element.22), window={size=84x104 pad=0_0x1_1},
dim_labels=f01b_i01o->01bf
add.2 = bf16[3,3,128,128] add(bf16[3,3,128,128] convolution.2, bf16[3,3,128,128] convolution.2)
ROOT tuple.2 = (bf16[3,3,128,128]) tuple(add.2)
}
ENTRY main {
pred.1 = pred[] parameter(0)
arg_tuple.3 = (bf16[2,54,168,128], bf16[2,52,168,128]) parameter(1)
arg_tuple.4 = (bf16[2,86,104,128], bf16[2,84,104,128]) parameter(2)
arg_tuple.5 = f32[3,3,128,128] parameter(3)
conditional |
1,926 | cpp | tensorflow/tensorflow | while_loop_concat_code_motion | third_party/xla/xla/service/while_loop_concat_code_motion.cc | third_party/xla/xla/service/while_loop_concat_code_motion_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_CONCAT_CODE_MOTION_H_
#define XLA_SERVICE_WHILE_LOOP_CONCAT_CODE_MOTION_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO module pass that rewrites groups of per-element values inside while
// loops into concatenated form (see the ConcatGroup machinery in the .cc
// file). `min_operand_count_to_optimize` is the minimum group size the pass
// considers worth combining (per the parameter name; enforced inside Run,
// which is defined elsewhere).
class WhileLoopConcatCodeMotion : public HloModulePass {
public:
explicit WhileLoopConcatCodeMotion(int64_t min_operand_count_to_optimize)
: min_operand_count_to_optimize_(min_operand_count_to_optimize) {}
~WhileLoopConcatCodeMotion() override = default;
absl::string_view name() const override {
return "while-loop-concat-code-motion";
}
using HloPassInterface::Run;
// Runs the pass on `module`; returns true if the module was changed.
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const int64_t min_operand_count_to_optimize_;
};
}
#endif
#include "xla/service/while_loop_concat_code_motion.h"
#include <map>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
// A group of HLO instructions that are combined into one concatenated value
// along `concat_dim`.
//
// Two concat modes exist:
//  - inserted_concat_dim == true: the concat dimension does not exist on the
//    individual elements; a new size-1 dimension is inserted on each element
//    (via reshape) before concatenation, so element i lives at offset i with
//    size 1 on the new dimension.
//  - inserted_concat_dim == false: elements are concatenated along an
//    existing dimension; `element_sizes`/`element_offsets` record each
//    element's extent and starting offset on that dimension.
struct ConcatGroup {
ConcatGroup(std::vector<HloInstruction*> elements, int64_t concat_dim,
bool inserted_concat_dim)
: elements(std::move(elements)),
element_sizes(this->elements.size(), 1),
element_offsets(this->elements.size(), 0),
concat_dim(concat_dim),
inserted_concat_dim(inserted_concat_dim) {
if (inserted_concat_dim) {
// Each element occupies one slot: sizes stay 1, offsets are 0,1,2,...
absl::c_iota(element_offsets, 0);
} else {
// Accumulate per-element extents along the existing concat dimension.
for (int64_t i = 0; i < element_sizes.size(); ++i) {
element_sizes[i] = this->elements[i]->shape().dimensions(concat_dim);
if (i > 0) {
element_offsets[i] = element_offsets[i - 1] + element_sizes[i - 1];
}
}
}
}
// Returns the shape of the combined (concatenated) value.
Shape GetConcatShape() const {
if (inserted_concat_dim) {
// Build the element shape with a new dimension of size `elements.size()`
// inserted at `concat_dim` (appended at the end if concat_dim == rank).
std::vector<int64_t> dims;
const Shape& element_shape = elements.back()->shape();
dims.reserve(element_shape.rank() + 1);
for (int64_t i = 0; i < element_shape.rank(); ++i) {
if (i == concat_dim) {
dims.push_back(elements.size());
}
dims.push_back(element_shape.dimensions(i));
}
if (dims.size() == concat_dim) {
dims.push_back(elements.size());
}
return ShapeUtil::MakeShape(element_shape.element_type(), dims);
} else {
// Existing dimension: sum the element extents.
int64_t dim_size = 0;
for (int64_t size : element_sizes) {
dim_size += size;
}
Shape shape = elements.back()->shape();
shape.set_dimensions(concat_dim, dim_size);
return shape;
}
}
// Extracts element `element_index` from `full_data` (an instruction holding
// the combined value) by slicing along concat_dim; if the concat dimension
// was inserted, the size-1 dimension is reshaped away again.
HloInstruction* CreateSlice(HloInstruction* full_data, int64_t element_index,
HloComputation* comp) const {
Shape shape = full_data->shape();
shape.set_dimensions(concat_dim, element_sizes[element_index]);
std::vector<int64_t> starts(shape.rank(), 0);
std::vector<int64_t> limits(shape.dimensions().begin(),
shape.dimensions().end());
starts[concat_dim] = element_offsets[element_index];
// limits[concat_dim] currently holds the slice size; shift it by the
// start offset to get the exclusive end index.
limits[concat_dim] += starts[concat_dim];
auto slice = comp->AddInstruction(
HloInstruction::CreateSlice(shape, full_data, starts, limits,
std::vector<int64_t>(shape.rank(), 1)));
if (!inserted_concat_dim) {
return slice;
}
// Drop the inserted size-1 dimension to recover the element shape.
std::vector<int64_t> element_shape;
element_shape.reserve(shape.rank() - 1);
for (int64_t i = 0; i < shape.rank(); ++i) {
if (i != concat_dim) {
element_shape.push_back(shape.dimensions(i));
}
}
return comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(shape.element_type(), element_shape), slice));
}
// Builds the concatenate of `input_elements` in `comp`. If the concat
// dimension is inserted, each input is first reshaped to gain a size-1
// dimension at concat_dim.
HloInstruction* CreateConcat(std::vector<HloInstruction*> input_elements,
HloComputation* comp) const {
if (inserted_concat_dim) {
for (int64_t i = 0; i < input_elements.size(); ++i) {
std::vector<int64_t> element_shape;
element_shape.reserve(input_elements[i]->shape().rank() + 1);
for (int64_t j = 0; j < input_elements[i]->shape().rank(); ++j) {
if (j == concat_dim) {
element_shape.push_back(1);
}
element_shape.push_back(input_elements[i]->shape().dimensions(j));
}
// Handle concat_dim == rank (dimension appended at the end).
if (element_shape.size() == concat_dim) {
element_shape.push_back(1);
}
input_elements[i] = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(input_elements[i]->shape().element_type(),
element_shape),
input_elements[i]));
}
}
return comp->AddInstruction(HloInstruction::CreateConcatenate(
GetConcatShape(), input_elements, concat_dim));
}
std::vector<HloInstruction*> elements;    // Instructions being combined.
std::vector<int64_t> element_sizes;       // Extent of each element on concat_dim.
std::vector<int64_t> element_offsets;     // Start offset of each element on concat_dim.
int64_t concat_dim;                       // Dimension (in the combined shape) to concatenate on.
bool inserted_concat_dim;                 // True if concat_dim is newly inserted.
};
// Registry of ConcatGroups with a reverse index from each member instruction
// to its (group index, index within group) pair.
class ConcatGroups {
public:
// Returns the (group index, element index) for `hlo`, or nullopt if `hlo`
// is not a member of any registered group.
std::optional<std::pair<int64_t, int64_t>> GetGroupIndex(
const HloInstruction* hlo) const {
auto it = element_to_group_.find(hlo);
if (it == element_to_group_.end()) {
return std::nullopt;
}
return it->second;
}
const ConcatGroup& GetGroup(int64_t index) const { return groups_[index]; }
// Attempts to register `group`. Returns:
//   {true, new_index}       if a new group was created,
//   {false, existing_index} if an identical group is already registered,
//   {false, -1}             on conflict (an element is disallowed, or it
//                           belongs to a different/incompatible group).
std::pair<bool, int64_t> MaybeCreateNewGroup(ConcatGroup group) {
int64_t group_id = -1;
absl::flat_hash_set<HloInstruction*> elements_dedup;
for (int64_t i = 0; i < group.elements.size(); ++i) {
// Duplicates within the group are tolerated but logged.
if (!elements_dedup.insert(group.elements[i]).second) {
VLOG(2) << "Duplicates in group. Element: "
<< group.elements[i]->ToString();
}
if (concat_disallowed_.contains(group.elements[i])) {
VLOG(2) << "Failed creating group. Grouping disallowed on "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
// An element already in some group must occupy the same position and
// use the same concat dim for the groups to be considered identical.
auto existing = GetGroupIndex(group.elements[i]);
if (existing.has_value() &&
(i != existing->second ||
groups_[existing->first].concat_dim != group.concat_dim)) {
VLOG(2)
<< "Failed creating group. Different than existing group. Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
if (i == 0 && existing.has_value()) {
group_id = existing->first;
}
if (i > 0) {
// All elements must agree: either all map to the same existing group
// or none of them is registered yet.
if (existing.has_value() && existing->first != group_id) {
VLOG(2) << "Failed creating group. Different than existing group. "
"Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
if (!existing.has_value() && group_id >= 0) {
VLOG(2) << "Failed creating group. Different than existing group. "
"Element: "
<< group.elements[i]->ToString();
return std::pair<bool, int64_t>(false, -1);
}
}
}
if (group_id >= 0) {
VLOG(2) << "Group already exists at " << group_id << " for "
<< group.elements[0]->ToString();
return std::pair<bool, int64_t>(false, group_id);
}
int64_t index = groups_.size();
for (int64_t i = 0; i < group.elements.size(); ++i) {
element_to_group_[group.elements[i]] =
std::pair<int64_t, int64_t>(index, i);
}
VLOG(2) << "Created new group at " << index << " for "
<< group.elements[0]->ToString()
<< ", concat_dim: " << group.concat_dim
<< ", inserted: " << group.inserted_concat_dim;
groups_.push_back(std::move(group));
return std::pair<bool, int64_t>(true, index);
}
const std::vector<ConcatGroup>& Groups() const { return groups_; }
// Index the next created group would receive.
int64_t NextGroupIndex() const { return groups_.size(); }
// Removes every group with index >= start_index (and its reverse-map
// entries), e.g. to roll back speculatively created groups.
void RemoveTailingGroups(int64_t start_index) {
while (groups_.size() > start_index) {
for (auto element : groups_.back().elements) {
element_to_group_.erase(element);
}
groups_.pop_back();
}
}
// Marks `hlo` so that any future group containing it is rejected.
void DisallowGroupingOn(const HloInstruction* hlo) {
VLOG(2) << "Disallow grouping on " << hlo->ToString();
concat_disallowed_.insert(hlo);
}
private:
// Maps a member instruction to (group index, index within the group).
absl::flat_hash_map<const HloInstruction*, std::pair<int64_t, int64_t>>
element_to_group_;
std::vector<ConcatGroup> groups_;
absl::flat_hash_set<const HloInstruction*> concat_disallowed_;
};
// Given that `hlo`'s OUTPUT is concatenated along (hlo_concat_dim,
// hlo_inserted_concat_dim), computes the corresponding concat dimension on
// operand `operand_index`. Returns {operand_concat_dim,
// operand_inserted_concat_dim}, or nullopt if the opcode/operand combination
// is not supported. When `combined_operand_group` is non-null, the operand
// has already been combined and its last element's shape is used instead of
// the operand's own shape.
std::optional<std::pair<int64_t, bool>> GetOperandConcatDim(
const HloInstruction* hlo, int64_t operand_index, int64_t hlo_concat_dim,
bool hlo_inserted_concat_dim,
const ConcatGroup* combined_operand_group = nullptr) {
// Elementwise ops and all-reduce preserve dimensions 1:1.
if (hlo->IsElementwise() || hlo->opcode() == HloOpcode::kAllReduce) {
return std::pair<int64_t, bool>(hlo_concat_dim, hlo_inserted_concat_dim);
}
int64_t operand_concat_dim = -1;
bool operand_inserted_concat_dim = false;
const Shape& operand_shape =
combined_operand_group == nullptr
? hlo->operand(operand_index)->shape()
: combined_operand_group->elements.back()->shape();
if (hlo->opcode() == HloOpcode::kBroadcast) {
operand_concat_dim = 0;
operand_inserted_concat_dim = true;
// If some operand dim maps directly onto the output concat dim, use it;
// otherwise insert a new operand dim at the position closest (in output
// dim distance) to the concat dim.
int64_t min_dist_to_concat_dim = hlo->shape().rank();
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (hlo->dimensions(i) == hlo_concat_dim) {
operand_concat_dim = i;
operand_inserted_concat_dim = hlo_inserted_concat_dim;
break;
}
if (hlo->dimensions(i) < hlo_concat_dim &&
min_dist_to_concat_dim > hlo_concat_dim - hlo->dimensions(i)) {
operand_concat_dim = i + 1;
min_dist_to_concat_dim = hlo_concat_dim - hlo->dimensions(i);
}
if (hlo->dimensions(i) > hlo_concat_dim &&
min_dist_to_concat_dim > hlo->dimensions(i) - hlo_concat_dim) {
operand_concat_dim = i;
min_dist_to_concat_dim = hlo->dimensions(i) - hlo_concat_dim;
}
}
} else if (hlo->opcode() == HloOpcode::kReduce) {
// Only the reduced data operand (index 0) is supported, not the init.
if (operand_index != 0) {
return std::nullopt;
}
operand_concat_dim = hlo_concat_dim;
operand_inserted_concat_dim = hlo_inserted_concat_dim;
// Shift the concat dim right past every reduced dimension at or before
// it (reduce removes those dims from the output).
std::set<int64_t> sorted_reduce_dims;
for (int64_t dim : hlo->dimensions()) {
sorted_reduce_dims.insert(dim);
}
for (int64_t dim : sorted_reduce_dims) {
if ((hlo_inserted_concat_dim && dim < operand_concat_dim) ||
(!hlo_inserted_concat_dim && dim <= operand_concat_dim)) {
operand_concat_dim++;
}
}
} else if (hlo->opcode() == HloOpcode::kReshape) {
// Walk the operand (i) and output (j) dims in lockstep, matching equal
// extents and skipping size-1 dims, until the output concat dim is
// located on the operand side.
int64_t i = 0;
int64_t j = 0;
operand_inserted_concat_dim = false;
while (i < operand_shape.rank() || j <= hlo_concat_dim) {
if (i < operand_shape.rank() && j < hlo->shape().rank() &&
operand_shape.dimensions(i) == hlo->shape().dimensions(j)) {
if (j == hlo_concat_dim) {
// A matching dim only stays "inserted" if it is larger than 1.
operand_inserted_concat_dim =
hlo_inserted_concat_dim && operand_shape.dimensions(i) != 1;
operand_concat_dim = i;
break;
}
i++;
j++;
continue;
}
if (i < operand_shape.rank() && operand_shape.dimensions(i) == 1) {
if (j == hlo_concat_dim && hlo_inserted_concat_dim) {
operand_concat_dim = i;
break;
}
i++;
continue;
}
if (j == hlo_concat_dim) {
operand_concat_dim = i;
operand_inserted_concat_dim = true;
break;
}
if (j < hlo->shape().rank() && hlo->shape().dimensions(j) == 1) {
j++;
continue;
}
// Mismatched non-trivial dims: this reshape is not supported.
return std::nullopt;
}
} else {
return std::nullopt;
}
CHECK_GE(operand_concat_dim, 0);
return std::pair<int64_t, bool>(operand_concat_dim,
operand_inserted_concat_dim);
}
// Rewrites `hlo`'s shape (and, for broadcast/reduce, its dimension attributes)
// so that it operates on the concatenated shape described by `group`.
//
// - kBroadcast: the broadcast `dimensions()` mapping is recomputed so that the
//   operand's concat dimension maps to `group.concat_dim`, accounting for a
//   possibly newly-inserted dimension on either side.
// - kReduce: reduce dimensions are shifted up by one when the operand gained a
//   newly-inserted concat dimension.
void ModifyHloPropertiesForConcatShape(const ConcatGroup& group,
                                       HloInstruction* hlo) {
  *hlo->mutable_shape() = group.GetConcatShape();
  if (hlo->opcode() == HloOpcode::kBroadcast) {
    // Determine where the concat dimension lands on the broadcast operand.
    auto operand_dim = GetOperandConcatDim(
        group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
    CHECK(operand_dim.has_value());
    int64_t operand_concat_dim = operand_dim->first;
    bool operand_inserted_concat_dim = operand_dim->second;
    if (operand_inserted_concat_dim) {
      // The operand has one more dimension than the original broadcast mapped.
      CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size() + 1)
          << hlo->ToString();
    } else {
      CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size());
    }
    std::vector<int64_t> dims;
    const int64_t rank = hlo->operand(0)->shape().rank();
    dims.reserve(rank);
    for (int64_t i = 0; i < rank; ++i) {
      if (i == operand_concat_dim && operand_inserted_concat_dim) {
        // Newly-inserted operand dimension maps directly to the concat dim.
        dims.push_back(group.concat_dim);
      } else {
        if (i > operand_concat_dim && operand_inserted_concat_dim) {
          // Skip over the inserted dimension in the original mapping.
          dims.push_back(hlo->dimensions(i - 1));
        } else {
          dims.push_back(hlo->dimensions(i));
        }
        if (group.inserted_concat_dim && dims.back() >= group.concat_dim) {
          // Output gained a new dimension at group.concat_dim; shift mappings
          // at or beyond it.
          dims.back()++;
        }
      }
    }
    *hlo->mutable_dimensions() = std::move(dims);
  } else if (hlo->opcode() == HloOpcode::kReduce) {
    auto operand_dim = GetOperandConcatDim(
        group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
    // Validate the optional before dereferencing it (the original code read
    // ->first/->second before the CHECK, which is UB when it is empty).
    CHECK(operand_dim.has_value());
    int64_t operand_concat_dim = operand_dim->first;
    bool operand_inserted_concat_dim = operand_dim->second;
    if (operand_inserted_concat_dim) {
      // The operand gained a dimension; shift reduce dims at or beyond it.
      auto dims = hlo->mutable_dimensions();
      for (int64_t i = 0; i < dims->size(); ++i) {
        if ((*dims)[i] >= operand_concat_dim) {
          (*dims)[i]++;
        }
      }
    }
  }
}
// Starting from `concat` (a concatenate in the while body), walks backwards
// through the operands trying to form ConcatGroups of isomorphic instructions
// that can be replaced by single ops on concatenated shapes. Groups are
// explored in reverse topological order via a priority queue. Returns true if
// at least one new group was created; on any unsupported pattern, all groups
// created by this call are removed and false is returned.
bool GroupHlosForConcat(
    HloComputation* body, HloInstruction* concat,
    absl::flat_hash_map<const HloInstruction*, int64_t> topological_order,
    ConcatGroups* groups) {
  const int64_t group_size = concat->operand_count();
  // Group ids already encountered while exploring this subcomputation.
  absl::flat_hash_set<int64_t> used_groups;
  auto root_tuple = body->root_instruction();
  CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);
  // Priority queue keyed by negative topological order, so instructions
  // later in the computation are processed first.
  std::multimap<int64_t, ConcatGroup> pq;
  const int64_t first_group_id_to_create = groups->NextGroupIndex();
  // Rolls back every group created by this invocation and reports failure.
  auto fail_and_cleanup = [&] {
    VLOG(1) << "Failed to get the subcomputation to optimize for "
            << concat->ToString() << ", clear groups starting at "
            << first_group_id_to_create;
    groups->RemoveTailingGroups(first_group_id_to_create);
    return false;
  };
  struct GroupUse {
    int64_t group_id;            // < 0 if group creation was rejected.
    bool newly_created;          // True if this call registered the group.
    bool already_used_by_subcomp;  // True if seen before in this traversal.
  };
  auto maybe_create_group = [&](ConcatGroup group) {
    auto res = groups->MaybeCreateNewGroup(std::move(group));
    GroupUse use{res.second, false, false};
    if (res.second < 0) {
      return use;
    }
    use.newly_created = res.first;
    use.already_used_by_subcomp = !used_groups.insert(res.second).second;
    return use;
  };
  // Seed the queue with the operands of the concat itself.
  std::vector<HloInstruction*> concat_operands(concat->operands().begin(),
                                               concat->operands().end());
  int64_t concat_operand_order = -topological_order[concat_operands[0]];
  pq.emplace(concat_operand_order,
             ConcatGroup(std::move(concat_operands),
                         concat->concatenate_dimension(), false));
  while (!pq.empty()) {
    auto group = std::move(pq.begin()->second);
    pq.erase(pq.begin());
    const auto& hlos = group.elements;
    VLOG(2) << "GroupHlosForConcat dequeued " << hlos[0]->ToString();
    bool group_is_param_gtes = false;
    if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
          return element == hlos[0];
        })) {
      // All elements are the same instruction: it is a shared operand, which
      // must not itself be a member of any group.
      if (groups->GetGroupIndex(hlos[0]).has_value()) {
        VLOG(1) << "We do not support the case if a shared operand also part "
                   "of a group: "
                << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      groups->DisallowGroupingOn(hlos[0]);
      continue;
    }
    if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
          return element->opcode() == HloOpcode::kGetTupleElement &&
                 element->operand(0) == body->parameter_instruction(0);
        })) {
      // The group consists of GTEs on the loop parameter; the corresponding
      // root-tuple outputs must be grouped the same way (handled below).
      group_is_param_gtes = true;
    } else if (((hlos[0]->IsElementwise() ||
                 hlos[0]->opcode() == HloOpcode::kAllReduce) &&
                !hlos[0]->HasSideEffect()) ||
               hlos[0]->opcode() == HloOpcode::kBroadcast ||
               hlos[0]->opcode() == HloOpcode::kReduce ||
               hlos[0]->opcode() == HloOpcode::kReshape ||
               hlos[0]->IsCustomCall("Sharding")) {
      if (hlos[0]->opcode() == HloOpcode::kAllReduce &&
          (!hlos[0]->shape().IsArray() || hlos[0]->IsCrossModuleAllReduce())) {
        VLOG(2) << "Unsupported allreduce: " << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      // All elements must be structurally identical (ignoring operand values,
      // but requiring compatible shapes); reduces must share the init value.
      if (absl::c_any_of(hlos, [&](const HloInstruction* element) {
            auto eq_operand = [](const HloInstruction* a,
                                 const HloInstruction* b) {
              return ShapeUtil::Compatible(a->shape(), b->shape());
            };
            auto eq_computations = [](const HloComputation* lhs,
                                      const HloComputation* rhs) {
              return lhs->Equal(*rhs, false);
            };
            if (!hlos[0]->Identical(*element, eq_operand, eq_computations,
                                    false)) {
              return true;
            }
            if (element->opcode() == HloOpcode::kReduce &&
                (element->operand_count() != 2 ||
                 element->operand(1) != hlos[0]->operand(1))) {
              return true;
            }
            return false;
          })) {
        VLOG(2) << "Different types of elements. First element: "
                << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      int64_t input_count = hlos[0]->operand_count();
      if (hlos[0]->opcode() == HloOpcode::kReduce) {
        // Only the data operand participates in grouping; the shared init
        // value (operand 1) was validated above.
        CHECK_EQ(input_count, 2);
        input_count = 1;
      }
      // Enqueue a prospective group for each operand position.
      for (int64_t i = 0; i < input_count; ++i) {
        std::vector<HloInstruction*> elements(group_size);
        for (int64_t j = 0; j < group_size; ++j) {
          elements[j] = hlos[j]->mutable_operand(i);
        }
        auto maybe_new_concat_dim = GetOperandConcatDim(
            hlos[0], i, group.concat_dim, group.inserted_concat_dim);
        if (!maybe_new_concat_dim.has_value()) {
          VLOG(2) << "Cannot find operand concat dimension for operand " << i
                  << " of " << hlos[0]->ToString();
          return fail_and_cleanup();
        }
        int64_t new_group_concat_dim = maybe_new_concat_dim->first;
        bool inserted_concat_dim = maybe_new_concat_dim->second;
        int64_t element_order = -topological_order[elements[0]];
        pq.emplace(element_order,
                   ConcatGroup(std::move(elements), new_group_concat_dim,
                               inserted_concat_dim));
      }
    } else if (hlos[0]->opcode() == HloOpcode::kSlice) {
      // Slices are supported only when they jointly partition a single shared
      // operand along the concat dimension, with stride 1 and full extent on
      // every other dimension.
      int64_t offset = 0;
      auto operand = hlos[0]->operand(0);
      if (group.inserted_concat_dim) {
        VLOG(2) << "Slices cannot be grouped on new dimension.";
        return fail_and_cleanup();
      }
      if (groups->GetGroupIndex(operand).has_value()) {
        return fail_and_cleanup();
      }
      groups->DisallowGroupingOn(operand);
      for (int64_t i = 0; i < group_size; ++i) {
        if (hlos[i]->operand(0) != operand) {
          VLOG(2) << "Slices of different operands.";
          return fail_and_cleanup();
        }
        for (int64_t j = 0; j < hlos[i]->shape().rank(); ++j) {
          if (hlos[i]->slice_strides(j) != 1) {
            VLOG(2) << "Slices with strides.";
            return fail_and_cleanup();
          }
          if (j == group.concat_dim) {
            // Along the concat dim, slices must tile contiguously in order.
            if (hlos[i]->slice_starts(j) != offset) {
              VLOG(2) << "Slices with unsupported offsets.";
              return fail_and_cleanup();
            }
            offset += hlos[i]->shape().dimensions(j);
          } else {
            if (hlos[i]->slice_starts(j) != 0 ||
                hlos[i]->slice_limits(j) != operand->shape().dimensions(j)) {
              VLOG(2) << "Slice with unsupported offsets at dimension " << j
                      << ", " << hlos[i]->ToString();
              return fail_and_cleanup();
            }
          }
        }
      }
      // The slices together must cover the operand exactly.
      if (offset != operand->shape().dimensions(group.concat_dim)) {
        VLOG(2) << "Slices with unsupported sizes.";
        return fail_and_cleanup();
      }
    } else {
      VLOG(2) << "Unsupported opcode: " << hlos[0]->ToString();
      return fail_and_cleanup();
    }
    auto guse = maybe_create_group(std::move(group));
    if (guse.group_id < 0) {
      VLOG(2) << "Failed to create group.";
      return fail_and_cleanup();
    }
    const auto& registered_group = groups->GetGroup(guse.group_id);
    if (!guse.already_used_by_subcomp && group_is_param_gtes) {
      // Loop-carried values: the root-tuple operands feeding these parameter
      // GTEs must be grouped identically; enqueue them.
      std::vector<HloInstruction*> new_outputs(group_size);
      for (int64_t i = 0; i < group_size; ++i) {
        new_outputs[i] = root_tuple->mutable_operand(
            registered_group.elements[i]->tuple_index());
      }
      int64_t new_output_order = -topological_order[new_outputs[0]];
      pq.emplace(
          new_output_order,
          ConcatGroup(std::move(new_outputs), registered_group.concat_dim,
                      registered_group.inserted_concat_dim));
    }
  }
  return groups->Groups().size() > first_group_id_to_create;
}
// Returns a per-tuple-element bit vector indicating which elements of `loop`'s
// carried tuple are read by the while condition. If the condition's parameter
// has any user other than a get-tuple-element, every element is conservatively
// marked as used.
std::vector<bool> TupleElementsUsedInCond(HloInstruction* loop) {
  std::vector<bool> used(loop->shape().tuple_shapes_size(), false);
  auto cond_param = loop->while_condition()->parameter_instruction(0);
  for (auto user : cond_param->users()) {
    if (user->opcode() == HloOpcode::kGetTupleElement) {
      used[user->tuple_index()] = true;
    } else {
      // Opaque use of the whole parameter: assume everything is read.
      absl::c_fill(used, true);
      return used;
    }
  }
  return used;
}
// Inserts a kCopy between the body's root tuple and each array-shaped operand,
// then mirrors the groups of the parameter GTEs (`param_gtes`) onto the new
// copies so that loop-carried inputs and outputs are grouped consistently.
// Non-array tuple elements are left untouched.
absl::Status AddCopiesToRoot(HloComputation* body,
                             absl::Span<HloInstruction* const> param_gtes,
                             ConcatGroups* groups) {
  auto root = body->root_instruction();
  CHECK_EQ(root->opcode(), HloOpcode::kTuple);
  // copies[i] is the inserted copy for root operand i, or nullptr if the
  // element is not an array (and therefore was skipped).
  std::vector<HloInstruction*> copies(root->operand_count(), nullptr);
  for (int64_t i = 0; i < copies.size(); ++i) {
    auto element = root->mutable_operand(i);
    if (!element->shape().IsArray()) {
      continue;
    }
    copies[i] = body->AddInstruction(HloInstruction::CreateUnary(
        element->shape(), HloOpcode::kCopy, element));
    TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copies[i]));
  }
  // For each copy whose corresponding parameter GTE belongs to a group,
  // create the matching group of copies (same concat dim and insertion flag).
  for (int64_t i = 0; i < copies.size(); ++i) {
    auto copy = copies[i];
    // Skip copies already placed in a group via an earlier iteration.
    if (groups->GetGroupIndex(copy).has_value()) {
      continue;
    }
    auto param_group_index = groups->GetGroupIndex(param_gtes[i]);
    if (!param_group_index.has_value()) {
      continue;
    }
    const auto& param_group = groups->GetGroup(param_group_index->first);
    std::vector<HloInstruction*> copy_group(param_group.elements.size());
    for (int64_t j = 0; j < copy_group.size(); ++j) {
      // Map each grouped parameter GTE to the copy at the same tuple index.
      copy_group[j] = copies[param_group.elements[j]->tuple_index()];
    }
    // Group creation must succeed here since the copies are fresh.
    CHECK(groups
              ->MaybeCreateNewGroup(
                  ConcatGroup(std::move(copy_group), param_group.concat_dim,
                              param_group.inserted_concat_dim))
              .first);
  }
  return absl::OkStatus();
}
// Undoes AddCopiesToRoot: any kCopy feeding the body's root tuple is bypassed
// so the tuple consumes the copy's operand directly.
absl::Status RemoveCopiesFromRoot(HloComputation* body) {
  auto root = body->root_instruction();
  CHECK_EQ(root->opcode(), HloOpcode::kTuple);
  const int64_t num_operands = root->operand_count();
  for (int64_t index = 0; index < num_operands; ++index) {
    auto element = root->mutable_operand(index);
    if (element->opcode() != HloOpcode::kCopy) {
      continue;
    }
    TF_RETURN_IF_ERROR(
        root->ReplaceOperandWith(index, element->mutable_operand(0)));
  }
  return absl::OkStatus();
}
absl::Status RewriteLoopWithConcatGroups(
HloInstruction* loop, absl::Span<HloInstruction* const> param_gtes,
ConcatGroups& groups) {
VLOG(1) << "RewriteLoopWithConcatGroups with " << groups.Groups().size()
<< " groups.";
absl::flat_hash_set<int64_t> processed_groups;
auto body = loop->while_body();
auto param = body->parameter_instruction(0);
auto cond_param = loop->while_condition()->parameter_instruction(0);
std::vector<HloInstruction*> init_elements(loop->shape().tuple_shapes_size());
for (int64_t i = 0; i < param_gtes.size(); ++i) {
init_elements[i] =
loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
loop->shape().tuple_shapes(i), loop->mutable_operand(0), i));
}
for (int64_t i = 0; i < param_gtes.size(); ++i) {
const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);
if (!group_and_index.has_value() || group_and_index->second != 0) {
continue;
}
const auto& group = groups.GetGroup(group_and_index->first);
*param_gtes[i]->mutable_shape() = group.GetConcatShape();
*param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
*body->root_instruction()->mutable_shape()->mutable_tuple_shapes(i) =
param_gtes[i]->shape();
*cond_param->mutable_shape()->mutable_tuple_shapes(i) =
param_gtes[i]->shape();
*loop->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
processed_groups.insert(group_and_index->first);
std::vector<HloInstruction*> input_concat_elements;
input_concat_elements.reserve(group.elements.size());
for (auto param_gte : group.elements) {
input_concat_elements.push_back(init_elements[param_gte->tuple_index()]);
}
init_elements[i] = | #include "xla/service/while_loop_concat_code_motion.h"
#include <algorithm>
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
// Test fixture for the WhileLoopConcatCodeMotion pass; inherits HLO parsing
// and verification helpers from HloTestBase.
class WhileLoopConcatCodeMotionTest : public HloTestBase {};
// Two loop-carried f32[1024,1024] values that are concatenated, transformed,
// and sliced apart inside the body should be rewritten to carry a single
// f32[2048,1024] value through the loop, with the concat hoisted before the
// loop and the slices hoisted after it.
TEST_F(WhileLoopConcatCodeMotionTest, SimpleMotion) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
    %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
    %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
    %ccall2 = f32[1024,1024] custom-call(), custom_call_target="test2"
    %add.0 = f32[1024,1024] add(%slice.0, %ccall2)
    %add.1 = f32[1024,1024] add(%slice.1, %ccall2)
    %t0 = token[] after-all()
    %outfeed = token[] outfeed(%slice.1, %t0)
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
      tuple(%increment_iteration, %add.0, %add.1)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  ASSERT_TRUE(changed);
  VLOG(1) << module->ToString();
  // The loop now carries the concatenated value; the concat is hoisted into
  // the loop init and the slices into the entry computation's result tuple.
  auto loop = op::While(
      op::Tuple(op::Constant(),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(0), op::Parameter(1)))));
  ASSERT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop))));
  auto while_op =
      module->entry_computation()->root_instruction()->operand(0)->operand(0);
  // Inside the body, the shared custom-call operand is broadcast/reshaped to
  // match the concatenated shape.
  EXPECT_THAT(while_op->while_body()->root_instruction(),
              op::Tuple(op::Add(),
                        op::Add(op::CustomCall(),
                                op::Reshape(op::Broadcast(op::CustomCall())))));
}
// The body swaps the two slices when building the output tuple (slice.1 into
// index 1, slice.0 into index 2), so input and output groups do not line up
// and the pass must make no change.
TEST_F(WhileLoopConcatCodeMotionTest, NoMotionWithChangedElementOrder) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
    %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
    %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
      tuple(%increment_iteration, %slice.1, %slice.0)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  ASSERT_FALSE(changed);
}
// Two independent groups — the concat operands (gte.1/gte.2) and a second pair
// (gte.3/gte.4) used elementwise with the slices — should both be merged into
// concatenated loop-carried values.
TEST_F(WhileLoopConcatCodeMotionTest, CascadedConcats) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
    %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
    %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
    %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
    %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
    %add.0 = f32[1024,1024] add(%slice.0, %gte.3)
    %add.1 = f32[1024,1024] add(%slice.1, %gte.4)
    %add.2 = f32[1024,1024] add(%gte.3, %gte.3)
    %add.3 = f32[1024,1024] add(%gte.4, %gte.4)
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%increment_iteration, %add.0, %add.1, %add.2, %add.3)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %param.2 = f32[1024,1024] parameter(2)
    %param.3 = f32[1024,1024] parameter(3)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  ASSERT_TRUE(changed);
  VLOG(1) << module->ToString();
  // Both pairs are concatenated in the loop init; all four results are sliced
  // out of the loop outputs.
  auto loop = op::While(
      op::Tuple(op::Constant(),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(0), op::Parameter(1))),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(2), op::Parameter(3)))));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop))));
}
// Two concats whose slices are combined pairwise in matching order (add and
// subtract both pair slice.i with slice.i+2) share consistent groups, so both
// pairs should be merged into concatenated loop-carried values.
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsSharedGroups) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
    %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
    %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
    %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
    %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
    %concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
    %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
    %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
    %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
    %add.0 = f32[1024,1024] add(%slice.0, %slice.2)
    %add.1 = f32[1024,1024] add(%slice.1, %slice.3)
    %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
    %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %param.2 = f32[1024,1024] parameter(2)
    %param.3 = f32[1024,1024] parameter(3)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  ASSERT_TRUE(changed);
  VLOG(1) << module->ToString();
  auto loop = op::While(
      op::Tuple(op::Constant(),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(0), op::Parameter(1))),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(2), op::Parameter(3)))));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop))));
}
// The adds pair slices in a crossed order (slice.0 with slice.3, slice.1 with
// slice.2), so only the second pair (param 2/3) can be concatenated; the first
// pair stays as separate loop-carried values.
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsDifferentOrders) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
    %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
    %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
    %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
    %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
    %concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
    %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
    %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
    %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
    %add.0 = f32[1024,1024] add(%slice.0, %slice.3)
    %add.1 = f32[1024,1024] add(%slice.1, %slice.2)
    %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
    %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %param.2 = f32[1024,1024] parameter(2)
    %param.3 = f32[1024,1024] parameter(3)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
      while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(1) << module->ToString();
  // Only parameters 2 and 3 are merged; parameters 0 and 1 pass through.
  auto loop = op::While(
      op::Tuple(op::Constant(), op::Parameter(0), op::Parameter(1),
                AllOf(op::Shape("f32[2048,1024]"),
                      op::Concatenate(op::Parameter(2), op::Parameter(3)))));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::GetTupleElement(loop), op::GetTupleElement(loop),
                op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
                op::Slice(op::GetTupleElement(loop))));
}
// Exercises grouping through non-elementwise ops: reshapes (which insert a new
// concat dimension), reduces, and broadcasts, across values of three different
// ranks (matrices, vectors, and near-scalars).
TEST_F(WhileLoopConcatCodeMotionTest, NonElementwiseOps) {
  constexpr absl::string_view kHloModule = R"(
  HloModule test
  %cond {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %constant = s32[] constant(5)
    ROOT result = pred[] compare(%gte.0, %constant), direction=LT
  }
  %sum {
    %a = f32[] parameter(0)
    %b = f32[] parameter(1)
    ROOT %add = f32[] add(%a, %b)
  }
  %body {
    %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
    %gte.0 = s32[] get-tuple-element(%param), index=0
    %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
    %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
    %reshape.0 = f32[1,1024,1024] reshape(%gte.1)
    %reshape.1 = f32[1,1024,1024] reshape(%gte.2)
    %concat = f32[2,1024,1024] concatenate(%reshape.0, %reshape.1), dimensions={0}
    %ccall = f32[2,1024,1024] custom-call(%concat), custom_call_target="test"
    %slice.0 = f32[1,1024,1024] slice(%ccall), slice={[0:1], [0:1024], [0:1024]}
    %slice.1 = f32[1,1024,1024] slice(%ccall), slice={[1:2], [0:1024], [0:1024]}
    %reshape.2 = f32[1024,1024] reshape(%slice.0 )
    %reshape.3 = f32[1024,1024] reshape(%slice.1)
    %gte.3 = f32[1024] get-tuple-element(%param), index=3
    %gte.4 = f32[1024] get-tuple-element(%param), index=4
    %constant.0 = f32[] constant(0)
    %reduce.0 = f32[1024] reduce(%reshape.0, %constant.0), to_apply=%sum, dimensions={0,1}
    %reduce.1 = f32[1024] reduce(%reshape.1, %constant.0), to_apply=%sum, dimensions={0,1}
    %add.0 = f32[1024] add(%reduce.0, %gte.3)
    %add.1 = f32[1024] add(%reduce.1, %gte.4)
    %br0 = f32[1024,1024] broadcast(%add.0), dimensions={1}
    %br1 = f32[1024,1024] broadcast(%add.1), dimensions={1}
    %sub.0 = f32[1024,1024] subtract(%reshape.2, %br0)
    %sub.1 = f32[1024,1024] subtract(%reshape.3, %br1)
    %gte.5 = f32[1] get-tuple-element(%param), index=5
    %gte.6 = f32[1] get-tuple-element(%param), index=6
    %reshape.4 = f32[] reshape(%gte.5)
    %reshape.5 = f32[] reshape(%gte.6)
    %br2 = f32[1024] broadcast(%reshape.4), dimensions={}
    %br3 = f32[1024] broadcast(%reshape.5), dimensions={}
    %add.2 = f32[1024] add(%add.0, %br2)
    %add.3 = f32[1024] add(%add.1, %br3)
    %inc0 = f32[] add(%constant.0, %reshape.4)
    %inc1 = f32[] add(%constant.0, %reshape.5)
    %reshape.6 = f32[1] reshape(%inc0)
    %reshape.7 = f32[1] reshape(%inc1)
    %constant = s32[] constant(1)
    %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
    ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
      tuple(%increment_iteration, %sub.0, %sub.1, %add.2, %add.3, %reshape.6, %reshape.7)
  }
  ENTRY test_main {
    %param.0 = f32[1024,1024] parameter(0)
    %param.1 = f32[1024,1024] parameter(1)
    %param.2 = f32[1024] parameter(2)
    %param.3 = f32[1024] parameter(3)
    %param.4 = f32[1] parameter(4)
    %param.5 = f32[1] parameter(5)
    %constant.0 = s32[] constant(0)
    %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
      tuple(%constant.0, %param.0, %param.1, %param.2, %param.3, %param.4, %param.5)
    ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
      while(%while_init), condition=%cond, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopConcatCodeMotion(2).Run(module.get()));
  ASSERT_TRUE(changed);
  VLOG(1) << module->ToString();
  // All three rank groups are concatenated: matrices via reshapes on a new
  // leading dimension, vectors likewise, and the f32[1] pair directly.
  auto loop = op::While(
      op::Tuple(op::Constant(),
                AllOf(op::Shape("f32[2,1024,1024]"),
                      op::Concatenate(op::Reshape(op::Parameter(0)),
                                      op::Reshape(op::Parameter(1)))),
                AllOf(op::Shape("f32[2,1024]"),
                      op::Concatenate(op::Reshape(op::Parameter(2)),
                                      op::Reshape(op::Parameter(3)))),
                AllOf(op::Shape("f32[2]"),
                      op::Concatenate(op::Parameter(4), op::Parameter(5)))));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::GetTupleElement(loop),
                        op::Reshape(op::Slice(op::GetTupleElement(loop))),
                        op::Reshape(op::Slice(op::GetTupleElement(loop))),
                        op::Reshape(op::Slice(op::GetTupleElement(loop))),
                        op::Reshape(op::Slice(op::GetTupleElement(loop))),
                        op::Slice(op::GetTupleElement(loop)),
                        op::Slice(op::GetTupleElement(loop))));
}
}
} |
1,927 | cpp | tensorflow/tensorflow | gpu_compilation_environment | third_party/xla/xla/service/gpu_compilation_environment.cc | third_party/xla/xla/service/gpu_compilation_environment_test.cc | #ifndef XLA_SERVICE_GPU_COMPILATION_ENVIRONMENT_H_
#define XLA_SERVICE_GPU_COMPILATION_ENVIRONMENT_H_
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/xla.pb.h"
namespace xla {
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromFlagStrings(
std::vector<std::string>& flags, bool strict);
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromEnvVar();
GpuCompilationEnvironment CreateGpuCompEnvWithDefaultValues();
absl::Status InitializeMissingFieldsFromXLAFlags(
GpuCompilationEnvironment& env);
}
#endif
#include "xla/service/gpu_compilation_environment.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Registers every GpuCompilationEnvironment flag on `flag_list`, wiring each
// flag's parser to the matching proto setter on `gpu_comp_env`.
void InitializeFlagsForGpuCompEnv(std::vector<tsl::Flag>* flag_list,
                                  GpuCompilationEnvironment* gpu_comp_env) {
  // Builds a parse callback that forwards an int64 value to the given setter.
  auto make_int64_setter =
      [gpu_comp_env](void (GpuCompilationEnvironment::*setter)(int64_t)) {
        return [gpu_comp_env, setter](int64_t parsed_value) {
          (gpu_comp_env->*setter)(parsed_value);
          return true;
        };
      };
  flag_list->push_back(tsl::Flag(
      "dummy_flag",
      make_int64_setter(&GpuCompilationEnvironment::set_dummy_flag),
      gpu_comp_env->dummy_flag(), "Dummy flag to demonstrate the flow"));
}
// Parses `flags` (e.g. {"--dummy_flag=2"}) into a GpuCompilationEnvironment.
// Recognized flags are consumed from `flags`; when `strict` is true, any
// leftover unrecognized flag is an error.
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromFlagStrings(
    std::vector<std::string>& flags, bool strict) {
  GpuCompilationEnvironment env;
  std::vector<tsl::Flag> flag_defs;
  InitializeFlagsForGpuCompEnv(&flag_defs, &env);
  const bool parsed_ok = tsl::Flags::Parse(flags, flag_defs);
  if (!parsed_ok || (strict && !flags.empty())) {
    return InvalidArgument("Could not parse flags: %s",
                           absl::StrJoin(flags, ", "));
  }
  return env;
}
// Builds a GpuCompilationEnvironment from the XLA_FLAGS environment variable,
// ignoring any flags in it that this environment does not define.
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromEnvVar() {
  GpuCompilationEnvironment parsed_env;
  std::vector<tsl::Flag> flag_defs;
  InitializeFlagsForGpuCompEnv(&flag_defs, &parsed_env);
  ParseFlagsFromEnvAndIgnoreUnknown("XLA_FLAGS", flag_defs);
  return parsed_env;
}
// Returns a GpuCompilationEnvironment with every field explicitly set to its
// default value.
GpuCompilationEnvironment CreateGpuCompEnvWithDefaultValues() {
  GpuCompilationEnvironment default_env;
  default_env.set_dummy_flag(1);
  return default_env;
}
// Fills in any fields of `env` the caller left unset:
//  - a field set both in `env` and via XLA_FLAGS is an InvalidArgument error;
//  - fields set only via XLA_FLAGS are merged into `env`;
//  - fields set in neither place take the values from
//    CreateGpuCompEnvWithDefaultValues().
absl::Status InitializeMissingFieldsFromXLAFlags(
    GpuCompilationEnvironment& env) {
  TF_ASSIGN_OR_RETURN(GpuCompilationEnvironment from_env,
                      CreateGpuCompEnvFromEnvVar());
  auto default_env = CreateGpuCompEnvWithDefaultValues();
  auto reflection = env.GetReflection();
  auto reflection_from_env = from_env.GetReflection();
  auto descriptor = GpuCompilationEnvironment::descriptor();
  std::vector<const tsl::protobuf::FieldDescriptor*> missing_fields;
  // Classify each field: present in both sources is a conflict; present in
  // neither is remembered so it can receive the default value below.
  for (int j = 0; j < descriptor->field_count(); ++j) {
    const tsl::protobuf::FieldDescriptor* field = descriptor->field(j);
    if (reflection->HasField(env, field) &&
        reflection_from_env->HasField(from_env, field)) {
      return InvalidArgument(
          "Flag %s is set in both XLA_FLAGS env var and "
          "GpuCompilationEnvironment.",
          field->name());
    } else if (!reflection->HasField(env, field) &&
               !reflection_from_env->HasField(from_env, field)) {
      missing_fields.push_back(field);
    }
  }
  // Pull in values parsed from XLA_FLAGS (no conflicts exist at this point).
  env.MergeFrom(from_env);
  if (!missing_fields.empty()) {
    // SwapFields moves the default values into `env` for exactly the fields
    // neither source provided; `default_env` is a throwaway local.
    reflection->SwapFields(&env, &default_env, missing_fields);
  }
  return absl::OkStatus();
}
namespace {
// Processor registered with CompilationEnvironments for GPU environments:
// guarantees a non-null GpuCompilationEnvironment instance.
absl::StatusOr<std::unique_ptr<tsl::protobuf::Message>>
ProcessNewGpuCompilationEnvironment(
    std::unique_ptr<tsl::protobuf::Message> message) {
  if (message == nullptr) {
    // No environment supplied; fall back to a default-constructed one.
    message = std::make_unique<GpuCompilationEnvironment>();
  }
  return message;
}
}
}
// Registers the GPU compilation-environment processor at static-init time so
// CompilationEnvironments can materialize GpuCompilationEnvironment protos.
static bool InitModule() {
  xla::CompilationEnvironments::RegisterProcessNewEnvFn(
      xla::GpuCompilationEnvironment::descriptor(),
      xla::ProcessNewGpuCompilationEnvironment);
  return true;
}
static bool module_initialized = InitModule(); | #include "xla/service/gpu_compilation_environment.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
// Points the XLA_FLAGS environment variable at `xla_flags` for the current
// process, resetting any cached parse state first so the new value is seen.
void set_xla_flags_env_var(const std::string& xla_flags) {
  int* pargc;
  std::vector<char*>* pargv;
  // Drops the previously parsed flag state for XLA_FLAGS.
  ResetFlagsFromEnvForTesting("XLA_FLAGS", &pargc, &pargv);
  tsl::setenv("XLA_FLAGS", xla_flags.c_str(), true );
}
// Recognized flags parse into the proto and are consumed from `flags`.
TEST(CreateGpuCompEnvFromFlagStringsTest, ValidFlags) {
  std::vector<std::string> flags = {"--dummy_flag=2"};
  TF_ASSERT_OK_AND_ASSIGN(
      GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, true));
  ASSERT_EQ(gpu_comp_env.dummy_flag(), 2);
  ASSERT_TRUE(flags.empty());
}
// An empty flag list parses successfully even in strict mode.
TEST(CreateGpuCompEnvFromFlagStringsTest, EmptyFlags) {
  std::vector<std::string> flags;
  TF_ASSERT_OK_AND_ASSIGN(
      GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, true));
}
// Unknown flags fail in strict mode but are left behind (not consumed) in
// non-strict mode.
TEST(CreateGpuCompEnvFromFlagStringsTest, InvalidFlagName) {
  std::vector<std::string> flags = {"--xla_gpu_invalid_flag=2"};
  EXPECT_THAT(CreateGpuCompEnvFromFlagStrings(flags, true),
              StatusIs(tsl::error::INVALID_ARGUMENT));
  TF_ASSERT_OK_AND_ASSIGN(
      GpuCompilationEnvironment gpu_comp_env,
      CreateGpuCompEnvFromFlagStrings(flags, false));
  ASSERT_EQ(flags.size(), 1);
}
// Values from the XLA_FLAGS env var are picked up.
TEST(CreateGpuCompEnvFromEnvVarTest, ValidFlags) {
  set_xla_flags_env_var("--dummy_flag=4");
  TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment gpu_comp_env,
                          CreateGpuCompEnvFromEnvVar());
  ASSERT_EQ(gpu_comp_env.dummy_flag(), 4);
}
// Neither proto nor env var set -> built-in default (1) wins.
TEST(InitializeMissingFieldsFromXLAFlagsTest, BothProtoAndEnvVarUnset) {
  set_xla_flags_env_var("");
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 1);
}
// Only the proto is set -> its value is preserved.
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoSetButEnvVarUnset) {
  set_xla_flags_env_var("");
  GpuCompilationEnvironment env;
  env.set_dummy_flag(2);
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 2);
}
// Only the env var is set -> its value is merged in.
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoUnsetButEnvVarSet) {
  set_xla_flags_env_var("--dummy_flag=4");
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 4);
}
// Env var set, proto field left unset -> no conflict, env var value used.
TEST(InitializeMissingFieldsFromXLAFlagsTest,
     BothProtoAndEnvVarSetButNoConflict) {
  set_xla_flags_env_var("--dummy_flag=4");
  CompilationEnvironments envs;
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 4);
}
// Same field set in both the proto and XLA_FLAGS -> InvalidArgument.
TEST(InitializeMissingFieldsFromXLAFlagsTest,
     BothProtoAndEnvVarSetWithConflict) {
  set_xla_flags_env_var("--dummy_flag=4");
  CompilationEnvironments envs;
  GpuCompilationEnvironment env;
  env.set_dummy_flag(2);
  EXPECT_THAT(InitializeMissingFieldsFromXLAFlags(env),
              StatusIs(tsl::error::INVALID_ARGUMENT));
}
}
} |
1,928 | cpp | tensorflow/tensorflow | all_reduce_promotion | third_party/xla/xla/service/all_reduce_promotion.cc | third_party/xla/xla/service/all_reduce_promotion_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_PROMOTION_H_
#define XLA_SERVICE_ALL_REDUCE_PROMOTION_H_
#include <utility>
#include "xla/service/change_op_data_type.h"
namespace xla {
class AllReducePromotion : public HloModulePass {
public:
explicit AllReducePromotion(
absl::Span<std::pair<PrimitiveType, PrimitiveType> const> from_to_types);
absl::string_view name() const override { return "all-reduce-promotion"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
ChangeOpDataType pass_;
};
}
#endif
#include "xla/service/all_reduce_promotion.h"
#include <memory>
#include <string>
#include <utility>
namespace xla {
namespace {
bool IsAllReduce(const HloInstruction* inst) {
return inst->opcode() == HloOpcode::kAllReduce ||
inst->opcode() == HloOpcode::kReduceScatter;
}
// Clones `inst` (an all-reduce or reduce-scatter) with the given shape and
// operands, and rebuilds its reduction computation so its scalar parameters
// use the promoted element type of `shape`.
std::unique_ptr<HloInstruction> CloneAllReduce(
    const HloInstruction* inst, const Shape& shape,
    absl::Span<HloInstruction* const> operands) {
  std::unique_ptr<HloInstruction> new_inst =
      inst->CloneWithNewOperands(shape, operands);
  HloComputation* to_apply = new_inst->to_apply();
  HloComputation* to_apply_promoted = [&]() {
    PrimitiveType type = shape.element_type();
    std::string name = absl::StrCat(to_apply->name(), "_promoted");
    HloComputation::Builder promoted(name);
    auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(type, {}), "x"));
    auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(type, {}), "y"));
    // Reuses the opcode of the original reduction root (e.g. add) on the
    // promoted scalar type.
    promoted.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(type, {}), to_apply->root_instruction()->opcode(),
        x, y));
    return inst->GetModule()->AddEmbeddedComputation(promoted.Build());
  }();
  new_inst->set_to_apply(to_apply_promoted);
  to_apply_promoted->SetCollectiveCallInstruction(new_inst.get());
  return new_inst;
}
}
// Configures the underlying ChangeOpDataType pass to rewrite all-reduce and
// reduce-scatter ops from each `from` type to the paired `to` type, using
// CloneAllReduce to also promote the reduction computation.
AllReducePromotion::AllReducePromotion(
    absl::Span<std::pair<PrimitiveType, PrimitiveType> const> from_to_types)
    : pass_(from_to_types, IsAllReduce, CloneAllReduce) {}
// Delegates entirely to the wrapped ChangeOpDataType pass.
absl::StatusOr<bool> AllReducePromotion::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  return pass_.Run(module, execution_threads);
}
} | #include "xla/service/all_reduce_promotion.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
// Fixture with a promotion pass mapping u16->u32 and s16->s32.
class AllReducePromotionTest : public HloTestBase {
 public:
  AllReducePromotion pass_{{{U16, U32}, {S16, S32}}};
};
// A u16 all-reduce is rewritten as convert -> u32 all-reduce -> convert.
TEST_F(AllReducePromotionTest, SimplePromotionAllReduce) {
  absl::string_view hlo_text = R"(
  HloModule test
  sum {
    a = u16[] parameter(0)
    b = u16[] parameter(1)
    ROOT add.2 = u16[] add(a, b)
  }
  ENTRY test_computation {
    id32 = u32[] replica-id()
    id = u16[] convert(id32)
    id2 = u16[2] broadcast(id), dimensions={}
    a0 = u16[2] constant({10, 15})
    a1 = u16[2] add(id2, a0)
    ROOT cp = u16[2] all-reduce(a1), replica_groups={}, to_apply=sum
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Convert(m::AllReduce(m::Convert().WithShape(U32, {2}))
                                .WithShape(U32, {2}))
                     .WithShape(U16, {2})));
}
// A u16 reduce-scatter is likewise promoted through u32.
TEST_F(AllReducePromotionTest, SimplePromotionReduceScatter) {
  absl::string_view hlo_text = R"(
  HloModule test
  sum {
    a = u16[] parameter(0)
    b = u16[] parameter(1)
    ROOT add.2 = u16[] add(a, b)
  }
  ENTRY test_computation {
    id32 = u32[] replica-id()
    id = u16[] convert(id32)
    id2 = u16[2] broadcast(id), dimensions={}
    a0 = u16[2] constant({10, 15})
    a1 = u16[2] add(id2, a0)
    ROOT cp = u16[1] reduce-scatter(a1), dimensions={0}, replica_groups={}, to_apply=sum
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Convert(m::ReduceScatter(m::Convert().WithShape(U32, {2}))
                                .WithShape(U32, {1}))
                     .WithShape(U16, {1})));
}
}
} |
1,929 | cpp | tensorflow/tensorflow | while_loop_unroller | third_party/xla/xla/service/while_loop_unroller.cc | third_party/xla/xla/service/while_loop_unroller_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_UNROLLER_H_
#define XLA_SERVICE_WHILE_LOOP_UNROLLER_H_
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/pattern_matcher.h"
namespace xla {
// Config for an unrollable while loop with a statically-known trip count.
struct WhileLoopConfig {
  // Initial value of the loop induction variable.
  int64_t init;
  // Number of iterations the loop executes.
  int64_t trip_count;
  // Index of the induction variable in the loop's input tuple.
  int64_t induction_var_idx;
};
// Returns the dynamic dimension index if `instr` is a dynamic-slice or
// dynamic-update-slice on `input` whose single dynamic start index is the
// loop induction variable and that dimension's size equals the trip count
// (i.e. the loop fully covers the dimension); std::nullopt otherwise.
std::optional<int64_t> MatchShapeCoveringDynamicIndexInstruction(
    HloInstruction* instr, HloInstruction* input, HloOpcode opcode,
    const WhileLoopConfig& config);
// Pass that fully unrolls while loops with a statically-known trivial trip
// count, optionally wrapping the unrolled body in a single-iteration loop.
class WhileLoopUnroller : public HloModulePass {
 public:
  ~WhileLoopUnroller() override = default;
  // `unroll_factor` of -1 means full unrolling (the only supported mode);
  // `wrap_in_trivial_loop` keeps a while wrapper that executes exactly once.
  explicit WhileLoopUnroller(int64_t unroll_factor = -1,
                             bool wrap_in_trivial_loop = false)
      : unroll_factor_(unroll_factor),
        wrap_in_trivial_loop_(wrap_in_trivial_loop) {}
  absl::string_view name() const override { return "while_loop_unroller"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Runs simplification passes (CSE, tuple simplifier, constant sinking)
  // that make loops easier to analyze for unrolling.
  static absl::StatusOr<bool> PrepareModuleForUnrolling(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads);
  // Returns the loop config if `while_op` is structurally unrollable.
  static std::optional<WhileLoopConfig> IsLoopUnrollable(
      HloInstruction* while_op);
  // Collects all while ops in `module` that pass both the unrollability and
  // feasibility checks, paired with their configs.
  static std::vector<std::pair<HloInstruction*, WhileLoopConfig>>
  GetUnrollableLoops(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads);
  // Unrolls a single while op; `force_unroll` skips the heuristic
  // feasibility check. Returns true if the loop was unrolled.
  static absl::StatusOr<bool> Unroll(HloInstruction* while_op,
                                     int64_t unroll_factor = -1,
                                     bool wrap_in_trivial_loop = false,
                                     bool force_unroll = false);
 private:
  int64_t unroll_factor_;
  bool wrap_in_trivial_loop_;
};
}
#endif
#include "xla/service/while_loop_unroller.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/overflow_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using hlo_query::ContainsInstrWithOpcode;
const int kUnrollTripCountThreshold = 64;
const int kUnrollInstructionCountThreshold = 800;
const int kUnrollExpandFactorThreshold = 10000;
// Builds a loop condition computing `indvar <= init_value` over the same
// parameter shape as `while_op`'s condition. Used to wrap an unrolled body in
// a trivial loop that executes exactly once.
std::unique_ptr<HloComputation> MakeTrivialLoopCondition(
    HloInstruction* while_op, std::string_view name, int64_t induction_idx,
    int64_t init_value) {
  auto condition_builder = HloComputation::Builder(name);
  absl::StatusOr<HloInstruction*> param_instruction =
      condition_builder.AddParameter(
          while_op->while_condition()->parameter_instruction(0)->Clone());
  HloInstruction* indvar_instruction =
      condition_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
          param_instruction.value(), induction_idx));
  HloInstruction* init_value_constant = condition_builder.AddInstruction(
      MakeConstantWithShape(indvar_instruction->shape(), init_value));
  // `indvar <= init` is true only on the first iteration, so the wrapped loop
  // body runs once.
  return condition_builder.Build(
      condition_builder.AddInstruction(HloInstruction::CreateCompare(
          ShapeUtil::MakeShape(PrimitiveType::PRED, {}), indvar_instruction,
          init_value_constant, ComparisonDirection::kLe)));
}
// Replaces the "DynamicGte"/"DynamicTuple" custom-calls with a concrete
// get-tuple-element / tuple, now that the iteration index `iter_num` is a
// known constant. Instructions that are neither custom-call are left alone.
absl::Status HandleDynamicGteOrTuple(HloInstruction* instr, int64_t iter_num) {
  if (instr->IsCustomCall("DynamicGte")) {
    return instr->parent()->ReplaceInstruction(
        instr, instr->AddInstruction(HloInstruction::CreateGetTupleElement(
                   instr->mutable_operand(0), iter_num)));
  } else if (instr->IsCustomCall("DynamicTuple")) {
    std::vector<HloInstruction*> tuple_operands;
    for (int64_t i = 0; i < instr->operand(0)->shape().tuple_shapes_size();
         i++) {
      if (i == iter_num) {
        // The element at the iteration index takes the updated value
        // (operand 1 of the custom-call).
        tuple_operands.push_back(instr->mutable_operand(1));
      } else {
        // All other elements are forwarded unchanged from the input tuple.
        HloInstruction* slice =
            instr->AddInstruction(HloInstruction::CreateGetTupleElement(
                instr->mutable_operand(0), i));
        tuple_operands.push_back(slice);
      }
    }
    return instr->parent()->ReplaceInstruction(
        instr,
        instr->AddInstruction(HloInstruction::CreateTuple(tuple_operands)));
  }
  return absl::OkStatus();
}
// Clones the while body for one iteration, replacing reads of the induction
// variable with the constant `induction_value` and assigning fresh channel
// ids to any collectives (unrolled iterations must not share channel ids).
absl::StatusOr<std::unique_ptr<HloComputation>>
UnrollSingleIterationOfTrivialLoop(HloInstruction* while_op,
                                   WhileLoopConfig config,
                                   const int64_t induction_value) {
  std::unique_ptr<HloComputation> while_body_clone =
      while_op->while_body()->Clone(
          absl::StrCat(while_op->name(), induction_value));
  HloInstruction* induction_var_hlo =
      while_op->mutable_operand(0)->mutable_operand(config.induction_var_idx);
  int64_t unique_channel_id = hlo_query::NextChannelId(*while_op->GetModule());
  for (HloInstruction* body_inst : while_body_clone->instructions()) {
    HloInstruction* collective = IsOrHasCollectiveWithChannelId(body_inst);
    if (collective != nullptr) {
      // Re-number so each unrolled iteration gets a distinct channel id.
      collective->set_channel_id(unique_channel_id++);
    }
    // Only gte(param0, induction_var_idx), i.e. reads of the induction
    // variable, are rewritten below.
    if (!Match(body_inst,
               match::GetTupleElement(match::Parameter().WithParameterNum(0))
                   .WithTupleIndex(config.induction_var_idx))) {
      continue;
    }
    // Snapshot the users first: replacing operands below mutates the use
    // list we'd otherwise be iterating.
    std::vector<HloInstruction*> indvar_uses;
    indvar_uses.reserve(body_inst->users().size());
    for (HloInstruction* indvar_use : body_inst->users()) {
      indvar_uses.push_back(indvar_use);
    }
    HloInstruction* induction_value_constant = while_body_clone->AddInstruction(
        MakeConstantWithShape(induction_var_hlo->shape(), induction_value));
    for (HloInstruction* indvar_use : indvar_uses) {
      // Keep the induction-variable increment (gte + constant) intact so the
      // cloned body still produces the next induction value in its output.
      if (Match(indvar_use, match::Add(match::GetTupleElement().WithTupleIndex(
                                           config.induction_var_idx),
                                       match::Constant()))) {
        continue;
      }
      // Lower DynamicGte/DynamicTuple custom-calls now that the iteration
      // number is a constant.
      CHECK_OK(HandleDynamicGteOrTuple(indvar_use, induction_value));
      for (int64_t i = 0; i < indvar_use->operand_count(); ++i) {
        const HloInstruction* indvar_use_operand = indvar_use->operand(i);
        if (indvar_use_operand == body_inst) {
          CHECK_OK(indvar_use->ReplaceOperandWith(i, induction_value_constant));
        }
      }
    }
  }
  return while_body_clone;
}
// Heuristic gate deciding whether unrolling `while_op` is worthwhile.
// Rejects loops whose body is too large, whose trip count exceeds the
// threshold, or whose unrolled size (trip_count * body size) would explode.
bool InitialFeasibilityCheck(HloInstruction* while_op, WhileLoopConfig config) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  VLOG(5) << "Trying to unroll " << while_op->ToShortString();
  if (while_op->while_body()->instruction_count() >
      kUnrollInstructionCountThreshold) {
    VLOG(5) << absl::StrCat(
        "Cannot unroll while loop. Too many instructions in the body: ",
        while_op->while_body()->instruction_count());
    return false;
  }
  if (config.trip_count > kUnrollTripCountThreshold) {
    // Fixed typo in the log message: "tip count" -> "trip count".
    VLOG(5) << absl::StrCat(
        "Cannot unroll while loop. The trip count is greater "
        "than the threshold: ",
        config.trip_count, " vs ", kUnrollTripCountThreshold);
    return false;
  }
  // Total code-size expansion after unrolling is trip_count * body size.
  if (config.trip_count * while_op->while_body()->instruction_count() >
      kUnrollExpandFactorThreshold) {
    VLOG(5) << absl::StrCat(
        "Not attempting to unroll due to instruction count "
        "increase explosion. New instruction count: ",
        config.trip_count * while_op->while_body()->instruction_count(), " vs ",
        kUnrollExpandFactorThreshold);
    return false;
  }
  return true;
}
// Fully unrolls `while_op` in place: emits one call per iteration, chaining
// each call's result into the next, then replaces the while with the final
// call's result.
absl::StatusOr<bool> UnrollInternal(HloInstruction* while_op,
                                    WhileLoopConfig config) {
  VLOG(3) << "Unrolling while instruction " << while_op->ToShortString()
          << " with body instruction count "
          << while_op->while_body()->instruction_count();
  HloModule* module = while_op->GetModule();
  HloComputation* computation = while_op->parent();
  // NOTE(review): left uninitialized; if trip_count were 0 the loop below
  // would not execute and this would be read uninitialized — callers appear
  // to guarantee a positive trip count, but confirm.
  HloInstruction* unrolled_body_call_op;
  std::vector<HloInstruction*> call_operands = {while_op->operands().at(0)};
  for (int64_t i = config.init; i < config.trip_count + config.init; ++i) {
    CHECK(OverflowSafeAdd(i, (int64_t)1).has_value());
    HloComputation* unrolled_body = module->AddEmbeddedComputation(
        UnrollSingleIterationOfTrivialLoop(while_op, config, i).value());
    unrolled_body_call_op =
        computation->AddInstruction(HloInstruction::CreateCall(
            while_op->shape(), call_operands, unrolled_body));
    // Each iteration's call consumes the previous iteration's result.
    call_operands.clear();
    call_operands.emplace_back(unrolled_body_call_op);
  }
  TF_RETURN_IF_ERROR(
      computation->ReplaceInstruction(while_op, unrolled_body_call_op));
  // Restore the single-caller invariant across the newly added calls.
  TF_RETURN_IF_ERROR(FlattenCallGraph().Run(module).status());
  return true;
}
// Like UnrollInternal, but places the chained per-iteration calls inside a new
// while body and wraps them in a trivial loop whose condition (built by
// MakeTrivialLoopCondition) executes exactly once.
absl::StatusOr<bool> UnrollInternalWrapped(HloInstruction* while_op,
                                           WhileLoopConfig config) {
  VLOG(3) << "Unrolling (wrapped) while instruction "
          << while_op->ToShortString() << " with body instruction count "
          << while_op->while_body()->instruction_count();
  HloModule* module = while_op->GetModule();
  HloComputation* computation = while_op->parent();
  HloInstruction* unrolled_body_call_op;
  std::vector<HloInstruction*> call_operands;
  auto body_builder =
      HloComputation::Builder(absl::StrCat("unrolled-body-", while_op->name()));
  absl::StatusOr<HloInstruction*> p = body_builder.AddParameter(
      while_op->while_body()->parameter_instruction(0)->Clone());
  // The new body's parameter feeds the first iteration's call.
  call_operands.emplace_back(std::move(p.value()));
  for (int64_t i = config.init; i < config.trip_count + config.init; ++i) {
    CHECK(OverflowSafeAdd(i, (int64_t)1).has_value());
    HloComputation* unrolled_body = module->AddEmbeddedComputation(
        UnrollSingleIterationOfTrivialLoop(while_op, config, i).value());
    unrolled_body_call_op =
        body_builder.AddInstruction(HloInstruction::CreateCall(
            while_op->shape(), call_operands, unrolled_body));
    // Chain each call's result into the next iteration.
    call_operands.clear();
    call_operands.emplace_back(unrolled_body_call_op);
  }
  HloComputation* new_body =
      module->AddEmbeddedComputation(body_builder.Build(unrolled_body_call_op));
  HloComputation* new_cond =
      module->AddEmbeddedComputation(MakeTrivialLoopCondition(
          while_op, absl::StrCat("unrolled", while_op->name(), "-cond"),
          config.induction_var_idx, config.init));
  HloInstruction* new_while_op =
      computation->AddInstruction(HloInstruction::CreateWhile(
          while_op->shape(), new_cond, new_body, while_op->mutable_operand(0)));
  CHECK_OK(computation->ReplaceInstruction(while_op, new_while_op));
  // Restore the single-caller invariant across the newly added calls.
  TF_RETURN_IF_ERROR(FlattenCallGraph().Run(module).status());
  return true;
}
};
// Returns true if `instr` is (a read of) the loop induction variable:
// gte(param, induction_var_idx) outside fusions, or — inside a fusion — a
// parameter whose corresponding caller operand is the induction variable
// (checked recursively through nested fusions).
bool IsLoopInductionVar(const HloInstruction* instr,
                        const WhileLoopConfig& config) {
  if (!instr->parent()->IsFusionComputation()) {
    return Match(instr, match::GetTupleElement(match::Parameter(),
                                               config.induction_var_idx));
  } else {
    if (!Match(instr, match::Parameter())) {
      return false;
    }
    // Chase the fusion parameter back to the caller's operand.
    HloInstruction* caller_fusion = instr->parent()->FusionInstruction();
    return IsLoopInductionVar(caller_fusion->operand(instr->parameter_number()),
                              config);
  }
}
// Returns the dynamic dimension index if `instr` is a dynamic-slice or
// dynamic-update-slice on `input` whose single dynamic start index is the
// loop induction variable, all other start indices are zero, and that
// dimension's size equals the trip count. Returns std::nullopt otherwise.
// NOTE(review): the `opcode` parameter is currently unused; the function
// branches on instr->opcode() directly.
std::optional<int64_t> MatchShapeCoveringDynamicIndexInstruction(
    HloInstruction* instr, HloInstruction* input, HloOpcode opcode,
    const WhileLoopConfig& config) {
  // Start indices begin at operand 1 for dynamic-slice and operand 2 for
  // dynamic-update-slice (operand 1 is the update).
  int64_t start_indices_offset;
  if (instr->opcode() == HloOpcode::kDynamicSlice) {
    start_indices_offset = 1;
  } else if (instr->opcode() == HloOpcode::kDynamicUpdateSlice) {
    start_indices_offset = 2;
  } else {
    return std::nullopt;
  }
  HloInstruction* operand = instr->mutable_operand(0);
  if (operand != input) {
    return std::nullopt;
  }
  int64_t dynamic_index = -1;
  for (int64_t start_index = start_indices_offset;
       start_index < instr->operand_count(); ++start_index) {
    HloInstruction* index = instr->mutable_operand(start_index);
    if (Match(index, match::ConstantScalar())) {
      // Constant start indices must be zero for a shape-covering access.
      std::optional<int64_t> offset =
          LiteralUtil::LiteralAsScalarInt64(index->literal());
      if (offset.has_value() && offset.value() != 0) {
        return std::nullopt;
      }
    }
    if (IsLoopInductionVar(index, config)) {
      // Only a single dimension may be indexed by the induction variable.
      if (dynamic_index != -1) {
        return std::nullopt;
      }
      dynamic_index = start_index - start_indices_offset;
    }
  }
  if (dynamic_index == -1) {
    return std::nullopt;
  }
  // The loop must iterate over the whole dimension to cover the shape.
  if (operand->shape().dimensions(dynamic_index) != config.trip_count) {
    return std::nullopt;
  }
  return dynamic_index;
}
// Decides whether `while_op` is structurally unrollable: a single tuple
// operand, no control dependencies on it, no send/recv, a side-effect-free
// condition, a recognizable induction variable, and a statically-evaluable
// trivial trip count. Returns the loop config, or std::nullopt.
std::optional<WhileLoopConfig> WhileLoopUnroller::IsLoopUnrollable(
    HloInstruction* while_op) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  // Fixed: a CHECK_EQ on the operand count used to precede this test, which
  // aborted the process and made this graceful bail-out unreachable.
  if (while_op->operands().size() != 1) {
    VLOG(5) << absl::StrCat(
        "Cannot unroll while loop ", while_op->name(),
        ". While loop must have a single "
        "tuple operand, instead has more than one operand: ",
        while_op->operands().size());
    return std::nullopt;
  }
  // Collect every while op that something has a control dependency on;
  // unrolling such a loop would drop the dependency.
  std::vector<HloInstruction*> while_dependees;
  for (HloComputation* comp : while_op->GetModule()->computations()) {
    for (HloInstruction* instr : comp->instructions()) {
      for (HloInstruction* control_dep : instr->control_predecessors()) {
        if (control_dep->opcode() == HloOpcode::kWhile) {
          while_dependees.push_back(control_dep);
        }
      }
    }
  }
  if (absl::linear_search(while_dependees.begin(), while_dependees.end(),
                          while_op)) {
    VLOG(2) << "Not attempting to unroll " << while_op->name()
            << " due to control dependency: " << while_op->ToShortString();
    return std::nullopt;
  }
  // Send/recv carry cross-program channel semantics that duplication would
  // break.
  if (ContainsInstrWithOpcode(while_op->while_body(),
                              {HloOpcode::kSend, HloOpcode::kSendDone,
                               HloOpcode::kRecv, HloOpcode::kRecvDone}) ||
      ContainsInstrWithOpcode(while_op->while_condition(),
                              {HloOpcode::kSend, HloOpcode::kSendDone,
                               HloOpcode::kRecv, HloOpcode::kRecvDone})) {
    VLOG(2) << "Not attempting to unroll " << while_op->name()
            << " because it contains a send/recv node: "
            << while_op->ToShortString();
    return std::nullopt;
  }
  if (while_op->operand(0)->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "Not attempting to unroll " << while_op->name()
            << " because the operand is not a tuple: "
            << while_op->ToShortString();
    return std::nullopt;
  }
  if (while_op->while_condition()->HasSideEffect()) {
    VLOG(2) << "Not attempting to remove while loop whose condition contains "
               "side-effecting instructions: "
            << while_op->ToShortString();
    return std::nullopt;
  }
  std::optional<int64_t> indvar_tuple_idx =
      GetLoopInductionVarTupleIdx(while_op);
  if (!indvar_tuple_idx.has_value()) {
    return std::nullopt;
  }
  // Evaluate the induction variable's initial value at compile time.
  HloEvaluator evaluator(0);
  const HloInstruction* while_init = while_op->operand(0);
  const HloInstruction* indvar_init = while_init->operand(*indvar_tuple_idx);
  absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
  if (!indvar_init_result.ok()) {
    VLOG(2) << "Couldn't evaluate induction variable init, "
            << indvar_init_result.status() << ", " << indvar_init->ToString();
    return std::nullopt;
  }
  Literal indvar_iter_val = std::move(indvar_init_result).value();
  std::optional<int64_t> trip_count =
      MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx, indvar_iter_val);
  if (!trip_count.has_value()) {
    VLOG(3) << "Loop doesn't have trivial trip count";
    return std::nullopt;
  }
  VLOG(3) << "Loop trip count " << trip_count.value();
  WhileLoopConfig config;
  config.init =
      LiteralUtil::LiteralAsScalarInt64(std::move(indvar_iter_val)).value();
  config.trip_count = trip_count.value();
  config.induction_var_idx = *indvar_tuple_idx;
  return config;
}
// Runs CSE, tuple simplification, and (fixed-point) constant sinking so loop
// bodies and conditions are in the canonical form the unroller's analysis
// expects. Returns true if any pass changed the module.
absl::StatusOr<bool> WhileLoopUnroller::PrepareModuleForUnrolling(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  TF_ASSIGN_OR_RETURN(
      bool applied_cse,
      HloCSE(true, false,
             false, true)
          .Run(module, execution_threads));
  if (applied_cse) {
    changed = true;
    VLOG(3) << "Applied hlo cse to module " << module->name();
  }
  TF_ASSIGN_OR_RETURN(bool applied_tuple_simplifier,
                      TupleSimplifier{}.Run(module, execution_threads));
  if (applied_tuple_simplifier) {
    changed = true;
    VLOG(3) << "Applied tuple simplifier to module " << module->name();
  }
  // Run to a fixed point so constants are sunk through nested loops.
  HloPassFix<WhileLoopConstantSinking> constant_sinking(
      true,
      true);
  TF_ASSIGN_OR_RETURN(bool applied_constant_sinking,
                      constant_sinking.Run(module, execution_threads));
  if (applied_constant_sinking) {
    changed = true;
    VLOG(3) << "Applied constant sinking to module " << module->name();
  }
  return changed;
}
// Returns every while op in `module` that is both structurally unrollable
// (IsLoopUnrollable) and heuristically worth unrolling
// (InitialFeasibilityCheck), paired with its loop config.
std::vector<std::pair<HloInstruction*, WhileLoopConfig>>
WhileLoopUnroller::GetUnrollableLoops(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::vector<HloInstruction*> all_while_ops;
  for (auto* comp : module->MakeComputationPostOrder(execution_threads)) {
    absl::c_copy_if(comp->instructions(), std::back_inserter(all_while_ops),
                    HloPredicateIsOp<HloOpcode::kWhile>);
  }
  std::vector<std::pair<HloInstruction*, WhileLoopConfig>> while_loop_configs;
  for (HloInstruction* instr : all_while_ops) {
    std::optional<WhileLoopConfig> config = IsLoopUnrollable(instr);
    if (config.has_value()) {
      if (!InitialFeasibilityCheck(instr, config.value())) {
        VLOG(3) << "Initial feasibility check failed for " << instr->name();
        continue;
      }
      while_loop_configs.emplace_back(instr, config.value());
    }
  }
  return while_loop_configs;
}
// Unrolls a single while op. Only full unrolling (unroll_factor == -1) is
// supported; `force_unroll` skips the heuristic feasibility check; and
// `wrap_in_trivial_loop` keeps a single-iteration while wrapper around the
// unrolled body. Returns true if the loop was unrolled.
absl::StatusOr<bool> WhileLoopUnroller::Unroll(
    HloInstruction* while_op, int64_t unroll_factor, bool wrap_in_trivial_loop,
    bool force_unroll) {
  HloModule* module = while_op->GetModule();
  if (unroll_factor != -1) {
    VLOG(5) << absl::StrCat(
        "Currently, only full unrolling is supported, unroll factor: ",
        unroll_factor);
    return false;
  }
  // Canonicalize the module first. The "changed" bit of this step is
  // deliberately ignored — this function reports only whether the loop was
  // unrolled. (Previously it was captured into an unused local.)
  TF_RETURN_IF_ERROR(
      PrepareModuleForUnrolling(module, {}).status());
  std::optional<WhileLoopConfig> config = IsLoopUnrollable(while_op);
  if (!config.has_value()) {
    VLOG(5) << "Not attempting to unroll " << while_op->name()
            << " because it is not unrollable.";
    return false;
  }
  if (!force_unroll && !InitialFeasibilityCheck(while_op, config.value())) {
    return false;
  }
  bool unrolled = false;
  if (wrap_in_trivial_loop) {
    TF_ASSIGN_OR_RETURN(unrolled,
                        UnrollInternalWrapped(while_op, config.value()));
  } else {
    TF_ASSIGN_OR_RETURN(unrolled, UnrollInternal(while_op, config.value()));
  }
  // Unrolling leaves per-iteration call instructions behind; inline them.
  if (unrolled) {
    TF_RETURN_IF_ERROR(CallInliner().Run(module).status());
  }
  return unrolled;
}
// Pass entry point: prepares the module, collects all unrollable loops, and
// fully unrolls each one. Returns true if the module changed (including
// changes made by the preparation passes).
absl::StatusOr<bool> WhileLoopUnroller::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Only full unrolling is supported.
  if (unroll_factor_ != -1) {
    return false;
  }
  XLA_VLOG_LINES(3, "WhileLoopUnroller::Run(), before:\n" + module->ToString());
  bool changed = false;
  TF_ASSIGN_OR_RETURN(changed,
                      PrepareModuleForUnrolling(module, execution_threads));
  // Fixed: removed a dead duplicate scan that collected all while ops into a
  // local vector which was never used — GetUnrollableLoops does its own scan.
  std::vector<std::pair<HloInstruction*, WhileLoopConfig>>
      unrollable_while_ops = GetUnrollableLoops(module, execution_threads);
  VLOG(3) << "Number of while instructions in the module to unroll: "
          << unrollable_while_ops.size();
  bool unrolled = false;
  for (auto& [while_op, config] : unrollable_while_ops) {
    if (wrap_in_trivial_loop_) {
      TF_ASSIGN_OR_RETURN(unrolled, UnrollInternalWrapped(while_op, config));
    } else {
      TF_ASSIGN_OR_RETURN(unrolled, UnrollInternal(while_op, config));
    }
    changed |= unrolled;
  }
  if (changed) {
    // Unrolling leaves per-iteration call instructions behind; inline them.
    TF_RETURN_IF_ERROR(CallInliner().Run(module, execution_threads).status());
  }
  XLA_VLOG_LINES(3, "WhileLoopUnroller::Run(), after:\n" + module->ToString());
  return changed;
}
} | #include "xla/service/while_loop_unroller.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Test fixture: factory helpers build parameterized while-loop modules, and
// UnrollAndCompare checks that unrolling preserves the executed result.
class WhileLoopUnrollerTest : public HloTestBase {
 protected:
  [[nodiscard]] std::unique_ptr<VerifiedHloModule> MakeModuleWithSimpleLoop(
      int num_iters);
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithLoopBodyIndirectInc(int num_iters);
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithNestedLoopBodyIndirectInc(int num_iters);
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithLoopBodyNestedCopyIndVar(int num_iters);
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithWhileFeedingAnotherWhile(int num_iters);
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithSimpleLoopAllReduce(int num_iters);
 public:
  // Executes the module before and after unrolling and asserts the results
  // are (near-)equal; also asserts the unroller reports a change.
  void UnrollAndCompare(std::unique_ptr<HloModule> module,
                        absl::Span<Literal* const> arguments,
                        int64_t unroll_factor = -1, bool wrap_in_loop = false) {
    Literal before_unroll = ExecuteAndTransfer(module->Clone(), arguments);
    VLOG(2) << "before unroll value: " << before_unroll.ToString();
    EXPECT_TRUE(WhileLoopUnroller(unroll_factor, wrap_in_loop)
                    .Run(module.get())
                    .value());
    Literal after_unroll = ExecuteAndTransfer(std::move(module), arguments);
    VLOG(2) << "after unroll value: " << after_unroll.ToString();
    ASSERT_TRUE(LiteralTestUtil::NearOrEqual(before_unroll,
                                             after_unroll,
                                             std::nullopt));
  }
};
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithSimpleLoop(int num_iters) {
  // A single while loop: tuple element 0 is the induction variable (starts
  // at 0, incremented by 1 each iteration, loop runs while it is less than
  // the spliced-in bound); element 1 is a 3-element vector doubled each
  // iteration.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(loop_var.1), index=0
    constant.1 = s32[]{:T(128)} constant(1)
    idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[]{:T(128)} constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[]{:T(128)} constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
    ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  // Splice the requested trip count into the {{LOOP_BOUND}} placeholder.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithLoopBodyIndirectInc(int num_iters) {
  // Like the simple loop, but the induction variable (element 0) is
  // incremented by a loop-carried step value (element 1, set to constant 1
  // at entry) rather than by an inline constant inside the body.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
    output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
    inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
    ROOT tuple = (s32[], s32[], s32[3]{0}) tuple(inc, get-tuple-element.2, output)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.1 = s32[] constant(1)
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, constant.4)
    ROOT while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  // Splice the requested trip count into the {{LOOP_BOUND}} placeholder.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithLoopBodyNestedCopyIndVar(int num_iters) {
  // Indirect-increment loop in which the induction variable additionally
  // flows through a copy and a reshape before being incremented, testing
  // that the unroller can see through such nesting.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    inner-copy = s32[] copy(get-tuple-element.1)
    outer-copy = s32[] reshape(inner-copy)
    get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
    output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
    inc = s32[] add(outer-copy, get-tuple-element.2)
    ROOT tuple = (s32[], s32[], s32[3]{0}) tuple(inc, get-tuple-element.2, output)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.1 = s32[] constant(1)
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, constant.4)
    ROOT while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  // Splice the requested trip count into the {{LOOP_BOUND}} placeholder.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithNestedLoopBodyIndirectInc(int num_iters) {
  // An outer while loop whose body runs an inner while loop; both use the
  // indirect-increment pattern, and the inner loop's vector result is
  // threaded back into the outer loop's state. The same {{LOOP_BOUND}} is
  // spliced into both loop conditions.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
    output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
    inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
    ROOT tuple = (s32[], s32[], s32[3]{0}) tuple(inc, get-tuple-element.2, output)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  SimpleLoop {
    constant.1 = s32[] constant(1)
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, constant.4)
    ROOT while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  OuterLoop.body {
    loop_var.1 = (s32[], s32[], s32[3]{0}, s32[10]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
    get-tuple-element.22 = s32[3]{0} get-tuple-element(loop_var.1), index=2
    get-tuple-element.3 = s32[10]{0} get-tuple-element(loop_var.1), index=3
    output = s32[10]{0} add(get-tuple-element.3, get-tuple-element.3)
    constant.1 = s32[] constant(1)
    constant.3 = s32[] constant(0)
    tuple.1 = (s32[], s32[], s32[3]{0}) tuple(constant.3, constant.1, get-tuple-element.22)
    inner-while = (s32[], s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    get-tuple-element.6 = s32[3]{0} get-tuple-element(inner-while), index=2
    inc = s32[] add(get-tuple-element.1, get-tuple-element.2)
    ROOT tuple = (s32[], s32[], s32[3]{0}, s32[10]{0}) tuple(inc, get-tuple-element.2, get-tuple-element.6, output)
  }
  OuterLoop.condition {
    loop_var.2 = (s32[], s32[], s32[3]{0}, s32[10]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY OuterLoop {
    constant.1 = s32[] constant(1)
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    constant.5 = s32[10]{0} constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
    tuple.1 = (s32[], s32[], s32[3]{0}, s32[10]{0}) tuple(constant.3, constant.1, constant.4, constant.5)
    ROOT while = (s32[], s32[], s32[3]{0}, s32[10]{0}) while(tuple.1), condition=
      OuterLoop.condition, body=OuterLoop.body
  }
  )";
  // Splice the requested trip count into both loop-bound placeholders.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithWhileFeedingAnotherWhile(int num_iters) {
  // Two sequential while loops in the entry computation: the first loop's
  // vector output (element 1) is extracted and fed into the init tuple of
  // the second loop. Both conditions share the same spliced-in bound.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}) parameter(0)
    const1 = s32[] constant(1)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    output = s32[3]{0} add(get-tuple-element.3, get-tuple-element.3)
    inc = s32[] add(get-tuple-element.1, const1)
    ROOT tuple = (s32[], s32[3]{0}) tuple(inc, output)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  SimpleLoop {
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  OuterLoop.body {
    loop_var.1 = (s32[], s32[3]{0}, s32[10]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.22 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s32[10]{0} get-tuple-element(loop_var.1), index=2
    output1 = s32[3]{0} add(get-tuple-element.22, get-tuple-element.22)
    output2 = s32[10]{0} add(get-tuple-element.3, get-tuple-element.3)
    one = s32[] constant(1)
    inc = s32[] add(get-tuple-element.1, one)
    ROOT tuple = (s32[], s32[3]{0}, s32[10]{0}) tuple(inc, output1, output2)
  }
  OuterLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}, s32[10]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY entry.comp {
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    constant.5 = s32[10]{0} constant({0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    inner-while = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    get-tuple-element.6 = s32[3]{0} get-tuple-element(inner-while), index=1
    tuple.2 = (s32[], s32[3]{0}, s32[10]{0}) tuple(constant.3, get-tuple-element.6, constant.5)
    ROOT while = (s32[], s32[3]{0}, s32[10]{0}) while(tuple.2), condition=
      OuterLoop.condition, body=OuterLoop.body
  }
  )";
  // Splice the requested trip count into the loop-bound placeholders.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
std::unique_ptr<VerifiedHloModule>
WhileLoopUnrollerTest::MakeModuleWithSimpleLoopAllReduce(int num_iters) {
  // Loop whose body all-reduces one operand (element 1) every iteration and
  // adds the result into an accumulation buffer (element 2). The entry
  // computation takes the reduced operand as a parameter and starts the
  // buffer at zero.
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  %reduction {
    %x = f32[] parameter(0)
    %y = f32[] parameter(1)
    ROOT %add = f32[] add(f32[] %x, f32[] %y)
  }
  SimpleLoop.body {
    loop_var.1 = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = f32[1024, 1024] get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = f32[1024, 1024] get-tuple-element(loop_var.1), index=2
    %all-reduce = f32[1024, 1024] all-reduce(f32[1024, 1024] get-tuple-element.2), channel_id=1, replica_groups={{0}}, to_apply=%reduction
    %accumulation = f32[1024, 1024] add(f32[1024, 1024] %all-reduce, f32[1024, 1024] get-tuple-element.3)
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    ROOT tuple = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(add, get-tuple-element.2, %accumulation)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    %param.1 = f32[1024, 1024] parameter(0)
    constant.3 = s32[] constant(0)
    %accumulation_buffer_init = f32[] constant(0)
    %accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
    tuple.1 = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(constant.3, %param.1, %accumulation_buffer)
    ROOT while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  // Splice the requested trip count into the {{LOOP_BOUND}} placeholder.
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template, {{"{{LOOP_BOUND}}", absl::StrCat(num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}
// Unrolls a 5-iteration simple loop both fully flattened and wrapped in a
// trivial loop; results must match the un-unrolled module either way.
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnroll) {
  for (bool wrap_in_loop : {false, true}) {
    UnrollAndCompare(MakeModuleWithSimpleLoop(5), {}, -1, wrap_in_loop);
  }
}
// Loop whose induction variable is incremented by a loop-carried s64 step
// value (element 2, constant 1 at entry); per the test name this shape
// requires the unroller's prepare step before unrolling.
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnrollNeedPrepare) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}, s64[]) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.1), index=2
    add = s64[] add(get-tuple-element.1, get-tuple-element.3)
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}, s64[]) tuple(add, multiply, get-tuple-element.3)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}, s64[]) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(0)
    one = s64[] constant(1)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}, s64[]) tuple(constant.3, constant.4, one)
    while = (s64[], s32[3]{0}, s64[]) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT result = s32[3]{0} get-tuple-element(while), index=1
  }
  )";
  // Check both flattened and wrapped-in-trivial-loop unrolling.
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   false);
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   true);
}
// Same loop-carried-step shape as SimpleLoopUnrollNeedPrepare, but the init
// tuple is additionally repackaged through get-tuple-element/tuple before
// feeding the while, exercising another prepare path.
TEST_F(WhileLoopUnrollerTest, SimpleLoopUnrollNeedPrepare2) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}, s64[]) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.1), index=2
    add = s64[] add(get-tuple-element.1, get-tuple-element.3)
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}, s64[]) tuple(add, multiply, get-tuple-element.3)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}, s64[]) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(0)
    one = s64[] constant(1)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}, s64[]) tuple(constant.3, constant.4, one)
    gte1 = s64[] get-tuple-element(tuple.1), index=0
    gte2 = s32[3]{0} get-tuple-element(tuple.1), index=1
    gte3 = s64[] get-tuple-element(tuple.1), index=2
    tuple = (s64[], s32[3]{0}, s64[]) tuple(gte1, gte2, gte3)
    while = (s64[], s32[3]{0}, s64[]) while(tuple), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT result = s32[3]{0} get-tuple-element(while), index=1
  }
  )";
  // Check both flattened and wrapped-in-trivial-loop unrolling.
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   false);
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   true);
}
// The while instruction is not the entry root: a get-tuple-element extracts
// element 1 as the result. Unrolling must still work.
TEST_F(WhileLoopUnrollerTest, SimpleLoopNotRoot) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
    while = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT result = s32[3]{0} get-tuple-element(while), index=1
  }
  )";
  // Check both flattened and wrapped-in-trivial-loop unrolling.
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   false);
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   true);
}
// Three while loops: while1 and while2 increment their induction variable
// with `add`, while3 updates it with `multiply`. GetUnrollableLoops must
// report exactly the two add-based loops.
TEST_F(WhileLoopUnrollerTest, GetUnrollableLoops) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  SimpleLoop.body.2 {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition.2 {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  SimpleLoop.body.3 {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] multiply(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition.3 {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
    while1 = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    while3 = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition.3, body=SimpleLoop.body.3
    while2 = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition.2, body=SimpleLoop.body.2
    o1 = s32[3]{0} get-tuple-element(while1), index=1
    o2 = s32[3]{0} get-tuple-element(while2), index=1
    ROOT result = (s32[3]{0}, s32[3]{0}) tuple(o1,o2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto unrollable_loops =
      WhileLoopUnroller::GetUnrollableLoops(module.get(), {});
  // Only while1 and while2 qualify; while3's multiply-updated induction
  // variable disqualifies it.
  EXPECT_EQ(unrollable_loops.size(), 2);
}
// Unrolls two dependent loops one at a time via the static Unroll() entry
// point; after each unroll, no kCall instructions may remain in the module
// (the unrolled bodies must have been inlined).
TEST_F(WhileLoopUnrollerTest, UnrollMutipleLoops) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  SimpleLoop.body.2 {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition.2 {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
    while1 = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    input = s32[3]{0} get-tuple-element(while1), index=1
    tuple.2 = (s64[], s32[3]{0}) tuple(constant.3, input)
    while2 = (s64[], s32[3]{0}) while(tuple.2), condition=
      SimpleLoop.condition.2, body=SimpleLoop.body.2
    o1 = s32[3]{0} get-tuple-element(while1), index=1
    o2 = s32[3]{0} get-tuple-element(while2), index=1
    ROOT result = (s32[3]{0}, s32[3]{0}) tuple(o1,o2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Counts kCall instructions across all computations in the module.
  auto count_call_instructions = [&module]() {
    int64_t num_calls = 0;
    for (auto* comp : module->MakeComputationPostOrder()) {
      num_calls += absl::c_count_if(comp->instructions(),
                                    HloPredicateIsOp<HloOpcode::kCall>);
    }
    return num_calls;
  };
  // Unroll the first loop and verify no residual calls.
  TF_ASSERT_OK_AND_ASSIGN(
      bool unrolled1,
      WhileLoopUnroller::Unroll(
          module->entry_computation()->GetInstructionWithName("while1")));
  EXPECT_TRUE(unrolled1);
  EXPECT_EQ(count_call_instructions(), 0);
  // Unroll the second loop (fed by the first) and verify again.
  TF_ASSERT_OK_AND_ASSIGN(
      bool unrolled2,
      WhileLoopUnroller::Unroll(
          module->entry_computation()->GetInstructionWithName("while2")));
  EXPECT_TRUE(unrolled2);
  EXPECT_EQ(count_call_instructions(), 0);
}
// Simple loop whose induction variable starts at 4 (not 0), still bounded
// by 10; unrolling must handle the non-zero initial value.
TEST_F(WhileLoopUnrollerTest, SimpleLoopNonZeroInit) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s64[] get-tuple-element(loop_var.1), index=0
    constant.1 = s64[] constant(1)
    add = s64[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s64[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s64[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s64[] get-tuple-element(loop_var.2), index=0
    constant.2 = s64[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s64[] constant(4)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s64[], s32[3]{0}) tuple(constant.3, constant.4)
    while = (s64[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT result = s32[3]{0} get-tuple-element(while), index=1
  }
  )";
  // Check both flattened and wrapped-in-trivial-loop unrolling.
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   false);
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   true);
}
// Simple loop whose induction variable has type s16 rather than the usual
// s32/s64; unrolling must support narrow induction-variable types.
TEST_F(WhileLoopUnrollerTest, SimpleLoopS16IndVar) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s16[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s16[] get-tuple-element(loop_var.1), index=0
    constant.1 = s16[] constant(1)
    add = s16[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s16[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s16[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s16[] get-tuple-element(loop_var.2), index=0
    constant.2 = s16[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s16[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s16[], s32[3]{0}) tuple(constant.3, constant.4)
    ROOT while = (s16[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  // Check both flattened and wrapped-in-trivial-loop unrolling.
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   false);
  UnrollAndCompare(ParseAndReturnVerifiedModule(hlo_string).value(), {}, -1,
                   true);
}
// A while instruction that is a control predecessor of another instruction
// (copy1 lists while1 in control-predecessors) must not be unrolled, so the
// pass reports no change.
TEST_F(WhileLoopUnrollerTest, LoopWithControlDep) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s16[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s16[] get-tuple-element(loop_var.1), index=0
    constant.1 = s16[] constant(1)
    add = s16[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s16[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s16[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s16[] get-tuple-element(loop_var.2), index=0
    constant.2 = s16[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s16[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s16[], s32[3]{0}) tuple(constant.3, constant.4)
    while1 = (s16[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    copy1 = copy(constant.3), control-predecessors={while1}
    ROOT add = add(copy1, constant.3)
  }
  )";
  // The pass must leave the module unchanged.
  EXPECT_FALSE(WhileLoopUnroller()
                   .Run(ParseAndReturnVerifiedModule(hlo_string).value().get())
                   .value());
}
// An unroll factor (3) that does not fully unroll the 5-iteration loop is
// expected to leave the module unchanged.
TEST_F(WhileLoopUnrollerTest, SimpleLoopPartialUnroll) {
  std::unique_ptr<VerifiedHloModule> module = MakeModuleWithSimpleLoop(5);
  WhileLoopUnroller partial_unroller(/*unroll_factor=*/3);
  EXPECT_FALSE(partial_unroller.Run(module.get()).value());
}
// Loops whose induction variable is incremented by a loop-carried value
// (rather than an inline constant) must unroll correctly, both flattened
// and wrapped in a trivial loop.
TEST_F(WhileLoopUnrollerTest, IndirectBodyInc) {
  // Each call builds its own module; no shared module is needed (a
  // previously-built unused module was removed).
  UnrollAndCompare(MakeModuleWithLoopBodyIndirectInc(5), {}, -1,
                   false);
  UnrollAndCompare(MakeModuleWithLoopBodyIndirectInc(5), {}, -1,
                   true);
}
// Nested loops (an outer while whose body contains an inner while) with
// indirect induction-variable increments must unroll correctly.
TEST_F(WhileLoopUnrollerTest, NestedIndirectBodyInc) {
  // Each call builds its own module; no shared module is needed (a
  // previously-built unused module was removed).
  UnrollAndCompare(MakeModuleWithNestedLoopBodyIndirectInc(5), {},
                   -1, false);
  UnrollAndCompare(MakeModuleWithNestedLoopBodyIndirectInc(5), {},
                   -1, true);
}
TEST_F(WhileLoopUnrollerTest, WhileFeedingWhile) {
UnrollAndCompare(MakeModuleWithWhileFeedingAnotherWhile(5), {},
-1, false);
UnrollAndCompare(MakeModuleWithWhileFeedingAnotherWhile(5), {},
-1, true) |
1,930 | cpp | tensorflow/tensorflow | call_graph | third_party/xla/xla/service/call_graph.cc | third_party/xla/xla/service/call_graph_test.cc | #ifndef XLA_SERVICE_CALL_GRAPH_H_
#define XLA_SERVICE_CALL_GRAPH_H_
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/logging.h"
namespace xla {
// The context in which a computation is invoked by a call site (assigned per
// opcode by GetInstructionCallContext).
enum class CallContext {
  // Invoked as a nested ("embedded") computation of an instruction, e.g. a
  // reduction's apply function -- see GetInstructionCallContext.
  kEmbedded,
  // Invoked by a control-flow instruction: kCall, kConditional, kWhile, or
  // the async opcodes (per GetInstructionCallContext).
  kControlFlow,
  // Invoked in both embedded and control-flow contexts.
  kBoth,
  // Not invoked from any call site.
  kNone
};
std::string CallContextToString(CallContext context);
std::ostream& operator<<(std::ostream& out, const CallContext& context);
CallContext GetInstructionCallContext(HloOpcode opcode);
// A single call: the calling instruction, the computation(s) it invokes, and
// the context of the call. Immutable after construction.
class CallSite {
 public:
  CallSite(HloInstruction* instruction,
           absl::Span<HloComputation* const> called_computations,
           CallContext context)
      : instruction_(CHECK_NOTNULL(instruction)),
        called_computations_(called_computations.begin(),
                             called_computations.end()),
        context_(context) {}
  // The calling instruction; guaranteed non-null (checked at construction).
  HloInstruction* instruction() const { return instruction_; }
  // The computation(s) invoked by this call site.
  absl::Span<HloComputation* const> called_computations() const {
    return called_computations_;
  }
  // Whether this is a control-flow or embedded call.
  CallContext context() const { return context_; }
  std::string ToString() const;
 private:
  HloInstruction* instruction_;
  const absl::InlinedVector<HloComputation*, 2> called_computations_;
  const CallContext context_;
};
// A node in the call graph: one HloComputation together with its incoming
// (caller) and outgoing (callee) call edges. Populated by CallGraph::Build.
class CallGraphNode {
 public:
  explicit CallGraphNode(HloComputation* computation);
  // The computation this node represents.
  HloComputation* computation() const { return computation_; }
  // Call sites appearing inside this computation (calls it makes).
  absl::Span<const CallSite> callsites() const { return callsites_; }
  // Returns the call site recorded for `instruction` (presumably null when
  // the instruction is not a call site here -- see the .cc definition).
  const CallSite* GetCallSite(const HloInstruction* instruction) const;
  // Computations called by this computation.
  absl::Span<HloComputation* const> callees() const { return callees_; }
  // Call sites in other computations that invoke this computation.
  absl::Span<const CallSite> caller_callsites() const {
    return caller_callsites_;
  }
  // Computations that call this computation.
  absl::Span<HloComputation* const> callers() const { return callers_; }
  // The context(s) in which this computation is invoked.
  CallContext context() const { return context_; }
  // Depth of this node in the call graph (assigned by CallGraph).
  int depth() const { return depth_; }
  absl::string_view ToString() const;
  // Non-copyable but movable: nodes are owned and wired up by CallGraph.
  CallGraphNode(const CallGraphNode&) = delete;
  CallGraphNode& operator=(const CallGraphNode&) = delete;
  CallGraphNode(CallGraphNode&&) = default;
  CallGraphNode& operator=(CallGraphNode&&) = default;
 private:
  friend class CallGraph;
  // Mutators used by CallGraph while constructing the graph.
  void set_context(CallContext value) { context_ = value; }
  void set_depth(int value) { depth_ = value; }
  void AddCallerCallSite(const CallSite& caller_callsite);
  void AddCallSiteForInstruction(
      HloInstruction* instruction,
      const absl::flat_hash_set<absl::string_view>& execution_threads = {});
  HloComputation* computation_;
  // Outgoing edges; the paired hash set presumably de-duplicates callees.
  absl::InlinedVector<HloComputation*, 1> callees_;
  absl::flat_hash_set<HloComputation*> callee_set_;
  // Incoming edges; the paired hash set presumably de-duplicates callers.
  absl::InlinedVector<HloComputation*, 1> callers_;
  absl::flat_hash_set<HloComputation*> caller_set_;
  absl::InlinedVector<CallSite, 1> callsites_;
  // Per the name, maps a call instruction to its entry in callsites_ -- see
  // the .cc definition for the exact meaning of the index.
  absl::flat_hash_map<const HloInstruction*, int64_t> callsite_instructions_;
  absl::InlinedVector<CallSite, 1> caller_callsites_;
  CallContext context_ = CallContext::kNone;
  int depth_ = 0;
};
// The call graph of an HloModule: one CallGraphNode per computation plus the
// call edges between them. Constructed via Build(); afterwards the class
// only answers queries.
class CallGraph {
 public:
  using VisitorFunction = absl::FunctionRef<absl::Status(const CallGraphNode&)>;
  // Builds the call graph for `module`. `execution_threads` presumably
  // restricts which computations are considered -- see the definition.
  static std::unique_ptr<CallGraph> Build(
      const HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads = {});
  // Returns the node for `computation`.
  const CallGraphNode& GetNode(const HloComputation* computation) const;
  CallGraphNode& GetNode(const HloComputation* computation);
  // All nodes in the graph.
  const std::vector<CallGraphNode>& nodes() const { return nodes_; }
  // Applies `visitor_func` to nodes of the graph; see the definition for the
  // traversal order and the `visit_unreachable_nodes` semantics.
  absl::Status VisitNodes(VisitorFunction visitor_func,
                          bool visit_unreachable_nodes = true) const;
  // Whether computation `a` dominates computation `b` in the call graph.
  bool Dominates(const HloComputation* a, const HloComputation* b) const;
  // Whether computation `b` is reachable from computation `a`.
  bool CanReach(const HloComputation* a, const HloComputation* b) const;
  // Whether `instruction` lives in a computation dominated by `computation`,
  // i.e. the instruction is "nested in" that computation.
  bool InstructionIsNestedIn(const HloInstruction* instruction,
                             const HloComputation* computation) const {
    return Dominates(computation, instruction->parent());
  }
  // Returns ancestors of `a` and `b` that share a computation -- see the
  // definition for the exact contract.
  std::pair<HloInstruction*, HloInstruction*> NearestAncestorsInSameComputation(
      HloInstruction* a, HloInstruction* b) const;
  // Nearest-common-ancestor queries over instructions / computations;
  // implemented by NearestCommonAncestorsHelper.
  absl::flat_hash_set<const HloInstruction*> NearestCommonAncestorInstructions(
      std::vector<const HloInstruction*> instructions);
  absl::flat_hash_set<const HloComputation*> NearestCommonAncestorComputations(
      std::vector<const HloComputation*> computations);
  template <typename T>
  absl::flat_hash_set<const T*> NearestCommonAncestorsHelper(
      std::vector<const T*>& starting_nodes);
  // Whether the call graph is flattened -- see the definition for the
  // criterion.
  bool IsFlattened() const;
  // Returns the instructions that call computation `c`.
  std::vector<HloInstruction*> GetComputationCallers(
      const HloComputation* c) const;
  std::string ToString() const;
 private:
  explicit CallGraph(
      const HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads = {});
  CallGraph(const CallGraph&) = delete;
  CallGraph& operator=(const CallGraph&) = delete;
  // Build helpers: annotate each node's CallContext and depth.
  void SetCallContexts();
  void SetNodeDepths();
  // Recursive worker for VisitNodes; `visited` prevents revisiting nodes.
  absl::Status VisitNodesInternal(
      VisitorFunction visitor_func, const CallGraphNode& node,
      absl::flat_hash_set<const HloComputation*>* visited) const;
  // Recursive worker for Dominates; `visited` bounds the traversal.
  bool DominatesHelper(
      const HloComputation* a, const HloComputation* b,
      absl::flat_hash_set<const HloComputation*>* visited) const;
  const HloModule* module_ = nullptr;
  std::vector<CallGraphNode> nodes_;
  // Per the name, maps each computation to the index of its node in nodes_.
  absl::flat_hash_map<const HloComputation*, int64_t> node_indices_;
  absl::flat_hash_set<absl::string_view> execution_threads_;
};
}
#endif
#include "xla/service/call_graph.h"
#include <deque>
#include <memory>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/map_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
using absl::StrAppendFormat;
using absl::StrCat;
// Returns a human-readable name for `context` (e.g. "kControlFlow").
std::string CallContextToString(CallContext context) {
  switch (context) {
    case CallContext::kNone:
      return "kNone";
    case CallContext::kControlFlow:
      return "kControlFlow";
    case CallContext::kEmbedded:
      return "kEmbedded";
    case CallContext::kBoth:
      return "kBoth";
  }
  // All enumerators are handled above, but without this return control can
  // fall off the end of a value-returning function (undefined behavior) if
  // an out-of-range enum value is ever passed.
  return "UnknownCallContext";
}
// Streams the textual name of `context` (see CallContextToString).
std::ostream& operator<<(std::ostream& out, const CallContext& context) {
  return out << CallContextToString(context);
}
// Maps an opcode to the context in which it invokes its called
// computations: kControlFlow for sequencing ops (call/while/conditional and
// async wrappers), kEmbedded for ops whose subcomputations run as embedded
// expressions (map/reduce/fusion/...), and kNone for opcodes that call no
// computations.
CallContext GetInstructionCallContext(HloOpcode opcode) {
  switch (opcode) {
    case HloOpcode::kCall:
    case HloOpcode::kConditional:
    case HloOpcode::kWhile:
    case HloOpcode::kAsyncStart:
    case HloOpcode::kAsyncUpdate:
    case HloOpcode::kAsyncDone:
      return CallContext::kControlFlow;
    case HloOpcode::kAllReduce:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kAllReduceStart:
    case HloOpcode::kMap:
    case HloOpcode::kReduce:
    case HloOpcode::kReduceWindow:
    case HloOpcode::kScatter:
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kSort:
    case HloOpcode::kTopK:
    case HloOpcode::kFusion:
    case HloOpcode::kCustomCall:
      return CallContext::kEmbedded;
    default:
      return CallContext::kNone;
  }
}
std::string CallSite::ToString() const {
return StrCat(
instruction()->name(), " calls in context ",
CallContextToString(context()), ": ",
absl::StrJoin(called_computations(), ", ",
[](std::string* out, const HloComputation* computation) {
absl::StrAppend(out, computation->name());
}));
}
// A node wraps one computation; call edges are filled in by CallGraph::Build.
CallGraphNode::CallGraphNode(HloComputation* computation)
    : computation_(computation) {}
// Returns this node's call site rooted at `instruction`, or nullptr when
// `instruction` calls no computation from this node.
const CallSite* CallGraphNode::GetCallSite(
    const HloInstruction* instruction) const {
  const auto it = callsite_instructions_.find(instruction);
  return it == callsite_instructions_.end() ? nullptr : &callsites_[it->second];
}
// The node's name is simply the name of the computation it wraps.
absl::string_view CallGraphNode::ToString() const {
  return computation_->name();
}
// Records `caller_callsite` as an incoming edge. Every call site is kept in
// caller_callsites_, while callers_ stores each caller computation once,
// de-duplicated via caller_set_.
void CallGraphNode::AddCallerCallSite(const CallSite& caller_callsite) {
  caller_callsites_.push_back(caller_callsite);
  HloComputation* caller = caller_callsite.instruction()->parent();
  if (caller_set_.insert(caller).second) {
    callers_.push_back(caller);
  }
}
// Registers `instruction` as a call site of this node if it calls any
// computations. Callees on excluded execution threads are skipped, and each
// distinct callee is recorded once in callees_.
void CallGraphNode::AddCallSiteForInstruction(
    HloInstruction* instruction,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  CHECK_EQ(instruction->parent(), computation());
  const CallContext context = GetInstructionCallContext(instruction->opcode());
  if (instruction->called_computations().empty()) {
    // Not a calling instruction; nothing to record.
    return;
  }
  CHECK(context == CallContext::kControlFlow ||
        context == CallContext::kEmbedded);
  // Index of the callsite about to be appended.
  callsite_instructions_.insert({instruction, callsites_.size()});
  callsites_.push_back(
      CallSite(instruction, instruction->called_computations(), context));
  for (auto* callee : callsites_.back().called_computations()) {
    if (HloInstruction::IsThreadIncluded(callee->execution_thread(),
                                         execution_threads) &&
        callee_set_.insert(callee).second) {
      callees_.push_back(callee);
    }
  }
}
// Private: graphs are created via CallGraph::Build. `execution_threads`
// restricts the graph to computations on those threads (empty = all).
CallGraph::CallGraph(
    const HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads)
    : module_(module), execution_threads_(execution_threads) {}
// Returns the node for `computation`; the computation must be part of this
// graph (debug-checked).
const CallGraphNode& CallGraph::GetNode(
    const HloComputation* computation) const {
  const auto it = node_indices_.find(computation);
  DCHECK(it != node_indices_.end());
  return nodes_[it->second];
}
// Mutable overload of GetNode; `computation` must be part of this graph.
CallGraphNode& CallGraph::GetNode(const HloComputation* computation) {
  const auto it = node_indices_.find(computation);
  DCHECK(it != node_indices_.end());
  return nodes_[it->second];
}
bool CallGraph::DominatesHelper(
const HloComputation* a, const HloComputation* b,
absl::flat_hash_set<const HloComputation*>* visited) const {
if (a == b || ContainsKey(*visited, b)) {
return true;
}
const CallGraphNode& b_node = GetNode(b);
if (b_node.callers().empty()) {
return false;
}
visited->insert(b);
for (const HloComputation* b_caller : b_node.callers()) {
if (!DominatesHelper(a, b_caller, visited)) {
return false;
}
}
return true;
}
bool CallGraph::Dominates(const HloComputation* a,
const HloComputation* b) const {
absl::flat_hash_set<const HloComputation*> visited;
return DominatesHelper(a, b, &visited);
}
bool CallGraph::CanReach(const HloComputation* a,
const HloComputation* b) const {
if (a == b) {
return true;
}
const CallGraphNode& b_node = GetNode(b);
for (const HloComputation* b_caller : b_node.callers()) {
if (CanReach(a, b_caller)) {
return true;
}
}
return false;
}
namespace {
// Combines two call contexts: kNone is the identity, equal contexts are
// idempotent, and any mix of kControlFlow/kEmbedded yields kBoth.
CallContext UnionContexts(CallContext a, CallContext b) {
  if (a == b || b == CallContext::kNone) {
    return a;
  }
  if (a == CallContext::kNone) {
    return b;
  }
  return CallContext::kBoth;
}
}  // namespace
// Propagates call contexts from root computations (which have no callers
// and start as kControlFlow) down to their callees with a BFS fixed point:
// embedded call sites force kEmbedded, control-flow call sites propagate
// the caller's own context, and conflicting contexts union to kBoth.
void CallGraph::SetCallContexts() {
  std::queue<CallGraphNode*> worklist;
  // Seed: caller-less computations execute in a control-flow context.
  for (const HloComputation* computation :
       module_->computations(execution_threads_)) {
    CallGraphNode& node = GetNode(computation);
    if (node.callers().empty()) {
      node.set_context(CallContext::kControlFlow);
      worklist.push(&node);
    }
  }
  while (!worklist.empty()) {
    CallGraphNode* node = worklist.front();
    worklist.pop();
    for (const CallSite& callsite : node->callsites()) {
      for (const HloComputation* callee : callsite.called_computations()) {
        // Callees on excluded execution threads are not part of the graph.
        if (!HloInstruction::IsThreadIncluded(callee->execution_thread(),
                                              execution_threads_)) {
          continue;
        }
        CallGraphNode& callee_node = GetNode(callee);
        CallContext context_to_add;
        if (callsite.context() == CallContext::kEmbedded) {
          context_to_add = CallContext::kEmbedded;
        } else {
          CHECK_EQ(callsite.context(), CallContext::kControlFlow);
          // Control-flow call sites inherit the caller's context.
          context_to_add = node->context();
        }
        CallContext new_context =
            UnionContexts(context_to_add, callee_node.context());
        if (new_context != callee_node.context()) {
          // Context changed: revisit the callee to propagate further down.
          callee_node.set_context(new_context);
          worklist.push(&callee_node);
        }
      }
    }
  }
  // Every computation in scope must have been reached from some root.
  for (const HloComputation* computation :
       module_->computations(execution_threads_)) {
    CHECK_NE(GetNode(computation).context(), CallContext::kNone);
  }
}
void CallGraph::SetNodeDepths() {
std::queue<CallGraphNode*> worklist;
for (CallGraphNode& node : nodes_) {
node.set_depth(-1);
}
for (const HloComputation* computation :
module_->computations(execution_threads_)) {
CallGraphNode& node = GetNode(computation);
if (node.callers().empty()) {
node.set_depth(0);
worklist.push(&node);
}
}
while (!worklist.empty()) {
CallGraphNode* node = worklist.front();
worklist.pop();
for (const HloComputation* callee : node->callees()) {
CallGraphNode& callee_node = GetNode(callee);
if (callee_node.depth() < node->depth() + 1) {
callee_node.set_depth(node->depth() + 1);
worklist.push(&callee_node);
}
}
}
for (CallGraphNode& node : nodes_) {
CHECK_NE(node.depth(), -1);
}
}
// Factory: builds the call graph for `module` in four passes:
// (1) create one node per computation and record its outgoing call sites,
// (2) wire up caller edges from each call site to its callees,
// (3) compute call contexts, (4) compute node depths.
std::unique_ptr<CallGraph> CallGraph::Build(
    const HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // WrapUnique because the constructor is private.
  auto call_graph =
      absl::WrapUnique<CallGraph>(new CallGraph(module, execution_threads));
  VLOG(3) << "Building call graph for:";
  XLA_VLOG_LINES(3, module->ToString());
  for (HloComputation* computation : module->computations(execution_threads)) {
    auto it_added = call_graph->node_indices_.insert(
        {computation, call_graph->nodes_.size()});
    // Each computation may appear only once in the module.
    CHECK(it_added.second);
    call_graph->nodes_.emplace_back(computation);
    for (HloInstruction* instruction : computation->instructions()) {
      call_graph->nodes_.back().AddCallSiteForInstruction(instruction,
                                                          execution_threads);
    }
  }
  for (const HloComputation* computation :
       module->computations(execution_threads)) {
    for (const CallSite& callsite :
         call_graph->GetNode(computation).callsites()) {
      for (auto* callee : callsite.called_computations()) {
        if (!HloInstruction::IsThreadIncluded(callee->execution_thread(),
                                              execution_threads)) {
          continue;
        }
        // Record the reverse (caller) edge on the callee's node.
        call_graph->GetNode(callee).AddCallerCallSite(callsite);
      }
    }
  }
  call_graph->SetCallContexts();
  call_graph->SetNodeDepths();
  XLA_VLOG_LINES(2, call_graph->ToString());
  return call_graph;
}
// Post-order DFS from `node`: callees are visited before the node itself,
// and each node's visitor runs at most once thanks to `visited`.
absl::Status CallGraph::VisitNodesInternal(
    VisitorFunction visitor_func, const CallGraphNode& node,
    absl::flat_hash_set<const CallGraphNode*>* visited) const {
  if (!visited->insert(&node).second) {
    // Already visited via another path.
    return absl::OkStatus();
  }
  for (const HloComputation* computation : node.callees()) {
    TF_RETURN_IF_ERROR(
        VisitNodesInternal(visitor_func, GetNode(computation), visited));
  }
  return visitor_func(node);
}
// Runs `visitor_func` over the graph in post order (callees first). When
// `visit_unreachable_nodes` is false only the entry computation's subgraph
// is covered; otherwise a traversal is started from every caller-less root.
absl::Status CallGraph::VisitNodes(VisitorFunction visitor_func,
                                   bool visit_unreachable_nodes) const {
  absl::flat_hash_set<const CallGraphNode*> visited;
  if (!visit_unreachable_nodes) {
    return VisitNodesInternal(visitor_func,
                              GetNode(module_->entry_computation()), &visited);
  }
  for (const CallGraphNode& node : nodes()) {
    if (node.callers().empty()) {
      TF_RETURN_IF_ERROR(VisitNodesInternal(visitor_func, node, &visited));
    }
  }
  return absl::OkStatus();
}
// A call graph is "flattened" when no computation is called in both
// control-flow and embedded contexts, and no non-async computation has more
// than one control-flow call site.
bool CallGraph::IsFlattened() const {
  for (const CallGraphNode& node : nodes_) {
    if (node.context() == CallContext::kBoth) {
      return false;
    }
    const bool multiply_called_control_flow =
        node.context() == CallContext::kControlFlow &&
        node.caller_callsites().size() > 1;
    if (multiply_called_control_flow &&
        !node.computation()->IsAsyncComputation()) {
      return false;
    }
  }
  return true;
}
std::vector<HloInstruction*> CallGraph::GetComputationCallers(
const HloComputation* c) const {
std::vector<HloInstruction*> callers;
for (const auto& callsite : GetNode(c).caller_callsites()) {
callers.push_back(callsite.instruction());
}
return callers;
}
// Walks `a` and `b` up through their (unique) callers until both land in
// the same computation; returns that ancestor pair, or {nullptr, nullptr}
// when no such pair exists (e.g. an ambiguous multi-caller computation is
// reached on the way up).
std::pair<HloInstruction*, HloInstruction*>
CallGraph::NearestAncestorsInSameComputation(HloInstruction* a,
                                             HloInstruction* b) const {
  // Returns the unique calling instruction of `instruction`'s computation,
  // or nullptr when the caller is ambiguous (multiple call sites).
  auto next_caller = [this](HloInstruction* instruction) -> HloInstruction* {
    const CallGraphNode& node = GetNode(instruction->parent());
    if (node.caller_callsites().size() != 1) {
      if (instruction->parent()->IsAsyncComputation()) {
        // NOTE(review): indexes [0] even though size() != 1 here, which
        // includes size() == 0 — assumes an async computation always has at
        // least one caller; confirm, otherwise this is out-of-bounds.
        return node.caller_callsites()[0].instruction();
      }
      return nullptr;
    }
    return node.caller_callsites()[0].instruction();
  };
  HloInstruction* a_ancestor = a;
  HloInstruction* b_ancestor = b;
  int a_depth = GetNode(a->parent()).depth();
  int b_depth = GetNode(b->parent()).depth();
  // First equalize depths by walking the deeper instruction upward.
  if (a_depth > b_depth) {
    for (int i = 0; i < a_depth - b_depth; ++i) {
      a_ancestor = next_caller(a_ancestor);
      if (a_ancestor == nullptr) {
        return {nullptr, nullptr};
      }
    }
  } else if (b_depth > a_depth) {
    for (int i = 0; i < b_depth - a_depth; ++i) {
      b_ancestor = next_caller(b_ancestor);
      if (b_ancestor == nullptr) {
        return {nullptr, nullptr};
      }
    }
  }
  // Then walk both up in lock step until their computations coincide.
  while ((a_ancestor != nullptr) && (b_ancestor != nullptr)) {
    if (a_ancestor->parent() == b_ancestor->parent()) {
      return {a_ancestor, b_ancestor};
    }
    a_ancestor = next_caller(a_ancestor);
    b_ancestor = next_caller(b_ancestor);
  }
  return {nullptr, nullptr};
}
template <typename T>
absl::flat_hash_set<const T*> CallGraph::NearestCommonAncestorsHelper(
std::vector<const T*>& starting_nodes) {
CHECK(
(std::is_same_v<T, HloInstruction> || std::is_same_v<T, HloComputation>));
if (starting_nodes.empty()) {
return absl::flat_hash_set<const T*>();
}
if (starting_nodes.size() == 1) {
return absl::flat_hash_set<const T*>({starting_nodes[0]});
}
absl::flat_hash_set<const T*> nearest_common_ancestors;
std::vector<absl::flat_hash_set<const T*>> visited_ancestors;
visited_ancestors.reserve(starting_nodes.size());
for (int idx = 0; idx < starting_nodes.size(); ++idx) {
visited_ancestors.push_back(
absl::flat_hash_set<const T*>({starting_nodes[idx]}));
}
std::vector<std::deque<const T*>> bfs_queues;
bfs_queues.reserve(starting_nodes.size());
for (int idx = 0; idx < starting_nodes.size(); ++idx) {
bfs_queues.push_back(std::deque<const T*>({starting_nodes[idx]}));
}
auto is_bfs_finished = [&bfs_queues]() -> bool {
return absl::c_all_of(
bfs_queues, [](std::deque<const T*> queue) { retur | #include "xla/service/call_graph.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
// Test fixture providing helpers to build small computations used as call
// graph vertices: a unary scalar computation, computations that call or map
// a callee N times, and an f32[] -> pred[] while-condition computation.
class CallGraphTest : public HloTestBase {
 protected:
  // Builds an f32[] -> f32[] computation applying `opcode` to its parameter.
  std::unique_ptr<HloComputation> MakeScalarComputation(
      HloOpcode opcode = HloOpcode::kNegate) {
    HloComputation::Builder builder(TestName() + ".ScalarComputation");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    builder.AddInstruction(
        HloInstruction::CreateUnary(kScalarShape, opcode, param0));
    return builder.Build();
  }
  // Builds a computation containing `callsites` chained kMap instructions,
  // each mapping `map_computation` (embedded call context).
  std::unique_ptr<HloComputation> MakeMappingComputation(
      HloComputation* map_computation, int64_t callsites) {
    HloComputation::Builder builder(TestName() + ".MappingComputation");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    HloInstruction* last_value = param0;
    for (int64_t i = 0; i < callsites; ++i) {
      last_value = builder.AddInstruction(HloInstruction::CreateMap(
          kScalarShape, {last_value}, map_computation));
    }
    return builder.Build();
  }
  // Builds a computation containing `callsites` chained kCall instructions
  // to `callee_computation` (control-flow call context).
  std::unique_ptr<HloComputation> MakeCallingComputation(
      HloComputation* callee_computation, int64_t callsites,
      const std::string& suffix = ".CallingComputation") {
    HloComputation::Builder builder(TestName() + suffix);
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    HloInstruction* last_value = param0;
    for (int64_t i = 0; i < callsites; ++i) {
      last_value = builder.AddInstruction(HloInstruction::CreateCall(
          kScalarShape, {last_value}, callee_computation));
    }
    return builder.Build();
  }
  // Builds an f32[] -> pred[] computation (param > 0) usable as a while
  // loop condition.
  std::unique_ptr<HloComputation> MakeConditionComputation() {
    HloComputation::Builder builder(TestName() + ".ConditionComputation");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    HloInstruction* zero = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
    builder.AddInstruction(
        HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
                                      zero, ComparisonDirection::kGt));
    return builder.Build();
  }
  // Scalar f32 shape shared by all helper computations.
  const Shape kScalarShape = ShapeUtil::MakeShape(F32, {});
};
// A module with a single computation yields a one-node graph: the entry is
// a depth-0 control-flow root with no edges in either direction.
TEST_F(CallGraphTest, SingletonComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeScalarComputation());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(1, call_graph->nodes().size());
  EXPECT_TRUE(call_graph->IsFlattened());
  const CallGraphNode& node = call_graph->GetNode(computation);
  EXPECT_EQ(computation, node.computation());
  EXPECT_EQ(node.depth(), 0);
  EXPECT_TRUE(node.callsites().empty());
  EXPECT_TRUE(node.callees().empty());
  EXPECT_TRUE(node.caller_callsites().empty());
  EXPECT_TRUE(node.callers().empty());
  EXPECT_EQ(CallContext::kControlFlow, node.context());
}
// A computation not reachable from the entry still appears in the graph as
// its own depth-0 control-flow root.
TEST_F(CallGraphTest, UnreachableComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeScalarComputation());
  HloComputation* unreachable_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(2, call_graph->nodes().size());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  EXPECT_EQ(entry_node.depth(), 0);
  EXPECT_EQ(entry_computation, entry_node.computation());
  EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
  const CallGraphNode& unreachable_node =
      call_graph->GetNode(unreachable_computation);
  EXPECT_EQ(unreachable_node.depth(), 0);
  EXPECT_EQ(unreachable_computation, unreachable_node.computation());
  EXPECT_EQ(CallContext::kControlFlow, unreachable_node.context());
}
// Five kMap call sites to the same callee: the callee node collects five
// caller call sites (but one distinct caller) and an embedded context.
TEST_F(CallGraphTest, ParallelComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* map_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation* entry_computation = module->AddEntryComputation(
      MakeMappingComputation(map_computation, 5));
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(2, call_graph->nodes().size());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  EXPECT_EQ(entry_computation, entry_node.computation());
  EXPECT_EQ(entry_node.depth(), 0);
  EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
  EXPECT_EQ(5, entry_node.callsites().size());
  EXPECT_EQ(1, entry_node.callees().size());
  EXPECT_TRUE(entry_node.caller_callsites().empty());
  EXPECT_TRUE(call_graph->GetComputationCallers(entry_computation).empty());
  EXPECT_TRUE(entry_node.callers().empty());
  const CallGraphNode& map_node = call_graph->GetNode(map_computation);
  EXPECT_EQ(map_computation, map_node.computation());
  EXPECT_EQ(map_node.depth(), 1);
  EXPECT_EQ(CallContext::kEmbedded, map_node.context());
  EXPECT_TRUE(map_node.callsites().empty());
  EXPECT_TRUE(map_node.callees().empty());
  EXPECT_EQ(5, map_node.caller_callsites().size());
  EXPECT_EQ(5, call_graph->GetComputationCallers(map_computation).size());
  EXPECT_EQ(1, map_node.callers().size());
}
// Three kCall sites to the same callee: a multiply-called control-flow
// computation means the graph is not flattened.
TEST_F(CallGraphTest, SequentialComputations) {
  auto module = CreateNewVerifiedModule();
  HloComputation* called_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation* entry_computation = module->AddEntryComputation(
      MakeCallingComputation(called_computation, 3));
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(2, call_graph->nodes().size());
  EXPECT_FALSE(call_graph->IsFlattened());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  EXPECT_EQ(entry_computation, entry_node.computation());
  EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
  EXPECT_EQ(3, entry_node.callsites().size());
  EXPECT_EQ(1, entry_node.callees().size());
  EXPECT_TRUE(entry_node.caller_callsites().empty());
  EXPECT_TRUE(call_graph->GetComputationCallers(entry_computation).empty());
  EXPECT_TRUE(entry_node.callers().empty());
  const CallGraphNode& called_node = call_graph->GetNode(called_computation);
  EXPECT_EQ(called_computation, called_node.computation());
  EXPECT_EQ(CallContext::kControlFlow, called_node.context());
  EXPECT_TRUE(called_node.callsites().empty());
  EXPECT_TRUE(called_node.callees().empty());
  EXPECT_EQ(3, called_node.caller_callsites().size());
  EXPECT_EQ(3, call_graph->GetComputationCallers(called_computation).size());
  EXPECT_EQ(1, called_node.callers().size());
}
// One computation invoked both via kCall (control flow) and kMap
// (embedded): its context unions to kBoth and the graph is not flattened.
TEST_F(CallGraphTest, ContextBothComputations) {
  auto module = CreateNewVerifiedModule();
  HloComputation* subcomputation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, kScalarShape, "param0"));
  HloInstruction* call = builder.AddInstruction(
      HloInstruction::CreateCall(kScalarShape, {param0}, subcomputation));
  HloInstruction* map = builder.AddInstruction(
      HloInstruction::CreateMap(kScalarShape, {call}, subcomputation));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(2, call_graph->nodes().size());
  EXPECT_FALSE(call_graph->IsFlattened());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  EXPECT_EQ(entry_computation, entry_node.computation());
  EXPECT_EQ(2, entry_node.callsites().size());
  // First call site: the kCall instruction, in a control-flow context.
  const CallSite& call_callsite = entry_node.callsites()[0];
  EXPECT_EQ(call, call_callsite.instruction());
  EXPECT_THAT(call_callsite.called_computations(),
              UnorderedElementsAre(subcomputation));
  EXPECT_EQ(CallContext::kControlFlow, call_callsite.context());
  EXPECT_EQ(entry_node.GetCallSite(call), &call_callsite);
  // Second call site: the kMap instruction, in an embedded context.
  const CallSite& map_callsite = entry_node.callsites()[1];
  EXPECT_EQ(map, map_callsite.instruction());
  EXPECT_THAT(map_callsite.called_computations(),
              UnorderedElementsAre(subcomputation));
  EXPECT_EQ(CallContext::kEmbedded, map_callsite.context());
  EXPECT_EQ(entry_node.GetCallSite(map), &map_callsite);
  const CallGraphNode& sub_node = call_graph->GetNode(subcomputation);
  EXPECT_EQ(sub_node.depth(), 1);
  EXPECT_EQ(CallContext::kBoth, sub_node.context());
}
// A kConditional produces a single call site whose called computations are
// both branches; each branch gets one caller edge from the entry.
TEST_F(CallGraphTest, ComputationWithConditional) {
  auto module = CreateNewVerifiedModule();
  HloComputation* true_computation =
      module->AddEmbeddedComputation(MakeScalarComputation(HloOpcode::kCeil));
  HloComputation* false_computation =
      module->AddEmbeddedComputation(MakeScalarComputation(HloOpcode::kFloor));
  HloComputation::Builder builder(TestName());
  HloInstruction* pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloInstruction* const1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
  HloInstruction* const2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.6f)));
  HloInstruction* conditional =
      builder.AddInstruction(HloInstruction::CreateConditional(
          kScalarShape, pred, const1, true_computation, const2,
          false_computation));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(3, call_graph->nodes().size());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  EXPECT_EQ(entry_node.depth(), 0);
  EXPECT_EQ(entry_computation, entry_node.computation());
  EXPECT_EQ(1, entry_node.callsites().size());
  // The conditional is one call site with two called computations.
  const CallSite& conditional_callsite = entry_node.callsites()[0];
  EXPECT_EQ(conditional, conditional_callsite.instruction());
  EXPECT_THAT(conditional_callsite.called_computations(),
              UnorderedElementsAre(true_computation, false_computation));
  EXPECT_EQ(CallContext::kControlFlow, conditional_callsite.context());
  EXPECT_EQ(entry_node.GetCallSite(conditional), &conditional_callsite);
  const CallGraphNode& true_node = call_graph->GetNode(true_computation);
  EXPECT_EQ(true_node.depth(), 1);
  EXPECT_TRUE(true_node.callees().empty());
  EXPECT_EQ(1, true_node.callers().size());
  EXPECT_EQ(entry_computation, true_node.callers()[0]);
  const CallGraphNode& false_node = call_graph->GetNode(false_computation);
  EXPECT_EQ(false_node.depth(), 1);
  EXPECT_TRUE(false_node.callees().empty());
  EXPECT_EQ(1, false_node.callers().size());
  EXPECT_EQ(entry_computation, false_node.callers()[0]);
}
// Builds a five-computation graph:
//   entry --while--> {cond, a};  a --call--> c, a --while--> {cond, b};
//   b --map--> c
// and checks depths, contexts (c is called in both contexts -> kBoth),
// post-order visitation, and the Dominates relation.
TEST_F(CallGraphTest, ComplexGraph) {
  auto module = CreateNewVerifiedModule();
  HloComputation* cond_computation =
      module->AddEmbeddedComputation(MakeConditionComputation());
  HloComputation* c_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation* b_computation = module->AddEmbeddedComputation(
      MakeMappingComputation(c_computation, 1));
  HloComputation* a_computation;
  {
    HloComputation::Builder builder(TestName() + ".a");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    HloInstruction* call = builder.AddInstruction(
        HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
    builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, b_computation, call));
    a_computation = module->AddEmbeddedComputation(builder.Build());
  }
  HloComputation* entry_computation;
  {
    HloComputation::Builder builder(TestName() + ".entry");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, a_computation, param0));
    entry_computation = module->AddEntryComputation(builder.Build());
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(5, call_graph->nodes().size());
  EXPECT_FALSE(call_graph->IsFlattened());
  const CallGraphNode& entry_node = call_graph->GetNode(entry_computation);
  const CallGraphNode& a_node = call_graph->GetNode(a_computation);
  const CallGraphNode& b_node = call_graph->GetNode(b_computation);
  const CallGraphNode& c_node = call_graph->GetNode(c_computation);
  const CallGraphNode& cond_node = call_graph->GetNode(cond_computation);
  // Depth is the longest caller chain; c is deepest via entry->a->b->c.
  EXPECT_EQ(entry_node.depth(), 0);
  EXPECT_EQ(a_node.depth(), 1);
  EXPECT_EQ(b_node.depth(), 2);
  EXPECT_EQ(c_node.depth(), 3);
  EXPECT_EQ(cond_node.depth(), 2);
  ASSERT_EQ(1, entry_node.callsites().size());
  auto called_computations = entry_node.callsites()[0].called_computations();
  EXPECT_THAT(called_computations,
              UnorderedElementsAre(cond_computation, a_computation));
  EXPECT_EQ(CallContext::kControlFlow, entry_node.context());
  EXPECT_TRUE(c_node.callsites().empty());
  EXPECT_THAT(c_node.callers(),
              UnorderedElementsAre(a_computation, b_computation));
  // c is called via kCall (control flow) and kMap (embedded) -> kBoth.
  EXPECT_EQ(CallContext::kBoth, c_node.context());
  // VisitNodes must see each node exactly once, callees before callers.
  std::vector<const HloComputation*> visited;
  TF_ASSERT_OK(call_graph->VisitNodes([&visited](const CallGraphNode& node) {
    visited.push_back(node.computation());
    return absl::OkStatus();
  }));
  EXPECT_EQ(visited.size(), 5);
  EXPECT_EQ(
      absl::flat_hash_set<const HloComputation*>(visited.begin(), visited.end())
          .size(),
      5);
  auto index_of = [&visited](const HloComputation* comp) {
    auto it = absl::c_find(visited, comp);
    EXPECT_NE(it, visited.end());
    return std::distance(visited.begin(), it);
  };
  EXPECT_EQ(4, index_of(entry_computation));
  EXPECT_LT(index_of(cond_computation), index_of(a_computation));
  EXPECT_LT(index_of(c_computation), index_of(b_computation));
  EXPECT_LT(index_of(b_computation), index_of(a_computation));
  // Dominance: entry dominates everything; a dominates b and c; cond is
  // reached from multiple places so nothing but entry dominates it.
  EXPECT_TRUE(call_graph->Dominates(entry_computation, entry_computation));
  EXPECT_TRUE(call_graph->Dominates(entry_computation, a_computation));
  EXPECT_TRUE(call_graph->Dominates(entry_computation, b_computation));
  EXPECT_TRUE(call_graph->Dominates(entry_computation, c_computation));
  EXPECT_TRUE(call_graph->Dominates(entry_computation, cond_computation));
  EXPECT_FALSE(call_graph->Dominates(a_computation, entry_computation));
  EXPECT_FALSE(call_graph->Dominates(b_computation, entry_computation));
  EXPECT_FALSE(call_graph->Dominates(c_computation, entry_computation))
;
  EXPECT_FALSE(call_graph->Dominates(cond_computation, entry_computation));
  EXPECT_TRUE(call_graph->Dominates(a_computation, a_computation));
  EXPECT_TRUE(call_graph->Dominates(a_computation, b_computation));
  EXPECT_TRUE(call_graph->Dominates(a_computation, c_computation));
  EXPECT_FALSE(call_graph->Dominates(b_computation, a_computation));
  EXPECT_FALSE(call_graph->Dominates(c_computation, a_computation));
  EXPECT_FALSE(call_graph->Dominates(a_computation, cond_computation));
  EXPECT_TRUE(call_graph->Dominates(b_computation, b_computation));
  EXPECT_FALSE(call_graph->Dominates(b_computation, c_computation));
  EXPECT_FALSE(call_graph->Dominates(b_computation, cond_computation));
  EXPECT_TRUE(call_graph->Dominates(c_computation, c_computation));
  EXPECT_FALSE(call_graph->Dominates(c_computation, cond_computation));
  EXPECT_FALSE(call_graph->Dominates(cond_computation, c_computation));
  EXPECT_TRUE(call_graph->Dominates(cond_computation, cond_computation));
}
// Same five-computation graph as ComplexGraph, checking
// NearestAncestorsInSameComputation: identical instructions, ambiguous
// (multi-caller) computations yielding {nullptr, nullptr}, and ancestor
// pairs across differing depths.
TEST_F(CallGraphTest, ComplexGraphNearestAncestors) {
  auto module = CreateNewVerifiedModule();
  HloComputation* cond_computation =
      module->AddEmbeddedComputation(MakeConditionComputation());
  HloComputation* c_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation* b_computation = module->AddEmbeddedComputation(
      MakeMappingComputation(c_computation, 1));
  HloInstruction* b_map = b_computation->root_instruction();
  HloComputation* a_computation;
  HloInstruction* a_call;
  HloInstruction* a_while;
  {
    HloComputation::Builder builder(TestName() + ".a");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    a_call = builder.AddInstruction(
        HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
    a_while = builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, b_computation, a_call));
    a_computation = module->AddEmbeddedComputation(builder.Build());
  }
  HloComputation* entry_computation;
  HloInstruction* entry_while;
  {
    HloComputation::Builder builder(TestName() + ".entry");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    entry_while = builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, a_computation, param0));
    entry_computation = module->AddEntryComputation(builder.Build());
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(5, call_graph->nodes().size());
  // An instruction is its own nearest ancestor with itself.
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_call, a_call),
            std::make_pair(a_call, a_call));
  std::pair<HloInstruction*, HloInstruction*> null_pair = {nullptr, nullptr};
  // c has two callers, so walking up from inside c is ambiguous.
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(
                b_map, c_computation->root_instruction()),
            null_pair);
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(b_map, entry_while),
            std::make_pair(entry_while, entry_while));
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(b_map, a_call),
            std::make_pair(a_while, a_call));
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_while, a_call),
            std::make_pair(a_while, a_call));
  EXPECT_EQ(call_graph->NearestAncestorsInSameComputation(a_while, b_map),
            std::make_pair(a_while, a_while));
}
TEST_F(CallGraphTest, NearestCommonAncestorInstructions) {
const std::string& hlo_string = R"(
HloModule module
ENTRY computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
p.2 = f32[10] parameter(2)
mul.0 = f32[10] multiply(p.1, p.2)
sub.0 = f32[10] subtract(add.0, mul.0)
add.1 = f32[10] add(add.0, p.2)
ROOT add.2 = f32[10] add(sub.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
namespace op = testing::opcode_matchers;
auto p0 = FindInstruction(hlo_module.get(), "p.0");
EXPECT_THAT(p0, op::Parameter());
auto p1 = FindInstruction(hlo_module.get(), "p.1");
EXPECT_THAT(p1, op::Parameter());
auto p2 = FindInstruction(hlo_module.get(), "p.2");
EXPECT_THAT(p2, op::Parameter());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto mul0 = FindInstruction(hlo_module.get(), "mul.0");
EXPECT_THAT(mul0, op::Multiply());
auto sub0 = FindInstruction(hlo_module.get(), "sub.0");
EXPECT_THAT(sub0, op::Subtract());
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto add2 = FindInstruction(hlo_module.get(), "add.2");
EXPECT_THAT(add2, op::Add());
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
EXPECT_EQ(1, call_graph->nodes().size());
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p0})),
absl::flat_hash_set<const HloInstruction*>({p0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1})),
absl::flat_hash_set<const HloInstruction*>({add0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1, p2})),
absl::flat_hash_set<const HloInstruction*>({sub0, add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, add1})),
absl::flat_hash_set<const HloInstruction*>({add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p1, add0})),
absl::flat_hash_set<const HloInstruction*>({add0}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, p2})),
absl::flat_hash_set<const HloInstruction*>({sub0, add1}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p0, add2})),
absl::flat_hash_set<const HloInstruction*>({add2}));
EXPECT_EQ(call_graph->NearestCommonAncestorInstructions(
std::vector<const HloInstruction*>({p2, mul0, sub0})),
absl::flat_hash_set<const HloInstruction*>({sub0}));
}
// Checks NearestCommonAncestorComputations on a nested call structure:
//   entry: while(cond, a)
//   a:     call(c) feeding while(cond, b)
//   b:     map over c
// Computations only reachable through `a` have `a` as their common ancestor.
TEST_F(CallGraphTest, NearestCommonAncestorComputations) {
  auto module = CreateNewVerifiedModule();
  HloComputation* cond_computation =
      module->AddEmbeddedComputation(MakeConditionComputation());
  HloComputation* c_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  HloComputation* b_computation = module->AddEmbeddedComputation(
      MakeMappingComputation(c_computation, 1));
  HloComputation* a_computation;
  {
    // a: call(c) whose result feeds a while(cond, b).
    HloComputation::Builder builder(TestName() + ".a");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    HloInstruction* a_call = builder.AddInstruction(
        HloInstruction::CreateCall(kScalarShape, {param0}, c_computation));
    builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, b_computation, a_call));
    a_computation = module->AddEmbeddedComputation(builder.Build());
  }
  HloComputation* entry_computation;
  {
    // entry: while(cond, a).
    HloComputation::Builder builder(TestName() + ".entry");
    HloInstruction* param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, kScalarShape, "param0"));
    builder.AddInstruction(HloInstruction::CreateWhile(
        kScalarShape, cond_computation, a_computation, param0));
    entry_computation = module->AddEntryComputation(builder.Build());
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  EXPECT_EQ(5, call_graph->nodes().size());
  // A computation is its own nearest common ancestor.
  EXPECT_EQ(
      call_graph->NearestCommonAncestorComputations(
          std::vector<const HloComputation*>({a_computation, a_computation})),
      absl::flat_hash_set<const HloComputation*>({a_computation}));
  // b calls c (via map), so b is the ancestor of {b, c}.
  EXPECT_EQ(
      call_graph->NearestCommonAncestorComputations(
          std::vector<const HloComputation*>({b_computation, c_computation})),
      absl::flat_hash_set<const HloComputation*>({b_computation}));
  EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
                std::vector<const HloComputation*>(
                    {a_computation, b_computation, c_computation})),
            absl::flat_hash_set<const HloComputation*>({a_computation}));
  // cond and c only meet at a, which calls both (directly or via whiles).
  EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
                std::vector<const HloComputation*>(
                    {c_computation, cond_computation})),
            absl::flat_hash_set<const HloComputation*>({a_computation}));
  EXPECT_EQ(call_graph->NearestCommonAncestorComputations(
                std::vector<const HloComputation*>(
                    {b_computation, cond_computation})),
            absl::flat_hash_set<const HloComputation*>({a_computation}));
}
// A call graph built from a module with a single computation visits exactly
// that one node.
TEST_F(CallGraphTest, VisitSingletonComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeScalarComputation());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());

  std::vector<HloComputation*> seen;
  auto record_node = [&seen](const CallGraphNode& node) {
    seen.push_back(node.computation());
    return absl::OkStatus();
  };
  TF_ASSERT_OK(call_graph->VisitNodes(record_node));
  EXPECT_THAT(seen, UnorderedElementsAre(computation));
}
// VisitNodes skips computations not reachable from the entry by default;
// passing true for its second argument includes them as well.
TEST_F(CallGraphTest, VisitUnreachableComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeScalarComputation());
  // Embedded but never called from the entry: unreachable in the call graph.
  HloComputation* unreachable_computation =
      module->AddEmbeddedComputation(MakeScalarComputation());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
  // Default (false): only the entry computation is visited.
  {
    std::vector<const HloComputation*> visited;
    TF_ASSERT_OK(call_graph->VisitNodes(
        [&visited](const CallGraphNode& node) {
          visited.push_back(node.computation());
          return absl::OkStatus();
        },
        false));
    EXPECT_EQ(visited.size(), 1);
    EXPECT_EQ(visited[0], entry_computation);
  }
  // With true, the unreachable computation is visited too.
  {
    std::vector<HloComputation*> visited;
    TF_ASSERT_OK(call_graph->VisitNodes(
        [&visited](const CallGraphNode& node) {
          visited.push_back(node.computation());
          return absl::OkStatus();
        },
        true));
    EXPECT_EQ(visited.size(), 2);
    EXPECT_THAT(visited, UnorderedElementsAre(entry_computation,
                                              unreachable_computation));
  }
}
// An error returned by the visitor callback must propagate out of VisitNodes
// unchanged.
TEST_F(CallGraphTest, VisitWithError) {
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(MakeScalarComputation());
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());

  auto failing_visitor = [](const CallGraphNode&) {
    return Internal("Visitation failed");
  };
  const absl::Status status = call_graph->VisitNodes(failing_visitor);

  ASSERT_FALSE(status.ok());
  ASSERT_EQ(status.code(), tsl::error::INTERNAL);
  ASSERT_THAT(status.message(), ::testing::HasSubstr("Visitation failed"));
}
TEST_F(CallGraphTest, ExecutionThread) {
HloComputation::Builder builder(TestName());
constexpr char kParallelThreadName[] = "parallel_thread";
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
kScalarShape, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* main_thread_computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
main_thread_computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, kParallelThreadName));
auto* parallel_thread_computation = async_done->async_wrapped_computation();
{
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get());
EXPECT_EQ(call_graph->nodes().size(), 2);
const CallGraphNode& main_thread_node =
call_graph->GetNode(main_thread_computation);
const CallGraphNode& parallel_thread_node =
call_graph->GetNode(parallel_thread_computation);
EXPECT_EQ(main_thread_node.callers().size(), 0);
EXPECT_EQ(main_thread_node.callees().size(), 1);
EXPECT_EQ(main_thread_node.depth(), 0);
EXPECT_EQ(parallel_thread_node.callers().size(), 1);
EXPECT_EQ(parallel_thread_node.callees().size(), 0); |
1,931 | cpp | tensorflow/tensorflow | topk_rewriter | third_party/xla/xla/service/topk_rewriter.cc | third_party/xla/xla/service/topk_rewriter_test.cc | #ifndef XLA_SERVICE_TOPK_REWRITER_H_
#define XLA_SERVICE_TOPK_REWRITER_H_
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Rewrites a sort-followed-by-first-k-slice pattern into a "TopK" custom
// call, gated by a caller-provided profitability callback.
class TopkRewriter : public HloModulePass {
 public:
  // `is_profitable_to_convert` is invoked with the candidate sort and the
  // extracted k; the rewrite is applied only when it returns true.
  explicit TopkRewriter(std::function<bool(const HloSortInstruction*, int64_t)>
                            is_profitable_to_convert)
      : is_profitable_to_convert_(std::move(is_profitable_to_convert)) {}

  absl::string_view name() const override { return "topk-rewriter"; }

  using HloPassInterface::Run;
  // Returns true iff at least one sort was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 protected:
  // Returns k if `inst` is a sort whose outputs are consumed only through
  // first-k slices (the TopK pattern); std::nullopt otherwise.
  std::optional<int64_t> SortIsInTopK(HloInstruction* inst);

  // Rewrites all matching sorts in `module` into "TopK" custom calls.
  absl::StatusOr<bool> TransformToCustomCall(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads);

 private:
  // Cost-model hook deciding whether a given (sort, k) should be converted.
  std::function<bool(const HloSortInstruction*, int64_t)>
      is_profitable_to_convert_;

  // Returns the created custom call, or nullptr when `inst` did not match.
  absl::StatusOr<HloInstruction*> TransformPatternToCustomCall(
      HloInstruction* inst);
};
// Lowers TopK operations ("TopK" custom calls and kTopK ops) back into a
// sort followed by slices — the inverse direction of TopkRewriter.
class TopkDecomposer : public HloModulePass {
 public:
  absl::string_view name() const override { return "topk-decomposer"; }

  // `should_decompose`, when set, filters which TopK instructions to lower.
  explicit TopkDecomposer(HloPredicate should_decompose = {})
      : should_decompose_(should_decompose) {}

  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  HloPredicate should_decompose_;
};
}
#endif
#include "xla/service/topk_rewriter.h"
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "xla/client/lib/comparators.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace m = match;
// Converts an XlaBuilder-produced computation into an HloComputation living
// in the same module as `sibling_computation`.
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
    XlaComputation& comp, HloComputation* sibling_computation) {
  TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
  HloModuleConfig config(program_shape);
  // First materialize the builder output as a standalone module, then
  // deep-clone its entry computation into the destination module.
  TF_ASSIGN_OR_RETURN(auto built_module,
                      HloModule::CreateFromProto(comp.proto(), config));
  HloModule* const destination = sibling_computation->parent();
  HloCloneContext clone_context(destination);
  return destination->DeepCloneComputation(built_module->entry_computation(),
                                           &clone_context);
}
// Returns true if `comp` structurally matches one of the known
// "greater-than" comparator shapes accepted by the TopK rewrite:
//  - bitcast-based sign-magnitude compares (negative floats remapped via
//    INT32_MAX - bits so integer GT gives a total order),
//  - the same with the max constant materialized through a convert,
//  - bf16 inputs converted to f32 first,
//  - generic IEC 559 flips using an XOR mask,
//  - plain Gt on matching parameter types, and
//  - Gt wrapped in select(broadcast(const), gt, broadcast(const)).
static bool IsNanSafeGt(HloComputation* comp) {
  namespace m = match;
  // f32: select(bits < 0, bitcast(INT32_MAX - ubits), bits).
  auto match_bitcast_f32 = [](int64_t parameter_number) {
    auto param = m::Parameter(parameter_number)
                     .WithShape(m::Shape().WithElementType(F32));
    auto param_s32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
    auto param_u32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
    return m::Select(
        m::Lt(param_s32, m::ConstantScalar(0)),
        m::BitcastConvert(
            m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
                        param_u32))
            .WithShape(m::Shape().WithElementType(S32)),
        param_s32);
  };
  // Same pattern, but INT32_MAX arrives via an explicit convert to u32.
  auto match_bitcast_f32_with_convert = [](int64_t parameter_number) {
    auto param = m::Parameter(parameter_number)
                     .WithShape(m::Shape().WithElementType(F32));
    auto param_s32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
    auto param_u32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
    auto max_u32 =
        m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
            .WithShape(m::Shape().WithElementType(U32));
    return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
                     m::BitcastConvert(m::Subtract(max_u32, param_u32))
                         .WithShape(m::Shape().WithElementType(S32)),
                     param_s32);
  };
  // bf16 variant: parameter converted to f32 before the bitcast trick.
  auto match_bitcast_bf16 = [](int64_t parameter_number) {
    auto param = m::Convert(m::Parameter(parameter_number)
                                .WithShape(m::Shape().WithElementType(BF16)))
                     .WithShape(m::Shape().WithElementType(F32));
    auto param_s32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
    auto param_u32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
    return m::Select(
        m::Lt(param_s32, m::ConstantScalar(0)),
        m::BitcastConvert(
            m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
                        param_u32))
            .WithShape(m::Shape().WithElementType(S32)),
        param_s32);
  };
  // bf16 variant with the converted max constant.
  auto match_bitcast_bf16_with_convert = [](int64_t parameter_number) {
    auto param = m::Convert(m::Parameter(parameter_number)
                                .WithShape(m::Shape().WithElementType(BF16)))
                     .WithShape(m::Shape().WithElementType(F32));
    auto param_s32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
    auto param_u32 =
        m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
    auto max_u32 =
        m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
            .WithShape(m::Shape().WithElementType(U32));
    return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
                     m::BitcastConvert(m::Subtract(max_u32, param_u32))
                         .WithShape(m::Shape().WithElementType(S32)),
                     param_s32);
  };
  // Generic IEC 559 flip: negative values XORed with the all-but-sign mask.
  auto match_generic_iec559 = [](int64_t parameter_number,
                                 PrimitiveType fp_type,
                                 PrimitiveType int_type) {
    auto param = m::Parameter(parameter_number)
                     .WithShape(m::Shape().WithElementType(fp_type));
    auto signed_value = m::BitcastConvert(param).WithShape(
        m::Shape().WithElementType(int_type));
    int64_t bit_width = primitive_util::BitWidth(fp_type);
    auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
    auto flipped_value = m::XorAnyOrder(max_value, signed_value);
    auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
    return m::Select(is_negative, flipped_value, signed_value);
  };
  // Same, with an initial convert (e.g. bf16 -> f32) before the bitcast.
  auto match_generic_iec559_with_convert =
      [](int64_t parameter_number, PrimitiveType param_type,
         PrimitiveType fp_type, PrimitiveType int_type) {
        auto param = m::Parameter(parameter_number)
                         .WithShape(m::Shape().WithElementType(param_type));
        auto convert =
            m::Convert(param).WithShape(m::Shape().WithElementType(fp_type));
        auto signed_value = m::BitcastConvert(convert).WithShape(
            m::Shape().WithElementType(int_type));
        int64_t bit_width = primitive_util::BitWidth(fp_type);
        auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
        auto flipped_value = m::XorAnyOrder(max_value, signed_value);
        auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
        return m::Select(is_negative, flipped_value, signed_value);
      };
  // s32 parameters can be compared directly, no remapping needed.
  auto match_s32 = [](int64_t parameter_number) {
    auto param = m::Parameter(parameter_number)
                     .WithShape(m::Shape().WithElementType(S32));
    return param;
  };
  // Plain Gt(param0, param1) over a given element type.
  auto match_compare = [](PrimitiveType type) {
    auto param0 = m::Parameter(0).WithShape(m::Shape().WithElementType(type));
    auto param1 = m::Parameter(1).WithShape(m::Shape().WithElementType(type));
    return m::Gt(param0, param1);
  };
  // select(broadcast(const), Gt(p0, p1), broadcast(const)) with four params
  // (values + s32 indices), as produced for stable sorts.
  auto match_default_compare = [](PrimitiveType type) {
    auto params_with_type = [&](int i, PrimitiveType t) {
      return m::Parameter(i).WithShape(m::Shape().WithElementType(t));
    };
    auto params =
        std::vector({
            params_with_type(0, type), params_with_type(1, type),
            params_with_type(2, S32), params_with_type(3, S32)});
    auto const_true = m::Broadcast(m::Constant());
    auto values_gt = m::Gt(params[0], params[1]);
    return m::Select(const_true, values_gt, const_true);
  };
  // Tries `callback` for every supported element type.
  auto match_all_types = [](HloInstruction* root, auto callback) {
    bool result = false;
    for (auto type : {BF16, F32, S32, U32}) {
      result = result || Match(root, callback(type));
    }
    return result;
  };
  return Match(comp->root_instruction(),
               m::Gt(match_generic_iec559(0, F32, S32),
                     match_generic_iec559(1, F32, S32))) ||
         Match(comp->root_instruction(),
               m::Gt(match_generic_iec559(0, BF16, S16),
                     match_generic_iec559(1, BF16, S16))) ||
         Match(comp->root_instruction(),
               m::Gt(match_generic_iec559_with_convert(0, BF16, F32, S32),
                     match_generic_iec559_with_convert(1, BF16, F32, S32))) ||
         Match(comp->root_instruction(),
               m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
         Match(comp->root_instruction(),
               m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1))) ||
         Match(comp->root_instruction(),
               m::Gt(match_bitcast_f32_with_convert(0),
                     match_bitcast_f32_with_convert(1))) ||
         Match(comp->root_instruction(),
               m::Gt(match_bitcast_bf16_with_convert(0),
                     match_bitcast_bf16_with_convert(1))) ||
         Match(comp->root_instruction(), m::Gt(match_s32(0), match_s32(1))) ||
         match_all_types(comp->root_instruction(), match_compare) ||
         match_all_types(comp->root_instruction(), match_default_compare);
}
// Returns true if the second sort operand is an S32 iota over the data's
// shape — either directly, or as a broadcast of an iota along the sort
// dimension.
static bool HasIota(HloSortInstruction* sort, HloInstruction* data) {
  namespace m = match;
  auto iota_with_dims = [](auto dims) {
    return m::Iota().WithShape(m::Shape().WithElementType(S32).WithDims(dims));
  };
  const HloInstruction* indices = sort->operand(1);
  if (Match(indices, iota_with_dims(data->shape().dimensions()))) {
    return true;
  }
  const std::array<int64_t, 1> sort_dim_only = {
      data->shape().dimensions(sort->sort_dimension())};
  return Match(indices, m::Broadcast(iota_with_dims(sort_dim_only)));
}
// Returns the k value if `inst` matches the TopK pattern: a sort (optionally
// paired with an iota index operand) with a NaN-safe GT comparator, whose
// outputs are consumed only through slices keeping the first k elements
// along the sort dimension. Returns std::nullopt otherwise.
std::optional<int64_t> TopkRewriter::SortIsInTopK(HloInstruction* inst) {
  HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
  if (sort == nullptr) {
    return std::nullopt;
  }
  // Only value-only sorts or (value, index) sorts are recognized.
  if (sort->operand_count() != 1 && sort->operand_count() != 2) {
    return std::nullopt;
  }
  HloInstruction* data = sort->mutable_operand(0);
  // The second operand, when present, must be the index iota.
  if (sort->operand_count() == 2 && !HasIota(sort, data)) {
    return std::nullopt;
  }
  if (!IsNanSafeGt(sort->to_apply())) {
    return std::nullopt;
  }
  const int64_t sort_dim = sort->sort_dimension();

  bool supported = true;
  std::optional<int64_t> k;
  for (HloInstruction* user : sort->users()) {
    const HloInstruction* slice = user;
    if (sort->operand_count() == 2) {
      // Tuple sorts must be consumed through single-use get-tuple-elements.
      if (user->opcode() != HloOpcode::kGetTupleElement ||
          user->user_count() != 1) {
        supported = false;
        break;
      }
      slice = user->users()[0];
    }
    if (slice->opcode() != HloOpcode::kSlice) {
      supported = false;
      break;
    }
    // Slices must start at 0 with stride 1 in every dimension. Use int64_t
    // parameters: slice bounds are 64-bit, and an `int` parameter would
    // silently truncate them (e.g. a start of 2^32 would compare as 0).
    if (absl::c_any_of(slice->slice_starts(),
                       [](int64_t x) { return x != 0; }) ||
        absl::c_any_of(slice->slice_strides(),
                       [](int64_t x) { return x != 1; })) {
      supported = false;
      break;
    }
    // All non-sort dimensions must be kept whole.
    for (int64_t i = 0;
         i < static_cast<int64_t>(slice->slice_limits().size()); ++i) {
      if (i != sort_dim &&
          slice->slice_limits(i) != slice->operand(0)->shape().dimensions(i)) {
        supported = false;
        break;
      }
    }
    if (!supported) {
      break;
    }
    // Every slice must agree on the same k along the sort dimension.
    if (k == std::nullopt) {
      k = slice->slice_limits(sort_dim);
    } else if (k != slice->slice_limits(sort_dim)) {
      supported = false;
      break;
    }
  }
  if (k == std::nullopt || !supported) {
    return std::nullopt;
  }
  return k;
}
// Bundle of the instructions created for one TopK custom call: the call
// itself plus the get-tuple-elements extracting its two outputs.
struct TopKCustomCall {
  HloInstruction* topk;       // the "TopK" custom-call instruction
  HloInstruction* value_gte;  // GTE for output 0 (top-k values)
  HloInstruction* index_gte;  // GTE for output 1 (top-k indices, S32)
};
// Emits a "TopK" custom call over `input`. The input is first normalized to
// a 1D or [batch, n] 2D layout (reshaping away extra dimensions and
// transposing a leading sort dimension to the back), and the value/index
// outputs are transposed/reshaped back to the original layout afterwards.
TopKCustomCall CreateTopKCustomCall(HloInstruction* input,
                                    const int64_t sort_dim, const int64_t k,
                                    HloComputation* comparator,
                                    HloComputation* comp) {
  Shape data_shape = input->shape();
  PrimitiveType element_type = data_shape.element_type();
  bool has_batch = data_shape.rank() >= 2;
  int64_t input_size = data_shape.dimensions(sort_dim);
  int64_t batch_size = 1;
  Shape topk_input_shape;
  if (has_batch) {
    // Collapse all non-sort dimensions into a single batch dimension.
    batch_size =
        ShapeUtil::ElementsIn(data_shape) / data_shape.dimensions(sort_dim);
    topk_input_shape =
        ShapeUtil::MakeShape(element_type, {batch_size, input_size});
    if (data_shape.rank() > 2) {
      // Reshape to 2D, preserving the sort dimension's position.
      input = comp->AddInstruction(HloInstruction::CreateReshape(
          sort_dim == 0
              ? ShapeUtil::MakeShape(element_type, {input_size, batch_size})
              : ShapeUtil::MakeShape(element_type, {batch_size, input_size}),
          input));
    }
    if (sort_dim == 0) {
      // Move the sort dimension to the back, where TopK expects it.
      input = comp->AddInstruction(
          HloInstruction::CreateTranspose(topk_input_shape, input, {1, 0}));
    }
  } else {
    topk_input_shape = data_shape;
  }
  // Output tuple: (values, S32 indices), each with k in the last dimension.
  Shape topk_shape =
      has_batch
          ? ShapeUtil::MakeTupleShape(
                {ShapeUtil::MakeShape(element_type, {batch_size, k}),
                 ShapeUtil::MakeShape(S32, {batch_size, k})})
          : ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(element_type, {k}),
                                       ShapeUtil::MakeShape(S32, {k})});
  HloInstruction* topk = comp->AddInstruction(HloInstruction::CreateCustomCall(
      topk_shape, {input}, comparator, "TopK"));
  HloInstruction* value_gte =
      comp->AddInstruction(HloInstruction::CreateGetTupleElement(
          topk->shape().tuple_shapes(0), topk, 0));
  HloInstruction* index_gte =
      comp->AddInstruction(HloInstruction::CreateGetTupleElement(
          topk->shape().tuple_shapes(1), topk, 1));
  if (has_batch) {
    // Undo the normalization on both outputs.
    if (sort_dim == 0) {
      value_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(element_type, {k, batch_size}), value_gte,
          {1, 0}));
      index_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(S32, {k, batch_size}), index_gte, {1, 0}));
    }
    if (data_shape.rank() > 2) {
      // Restore the original rank, with k in place of the sort dimension.
      std::vector<int64_t> shape_dim(data_shape.dimensions().begin(),
                                     data_shape.dimensions().end());
      shape_dim[sort_dim] = k;
      value_gte = comp->AddInstruction(HloInstruction::CreateReshape(
          ShapeUtil::MakeShape(element_type, shape_dim), value_gte));
      index_gte = comp->AddInstruction(HloInstruction::CreateReshape(
          ShapeUtil::MakeShape(S32, shape_dim), index_gte));
    }
  }
  return {topk, value_gte, index_gte};
}
// Tries to rewrite `inst` into a "TopK" custom call. Returns the new custom
// call on success, or nullptr if `inst` does not match the pattern, the data
// type / sort dimension is unsupported, or the conversion is unprofitable.
absl::StatusOr<HloInstruction*> TopkRewriter::TransformPatternToCustomCall(
    HloInstruction* inst) {
  std::optional<int64_t> k = SortIsInTopK(inst);
  if (!k) {
    return nullptr;
  }
  HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
  HloInstruction* data = sort->mutable_operand(0);
  const PrimitiveType element_type = data->shape().element_type();
  // Only f32 and bf16 inputs are converted.
  if (element_type != F32 && element_type != BF16) {
    return nullptr;
  }
  // The sort dimension must be the first or last dimension.
  const int64_t sort_dim = sort->sort_dimension();
  if (sort_dim != 0 && sort_dim != data->shape().rank() - 1) {
    return nullptr;
  }
  // Defer the final decision to the backend cost model.
  if (!is_profitable_to_convert_(sort, *k)) {
    return nullptr;
  }
  TopKCustomCall topkcc = CreateTopKCustomCall(
      data, sort_dim, k.value(), sort->to_apply(), inst->parent());
  // Reroute every slice consumer of the sort to the matching TopK output.
  for (HloInstruction* user : sort->users()) {
    if (sort->operand_count() == 2) {
      HloInstruction* gte = user;
      for (HloInstruction* slice : gte->users()) {
        if (gte->tuple_index() == 0) {
          TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.value_gte));
        } else if (gte->tuple_index() == 1) {
          TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.index_gte));
        } else {
          LOG(FATAL) << "Sort with more than 2 output isn't supported in "
                        "topk rewriter";
        }
      }
    } else {
      TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(topkcc.value_gte));
    }
  }
  return topkcc.topk;
}
// Walks every computation on the selected execution threads and rewrites
// each matching sort into a "TopK" custom call. Returns true iff anything
// was rewritten.
absl::StatusOr<bool> TopkRewriter::TransformToCustomCall(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool rewrote_any = false;
  for (HloComputation* computation :
       module->computations(execution_threads)) {
    for (HloInstruction* candidate :
         computation->MakeInstructionPostOrder()) {
      TF_ASSIGN_OR_RETURN(HloInstruction * rewritten,
                          TransformPatternToCustomCall(candidate));
      if (rewritten == nullptr) {
        continue;
      }
      VLOG(2) << "Rewritten Topk: " << rewritten->ToString();
      rewrote_any = true;
    }
  }
  return rewrote_any;
}
// Pass entry point; currently delegates entirely to TransformToCustomCall.
absl::StatusOr<bool> TopkRewriter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(const bool transformed,
                      TransformToCustomCall(module, execution_threads));
  return transformed;
}
// Rewrite visitor that lowers TopK — both the "TopK" custom call produced by
// TopkRewriter and the native kTopK op — back into sort + slice form.
class TopkDecomposerVisitor : public DfsHloRewriteVisitor {
 public:
  // `should_decompose`, when non-empty, filters which instructions to lower.
  explicit TopkDecomposerVisitor(HloPredicate should_decompose)
      : should_decompose_(should_decompose) {}

  // Lowers "TopK" custom calls, reusing the comparator attached to the call.
  absl::Status HandleCustomCall(HloInstruction* inst) override {
    if (should_decompose_ && !should_decompose_(inst)) {
      return absl::OkStatus();
    }
    HloCustomCallInstruction* call = DynCast<HloCustomCallInstruction>(inst);
    if (call == nullptr || call->custom_call_target() != "TopK") {
      return absl::OkStatus();
    }
    HloComputation* comparator = call->to_apply();
    return DecomposeTopK(call, comparator);
  }

  // Lowers the native kTopK op; a comparator is synthesized first.
  absl::Status HandleTopK(HloInstruction* topk) override {
    if (should_decompose_ && !should_decompose_(topk)) {
      return absl::OkStatus();
    }
    TF_ASSIGN_OR_RETURN(HloComputation * comparator,
                        CreateVariadicComparator(topk));
    return DecomposeTopK(topk, comparator);
  }

 private:
  // True when `inst` has exactly one user and that user reads only the
  // values output (tuple index 0), letting us skip the index output.
  bool HasSingleUserReadingOnlyTheValueOutput(HloInstruction* inst) {
    return inst->user_count() == 1 && inst->users().front()->tuple_index() == 0;
  }

  // Builds a scalar Gt/Lt comparator (per `largest()`) over the value type,
  // adding S32 index parameters only when the indices are actually used.
  absl::StatusOr<HloComputation*> CreateVariadicComparator(
      HloInstruction* inst) {
    HloTopKInstruction* topk = DynCast<HloTopKInstruction>(inst);
    XlaBuilder b(absl::StrCat("comparator_", topk->name()));
    std::vector<PrimitiveType> ptypes = {
        topk->operand(0)->shape().element_type()};
    if (!HasSingleUserReadingOnlyTheValueOutput(inst)) {
      ptypes.emplace_back(PrimitiveType::S32);
    }
    XlaComputation comparison = topk->largest()
                                    ? CreateScalarGtComputation(ptypes, &b)
                                    : CreateScalarLtComputation(ptypes, &b);
    TF_ASSIGN_OR_RETURN(HloComputation * comparator,
                        BuilderToHloComputation(comparison, topk->parent()));
    return comparator;
  }

  // Replaces `call` with a sort over (input[, iota]) followed by first-k
  // slices of each output.
  absl::Status DecomposeTopK(HloInstruction* call,
                             HloComputation* variadic_comparator) {
    HloComputation* comp = call->parent();
    HloInstruction* input = call->mutable_operand(0);
    Shape iota_shape = input->shape();
    iota_shape.set_element_type(S32);
    size_t sort_dimension = input->shape().dimensions_size() - 1;
    std::vector<int64_t> zeroes(iota_shape.rank(), 0);
    std::vector<int64_t> ones(iota_shape.rank(), 1);
    // Slices the first k elements out of tuple output `index` of `sort`.
    auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
      return comp->AddInstruction(HloInstruction::CreateSlice(
          call->shape().tuple_shapes(index),
          comp->AddInstruction(HloInstruction::CreateGetTupleElement(
              sort->shape().tuple_shapes(index), sort, index)),
          zeroes, call->shape().tuple_shapes(index).dimensions(), ones));
    };
    CHECK_NE(variadic_comparator, nullptr);
    // When only the values are read (and the comparator is value-only), emit
    // a value-only sort and bypass the tuple entirely.
    if (HasSingleUserReadingOnlyTheValueOutput(call) &&
        variadic_comparator->num_parameters() == 2) {
      HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
          {input->shape()}, sort_dimension, {input}, variadic_comparator,
          true));
      TF_RETURN_IF_ERROR(ReplaceInstruction(
          call->users().front(),
          comp->AddInstruction(HloInstruction::CreateSlice(
              call->shape().tuple_shapes(0), sort, zeroes,
              call->shape().tuple_shapes(0).dimensions(), ones))));
      sort->set_metadata(call->metadata());
    } else {
      HloInstruction* iota = comp->AddInstruction(
          HloInstruction::CreateIota(iota_shape, iota_shape.rank() - 1));
      HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
          ShapeUtil::MakeTupleShape({input->shape(), iota_shape}),
          sort_dimension, {input, iota}, variadic_comparator,
          true));
      TF_RETURN_IF_ERROR(ReplaceInstruction(
          call, comp->AddInstruction(HloInstruction::CreateTuple(
                    {slice_tuple(sort, 0), slice_tuple(sort, 1)}))));
      sort->set_metadata(call->metadata());
    }
    return absl::OkStatus();
  }

 private:
  HloPredicate should_decompose_;
};
// Runs the decomposition visitor over the module.
absl::StatusOr<bool> TopkDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TopkDecomposerVisitor visitor(should_decompose_);
  return visitor.RunOnModule(module, execution_threads);
}
} | #include "xla/service/topk_rewriter.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <optional>
#include <utility>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::tsl::testing::IsOkAndHolds;
using TopkRewriterTest = HloTestBase;
// HLO comparator with (f32 value, f32 value, s32 index, s32 index)
// parameters implementing the NaN-safe descending compare: floats are
// bitcast to integers and negative values remapped via (2147483647 - bits)
// before an integer GT.
std::string getComparator() {
  return R"(
%compare {
%p.1.lhs.8 = s32[] parameter(2)
%p.1.rhs.9 = s32[] parameter(3)
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
// Same NaN-safe comparator shape as getComparator(), but the 2147483647
// constant is materialized as s32 and then converted to u32, matching the
// "with_convert" patterns in IsNanSafeGt.
std::string getConvertMaxComparator() {
  return R"(
%compare {
%p.1.lhs.6 = s32[] parameter(2)
%p.1.rhs.7 = s32[] parameter(3)
%p.0.lhs.4 = f32[] parameter(0)
%bitcast-convert = s32[] bitcast-convert(f32[] %p.0.lhs.4)
%constant = s32[] constant(0)
%compare = pred[] compare(s32[] %bitcast-convert, s32[] %constant), direction=LT
%constant.1 = s32[] constant(2147483647)
%convert = u32[] convert(s32[] %constant.1)
%bitcast-convert.1 = u32[] bitcast-convert(f32[] %p.0.lhs.4)
%subtract = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.1)
%bitcast-convert.2 = s32[] bitcast-convert(u32[] %subtract)
%select = s32[] select(pred[] %compare, s32[] %bitcast-convert.2, s32[] %bitcast-convert)
%p.0.rhs.5 = f32[] parameter(1)
%bitcast-convert.3 = s32[] bitcast-convert(f32[] %p.0.rhs.5)
%compare.1 = pred[] compare(s32[] %bitcast-convert.3, s32[] %constant), direction=LT
%bitcast-convert.4 = u32[] bitcast-convert(f32[] %p.0.rhs.5)
%subtract.1 = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.4)
%bitcast-convert.5 = s32[] bitcast-convert(u32[] %subtract.1)
%select.1 = s32[] select(pred[] %compare.1, s32[] %bitcast-convert.5, s32[] %bitcast-convert.3)
ROOT %compare.2 = pred[] compare(s32[] %select, s32[] %select.1), direction=GT
})";
}
// Same NaN-safe comparator as getComparator() but with only the two f32
// value parameters — for sorts without an index (iota) operand.
std::string getComparatorNoIota() {
  return R"(
%compare {
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
// HLO comparator using a plain total-order GT compare on the value
// parameters; the two index parameters are present but unused.
std::string getCompareComparator() {
  static const char* const kComparatorHlo = R"(
%compare {
%Arg_0.100 = f32[] parameter(0)
%Arg_1.101 = f32[] parameter(1)
%Arg_2.102 = s32[] parameter(2)
%Arg_3.103 = s32[] parameter(3)
ROOT %compare.56364 = pred[] compare(f32[] %Arg_0.100, f32[] %Arg_1.101), direction=GT, type=TOTALORDER
})";
  return kComparatorHlo;
}
// HLO comparator in the stable-sort shape recognized by the rewriter: a
// total-order GT wrapped in select(broadcast(true), gt, broadcast(true)).
std::string getStableComparator() {
  static const char* const kComparatorHlo = R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
  return kComparatorHlo;
}
// Returns true iff `inst` is a sort instruction marked stable.
bool IsStableSort(const HloInstruction* inst) {
  if (const auto* sort = DynCast<HloSortInstruction>(inst)) {
    return sort->is_stable();
  }
  return false;
}
// End-to-end check: a (sort + slice) TopK pattern with an iota index operand
// is rewritten into a single "TopK" custom call, for each recognized
// comparator shape.
TEST_F(TopkRewriterTest, Rewrite) {
  for (std::string comparator :
       {getComparator(), getCompareComparator(), getStableComparator()}) {
    const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    // Always-profitable callback so every matching sort converts.
    TopkRewriter rewriter(
        [](const HloSortInstruction*, int64_t) { return true; });
    TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
    TF_ASSERT_OK(HloDCE().Run(module.get()).status());
    EXPECT_TRUE(changed);
    // Both tuple elements must now come from the same custom call.
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                GmockMatch(m::Tuple(
                    m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
                    m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
    const HloInstruction* cc =
        module->entry_computation()->root_instruction()->operand(0)->operand(0);
    EXPECT_THAT(cc->custom_call_target(), "TopK");
  }
}
TEST_F(TopkRewriterTest, RewriteWithBroadcast) {
for (std::string comparator :
{getComparator(), getCompareComparator(), getStableComparator()}) {
const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
}
TEST_F(TopkRewriterTest, RewriteWithConvertMaxComparator) {
const std::string hlo_string = R"(
HloModule module
)" + getConvertMaxComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteUnbatched) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567] parameter(0)
%iota.4 = s32[1234567] iota(), iota_dimension=0
%sort.27 = (f32[1234567], s32[1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteTranspose) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567,8] parameter(0)
%iota.4 = s32[1234567,8] iota(), iota_dimension=0
%sort.27 = (f32[1234567,8], s32[1234567,8]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567,8] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5,8] slice(%get-tuple-element.28), slice={[0:5], [0:8]}
%get-tuple-element.30 = s32[1234567,8] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5,8] slice(%get-tuple-element.30), slice={[0:5], [0:8]}
ROOT %tuple.32 = (f32[5,8], s32[5,8]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
LOG(INFO) << module->entry_computation()->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 0)),
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteReshape) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[3,8,1234567] parameter(0)
%iota.4 = s32[3,8,1234567] iota(), iota_dimension=2
%sort.27 = (f32[3,8,1234567], s32[3,8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={2}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[3, 8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[3,8,5] slice(%get-tuple-element.28), slice={[0:3], [0:8], [0:5]}
%get-tuple-element.30 = s32[3,8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[3,8,5] slice(%get-tuple-element.30), slice={[0:3], [0:8], [0:5]}
ROOT %tuple.32 = (f32[3,8,5], s32[3,8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 0)),
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RoundTripNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(
m::Sort(m::Parameter(0)).WithPredicate(IsStableSort))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripOnlyIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = s32[8,1234567] get-tuple-element(%sort.27), index=1
ROOT %slice.29 = s32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort),
1))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTrip) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::GetTupleElement(sort_matcher, 0)),
m::Slice(m::GetTupleElement(sort_matcher, 1)))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripValueOnly) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
ROOT %slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(sort_matcher, 0))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, SanityCheckOutput) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto topk_module = source_module->Clone();
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(topk_module.get()),
IsOkAndHolds(true));
auto decomposed_module = topk_module->Clone();
EXPECT_THAT(TopkDecomposer().Run(decomposed_module.get()),
IsOkAndHolds(true));
const size_t source_size = 1234;
std::vector<float> source(source_size);
std::iota(source.begin(), source.end(), 80000);
auto input = LiteralUtil::CreateR1<float>(source);
std::vector<float> top_k({81233, 81232, 81231, 81230, 81229});
auto check_result = [&](std::unique_ptr<HloModule> module) {
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&input}));
LiteralTestUtil::ExpectR1Equal<float>(top_k, result.DecomposeTuple()[0]);
};
check_result(std::move(source_module));
check_result(std::move(decomposed_module));
}
TEST_F(TopkRewriterTest, Equivalent) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(
RunAndCompare(std::move(source_module), std::nullopt, round_trip));
}
TEST_F(TopkRewriterTest, DecomposerStability) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2= f32[1234] broadcast(f32[] %constant.1), dimensions={}
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%broadcast.2, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(source_module), std::nullopt,
round_trip));
}
TEST_F(TopkRewriterTest, TopKDecomposition) {
const std::string hlo_string = R"(
HloModule topk
ENTRY TopK {
x = bf16[10,10]{0,1} parameter(0)
ROOT topk = (bf16[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher = op::Sort(op::Parameter(0), op::Iota());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Slice(op::GetTupleElement(sort_matcher, 0)),
op::Slice(op::GetTupleElement(sort_matcher, 1))));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
}
}
} |
1,932 | cpp | tensorflow/tensorflow | dynamic_dimension_simplifier | third_party/xla/xla/service/dynamic_dimension_simplifier.cc | third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc | #ifndef XLA_SERVICE_DYNAMIC_DIMENSION_SIMPLIFIER_H_
#define XLA_SERVICE_DYNAMIC_DIMENSION_SIMPLIFIER_H_
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// An HLO pass that simplifies instruction chains that commonly arise when
// manipulating dynamic dimension sizes: flattening nested concatenates,
// forwarding slices of concatenates to the covered operand, collapsing
// reshape/broadcast round-trips, and removing identity converts/reshapes.
class DynamicDimensionSimplifier : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "dynamic-dimension-simplifier";
  }
  using HloPassInterface::Run;
  // Runs all simplifications over the non-fusion computations of `module`
  // restricted to `execution_threads`. Returns true iff anything changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
// Flattens concat(concat(a, b), c) into concat(a, b, c) when the inner and
// outer concatenates operate along the same dimension. Returns true iff the
// instruction was replaced.
absl::StatusOr<bool> ConcatForwarding(HloInstruction* concat) {
  if (concat->opcode() != HloOpcode::kConcatenate) {
    return false;
  }
  const int64_t dim = concat->concatenate_dimension();
  std::vector<HloInstruction*> flattened_operands;
  bool flattened_any = false;
  for (HloInstruction* operand : concat->operands()) {
    const bool same_dim_concat =
        operand->opcode() == HloOpcode::kConcatenate &&
        operand->concatenate_dimension() == dim;
    if (same_dim_concat) {
      // Splice the inner concatenate's operands directly into ours.
      flattened_any = true;
      for (HloInstruction* inner_operand : operand->operands()) {
        flattened_operands.push_back(inner_operand);
      }
    } else {
      flattened_operands.push_back(operand);
    }
  }
  if (!flattened_any) {
    return false;
  }
  auto* computation = concat->parent();
  auto* replacement =
      computation->AddInstruction(HloInstruction::CreateConcatenate(
          concat->shape(), flattened_operands, dim));
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(concat, replacement));
  return true;
}
// Forwards a rank-1 slice of a concatenate directly to the concatenate
// operand it covers exactly, if any:
//   slice(concat(a, b, c)) -> b
// when the slice's [start, limit) window (stride 1) lines up with one
// operand's extent. Returns true iff the slice was replaced.
absl::StatusOr<bool> SliceConcatForwarding(HloInstruction* slice) {
  if (slice->opcode() != HloOpcode::kSlice) {
    return false;
  }
  auto concat = slice->mutable_operand(0);
  if (concat->opcode() != HloOpcode::kConcatenate) {
    return false;
  }
  if (slice->shape().rank() != 1) {
    // Only rank-1 slicing is handled.
    return false;
  }
  int64_t concat_dim = concat->concatenate_dimension();
  int64_t size_so_far = 0;
  int64_t slice_size = slice->shape().dimensions(concat_dim);
  // The slice window must be contiguous (stride 1) and its size must match
  // the output dimension, i.e. no implicit truncation.
  if (slice_size != slice->slice_limits(0) - slice->slice_starts(0)) {
    return false;
  }
  if (slice->slice_strides(0) != 1) {
    return false;
  }
  for (HloInstruction* operand : concat->operands()) {
    // Measure extents along concat_dim consistently; for a rank-1 shape
    // concat_dim is necessarily 0, so this matches the slice bounds above.
    if (size_so_far == slice->slice_starts(0) &&
        operand->shape().dimensions(concat_dim) == slice_size) {
      // The slice covers exactly this operand; forward it.
      TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(operand));
      return true;
    }
    size_so_far += operand->shape().dimensions(concat_dim);
  }
  return false;
}
// Rewrites reshape(broadcast(scalar)) -> scalar when the reshape produces a
// scalar and the broadcast merely expanded a scalar to a rank-1 shape.
// Returns true iff the reshape was replaced.
absl::StatusOr<bool> ReshapeBroadcastForwarding(HloInstruction* reshape) {
  if (reshape->opcode() != HloOpcode::kReshape) {
    return false;
  }
  HloInstruction* broadcast = reshape->mutable_operand(0);
  const bool matches_pattern =
      broadcast->opcode() == HloOpcode::kBroadcast &&
      reshape->shape().rank() == 0 &&
      broadcast->shape().rank() == 1 &&
      broadcast->operand(0)->shape().rank() == 0;
  if (!matches_pattern) {
    return false;
  }
  TF_RETURN_IF_ERROR(
      reshape->ReplaceAllUsesWith(broadcast->mutable_operand(0)));
  return true;
}
// Collapses reshape(reshape(x)) -> x when the outer reshape restores the
// exact original shape of x (compared with Shape::Equal). Returns true iff
// the outer reshape was replaced.
absl::StatusOr<bool> ReshapeReshapeForwarding(HloInstruction* reshape) {
  if (reshape->opcode() != HloOpcode::kReshape) {
    return false;
  }
  HloInstruction* inner = reshape->mutable_operand(0);
  if (inner->opcode() != HloOpcode::kReshape) {
    return false;
  }
  HloInstruction* source = inner->mutable_operand(0);
  if (!Shape::Equal()(reshape->shape(), source->shape())) {
    return false;
  }
  TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(source));
  return true;
}
// Removes a convert whose output shape (including element type) is identical
// to its input's — such a convert is a no-op. Returns true iff replaced.
absl::StatusOr<bool> IdentityConvertRemoving(HloInstruction* convert) {
  if (convert->opcode() != HloOpcode::kConvert) {
    return false;
  }
  HloInstruction* input = convert->mutable_operand(0);
  if (!Shape::Equal()(convert->shape(), input->shape())) {
    return false;
  }
  TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(input));
  return true;
}
// Removes a reshape whose output shape is identical to its input's — such a
// reshape is a no-op. Mirrors IdentityConvertRemoving above. Returns true
// iff replaced.
absl::StatusOr<bool> IdentityReshapeRemoving(HloInstruction* reshape) {
  if (reshape->opcode() != HloOpcode::kReshape) {
    return false;
  }
  HloInstruction* input = reshape->mutable_operand(0);
  if (!Shape::Equal()(reshape->shape(), input->shape())) {
    return false;
  }
  TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(input));
  return true;
}
}
absl::StatusOr<bool> DynamicDimensionSimplifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      2, "DynamicDimensionSimplifier::Run(), before:\n" + module->ToString());
  bool changed = false;
  // Applies one simplification to every non-fusion instruction in the
  // module. Each simplification runs to completion over the whole module
  // before the next one starts, preserving the original pass ordering (the
  // previous implementation repeated this loop nest six times verbatim).
  auto run_simplification =
      [&](absl::StatusOr<bool> (*simplify)(HloInstruction*)) -> absl::Status {
    for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
      for (auto* inst : comp->MakeInstructionPostOrder()) {
        TF_ASSIGN_OR_RETURN(bool local_changed, simplify(inst));
        changed |= local_changed;
      }
    }
    return absl::OkStatus();
  };
  TF_RETURN_IF_ERROR(run_simplification(ConcatForwarding));
  TF_RETURN_IF_ERROR(run_simplification(SliceConcatForwarding));
  TF_RETURN_IF_ERROR(run_simplification(ReshapeBroadcastForwarding));
  TF_RETURN_IF_ERROR(run_simplification(ReshapeReshapeForwarding));
  TF_RETURN_IF_ERROR(run_simplification(IdentityConvertRemoving));
  TF_RETURN_IF_ERROR(run_simplification(IdentityReshapeRemoving));
  XLA_VLOG_LINES(
      2, "DynamicDimensionSimplifier::Run(), after:\n" + module->ToString());
  return changed;
}
} | #include "xla/service/dynamic_dimension_simplifier.h"
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
class DynamicDimensionSimplifierTest : public HloTestBase {};
// concat(concat(p0, p1), p2) along the same dimension must be flattened to a
// single concat(p0, p1, p2).
TEST_F(DynamicDimensionSimplifierTest, ForwardConcat) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = s32[1] parameter(0)
      p1 = s32[1] parameter(1)
      p2 = s32[1] parameter(2)
      concat1 = s32[2] concatenate(p0, p1), dimensions={0}
      ROOT concat2 = s32[3] concatenate(concat1, p2), dimensions={0}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  DynamicDimensionSimplifier simplifier;
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Concatenate(m::Parameter(0), m::Parameter(1),
                                        m::Parameter(2))));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatMultipleDims) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
p1 = s32[1, 1] parameter(1)
p2 = s32[2, 1] parameter(2)
concat1 = s32[2, 1] concatenate(p0, p1), dimensions={0}
ROOT concat2 = s32[2, 2] concatenate(concat1, p2), dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
// slice(concat(p0, p1, p2)) whose window exactly covers p1 must be forwarded
// to p1 directly.
TEST_F(DynamicDimensionSimplifierTest, ForwardConcatSlice) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = s32[1] parameter(0)
      p1 = s32[1] parameter(1)
      p2 = s32[1] parameter(2)
      concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
      ROOT slice = s32[1] slice(concat), slice={[1:2]}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr))
  ;
  DynamicDimensionSimplifier simplifier;
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(1)));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceSizeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[2] slice(concat), slice={[1:3]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceStrided) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[1] slice(concat), slice={[1:2:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
// reshape-to-scalar of a scalar-to-rank-1 broadcast must collapse to the
// original scalar parameter.
TEST_F(DynamicDimensionSimplifierTest, BroadcastReshapeForwarding) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = s32[] parameter(0)
      broadcast = s32[1] broadcast(p0), dimensions={}
      ROOT reshape = s32[] reshape(broadcast)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  DynamicDimensionSimplifier simplifier;
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest, ReshapeReshapeForwarding) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest,
DoNotReshapeReshapeForwardingShapeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
// A convert whose input and output shapes are identical is a no-op and must
// be removed, leaving the parameter as the root.
TEST_F(DynamicDimensionSimplifierTest, IdConvertRemoving) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = s32[1] parameter(0)
      ROOT reshape2 = s32[1] convert(p0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  DynamicDimensionSimplifier simplifier;
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(0)));
}
}
} |
1,933 | cpp | tensorflow/tensorflow | slice_sinker | third_party/xla/xla/service/slice_sinker.cc | third_party/xla/xla/service/slice_sinker_test.cc | #ifndef XLA_SERVICE_SLICE_SINKER_H_
#define XLA_SERVICE_SLICE_SINKER_H_
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that "sinks" slices below elementwise operations: when several
// similar elementwise operations each consume identically-configured slices
// of the same sources, they are replaced by one operation applied to the full
// sources, followed by slices of its result.
class SliceSinker : public HloModulePass {
 public:
  absl::string_view name() const override { return "slice-sinker"; }
  using HloPassInterface::Run;
  // Runs the pass over `module`; returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/slice_sinker.h"
#include <algorithm>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/shape_util.h"
namespace xla {
namespace {
// Returns true when the two slice instructions use identical starts, limits
// and strides. Both must be kSlice and slice sources of the same dimensions.
bool SameSliceConfiguration(const HloInstruction* slice_1,
                            const HloInstruction* slice_2) {
  CHECK_EQ(slice_1->opcode(), HloOpcode::kSlice);
  CHECK_EQ(slice_2->opcode(), HloOpcode::kSlice);
  CHECK(slice_1->operand(0)->shape().dimensions() ==
        slice_2->operand(0)->shape().dimensions());
  const bool same_starts = slice_1->slice_starts() == slice_2->slice_starts();
  const bool same_limits = slice_1->slice_limits() == slice_2->slice_limits();
  const bool same_strides =
      slice_1->slice_strides() == slice_2->slice_strides();
  return same_starts && same_limits && same_strides;
}
bool IsElementwiseOperationOnSimilarSlices(const HloInstruction* inst) {
CHECK(inst->IsElementwise());
if (absl::c_any_of(inst->operands(), [](const HloInstruction* operand) {
return operand->opcode() != HloOpcode::kSlice;
})) {
return false;
}
const HloInstruction* slice0 = inst->operand(0);
return absl::c_all_of(absl::MakeSpan(inst->operands()).subspan(1),
[slice0](const HloInstruction* slice) {
return ShapeUtil::CompatibleIgnoringElementType(
slice0->operand(0)->shape(),
slice->operand(0)->shape()) &&
SameSliceConfiguration(slice0, slice);
});
}
// Returns true if `candidate` performs the same operation (same opcode and
// attributes, same result element type) as `operation_on_slices`, with each
// operand being a slice of the corresponding slice source used by
// `operation_on_slices`, and all of the candidate's operand slices sharing
// one slice configuration.
bool IsSimilarOperationOnSlices(const HloInstruction* operation_on_slices,
                                const HloInstruction* candidate) {
  // A dead operation is not worth including in the group.
  if (candidate->user_count() == 0) {
    return false;
  }
  if (!candidate->SameOp(*operation_on_slices) ||
      operation_on_slices->shape().element_type() !=
          candidate->shape().element_type()) {
    return false;
  }
  const HloInstruction* operand_slice0 = candidate->operand(0);
  for (int64_t i = 0; i < candidate->operand_count(); ++i) {
    const HloInstruction* operand_slice = candidate->operand(i);
    // Operand i must slice the same source as operand i of
    // `operation_on_slices`, with the candidate's common slice configuration.
    if (operand_slice->opcode() != HloOpcode::kSlice ||
        operand_slice->operand(0) !=
            operation_on_slices->operand(i)->operand(0) ||
        !SameSliceConfiguration(operand_slice0, operand_slice)) {
      return false;
    }
  }
  return true;
}
// The transformation pays off only if the operations on slices together
// touch at least as many elements as one operation on the full slice source
// would produce.
bool ShouldTransform(const std::vector<HloInstruction*>& operations_on_slices) {
  const int64_t source_elements = xla::ShapeUtil::ElementsIn(
      operations_on_slices[0]->operand(0)->operand(0)->shape());
  int64_t sliced_elements = 0;
  for (const HloInstruction* op : operations_on_slices) {
    sliced_elements += ShapeUtil::ElementsIn(op->shape());
  }
  return sliced_elements >= source_elements;
}
// Collects the group of operations similar to `operation_on_slices`
// (including itself) by walking: first slice source -> its slices -> their
// users. Returns std::nullopt when transforming the group would not be
// profitable per ShouldTransform.
std::optional<std::vector<HloInstruction*>> FindElementwiseOperationGroup(
    const HloInstruction* operation_on_slices) {
  std::vector<HloInstruction*> operations;
  const HloInstruction* slice_source0 =
      operation_on_slices->operand(0)->operand(0);
  for (const HloInstruction* operand_slice0 : slice_source0->users()) {
    if (operand_slice0->opcode() != HloOpcode::kSlice) {
      continue;
    }
    // Any user of a slice of the first source is a potential peer.
    for (HloInstruction* user : operand_slice0->users()) {
      if (IsSimilarOperationOnSlices(operation_on_slices, user)) {
        operations.push_back(user);
      }
    }
  }
  return ShouldTransform(operations) ? std::make_optional(operations)
                                     : std::nullopt;
}
// Rewrites the group: clones operation_on_slices[0] once over the full slice
// sources, then replaces each original operation with a slice (cloned from
// its first operand slice) of that new full-sized result. The replaced
// operations are only unhooked from their users; dead-code elimination is
// expected to remove them later.
absl::Status SinkSlices(
    const std::vector<HloInstruction*>& slice_sources,
    const std::vector<HloInstruction*>& operation_on_slices) {
  const Shape shape = slice_sources[0]->shape();
  PrimitiveType element_type = operation_on_slices[0]->shape().element_type();
  // Keep the slice-source dimensions, but take the element type produced by
  // the original operations (relevant e.g. for type-changing elementwise ops).
  Shape new_shape = ShapeUtil::ChangeElementType(shape, element_type);
  HloComputation* computation = operation_on_slices[0]->parent();
  auto operation_on_slice_sources = computation->AddInstruction(
      operation_on_slices[0]->CloneWithNewOperands(new_shape, slice_sources));
  VLOG(10) << "Adding operation_on_slice_sources: "
           << operation_on_slice_sources->ToString();
  for (HloInstruction* user : operation_on_slices) {
    // Clone the user's first operand slice so the new slice inherits the same
    // starts/limits/strides, but reads from the full-sized result.
    const HloInstruction* operand_slice = user->operand(0);
    auto user_slice =
        computation->AddInstruction(operand_slice->CloneWithNewOperands(
            user->shape(), {operation_on_slice_sources}));
    VLOG(10) << "Adding new slice: " << user_slice->ToString()
             << " to replace: " << user->ToString();
    TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(user_slice));
  }
  return absl::OkStatus();
}
}
// Scans every computation for live elementwise operations whose operands are
// all similarly-configured slices, gathers the peer group for each, and sinks
// the slices below the operation when profitable. Returns true iff the module
// was changed.
absl::StatusOr<bool> SliceSinker::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* instruction :
         computation->MakeInstructionPostOrder()) {
      // Only live elementwise operations with at least one operand qualify.
      if (!instruction->IsElementwise() || instruction->operand_count() == 0 ||
          instruction->user_count() == 0) {
        continue;
      }
      VLOG(10) << "Processing instruction : " << instruction->ToString();
      // All operands must be slices sharing one slice configuration.
      if (!IsElementwiseOperationOnSimilarSlices(instruction)) {
        continue;
      }
      // Find the whole peer group; nullopt means not profitable.
      std::optional<std::vector<HloInstruction*>> similar_operations =
          FindElementwiseOperationGroup(instruction);
      if (!similar_operations.has_value()) {
        continue;
      }
      // The slice sources are the operands of this instruction's slices.
      std::vector<HloInstruction*> slice_sources;
      absl::c_transform(
          instruction->operands(), std::back_inserter(slice_sources),
          [](HloInstruction* slice) { return slice->mutable_operand(0); });
      TF_RETURN_IF_ERROR(SinkSlices(slice_sources, similar_operations.value()));
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/slice_sinker.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using ::testing::ElementsAre;
class SliceSinkerTest : public HloTestBase {};
TEST_F(SliceSinkerTest, TernaryOperation) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = pred[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
p2 = f32[8,9] parameter(2)
s00 = pred[2,9] slice(pred[8,9] p0), slice={[0:2], [0:9]}
s01 = pred[6,9] slice(pred[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s20 = f32[2,9] slice(f32[8,9] p2), slice={[0:2], [0:9]}
s21 = f32[6,9] slice(f32[8,9] p2), slice={[2:8], [0:9]}
sel0 = f32[2,9] select(pred[2,9] s00, f32[2,9] s10, f32[2,9] s20)
sel1 = f32[6,9] select(pred[6,9] s01, f32[6,9] s11, f32[6,9] s21)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(sel0, sel1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))),
m::Slice(&slice1, m::Select(m::Parameter(0), m::Parameter(1),
m::Parameter(2))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingPartialSlicesBeneficial) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[8,4] slice(f32[8,9] p0), slice={[0:8], [0:4]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[8,4] slice(f32[8,9] p1), slice={[0:8], [0:4]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[8,4] add(f32[8,4] s02, f32[8,4] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[8,4]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 4));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, SameSliceSourcesTwoPeerGroups) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s02 = f32[8,2] slice(f32[8,9] p0), slice={[0:8], [0:2]}
s03 = f32[8,7] slice(f32[8,9] p0), slice={[0:8], [2:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
s12 = f32[8,2] slice(f32[8,9] p1), slice={[0:8], [0:2]}
s13 = f32[8,7] slice(f32[8,9] p1), slice={[0:8], [2:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[8,2] multiply(f32[8,2] s02, f32[8,2] s12)
mul1 = f32[8,7] multiply(f32[8,7] s03, f32[8,7] s13)
ROOT tuple = (f32[2,9], f32[6,9], f32[8,2], f32[8,7]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 2));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(0, 2));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, OverlappingMultipleSlices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]}
s02 = f32[3,9] slice(f32[8,9] p0), slice={[2:5], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]}
s12 = f32[3,9] slice(f32[8,9] p1), slice={[2:5], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
add2 = f32[3,9] add(f32[3,9] s02, f32[3,9] s12)
ROOT tuple = (f32[2,9], f32[5,9], f32[3,9]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(5, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}
// The two slices leave row 7 of the 8x9 sources uncovered, so the group
// touches 63 of 72 elements; the profitability check rejects the transform
// and the pass must report no change.
TEST_F(SliceSinkerTest, DisjointedPartialSlices) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[8,9] parameter(0)
      p1 = f32[8,9] parameter(1)
      s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
      s01 = f32[5,9] slice(f32[8,9] p0), slice={[2:7], [0:9]}
      s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
      s11 = f32[5,9] slice(f32[8,9] p1), slice={[2:7], [0:9]}
      add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
      add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11)
      ROOT tuple = (f32[2,9], f32[5,9]) tuple(add0, add1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}
// Both slices drop columns 7..8, so the operations cover only 56 of the
// sources' 72 elements; sinking would add work and must be rejected.
TEST_F(SliceSinkerTest, OverlappingPartialSlicesNotBeneficial) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[8,9] parameter(0)
      p1 = f32[8,9] parameter(1)
      s00 = f32[2,7] slice(f32[8,9] p0), slice={[0:2], [0:7]}
      s01 = f32[6,7] slice(f32[8,9] p0), slice={[2:8], [0:7]}
      s10 = f32[2,7] slice(f32[8,9] p1), slice={[0:2], [0:7]}
      s11 = f32[6,7] slice(f32[8,9] p1), slice={[2:8], [0:7]}
      add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
      add1 = f32[6,7] add(f32[6,7] s01, f32[6,7] s11)
      ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOrderingOfSliceSources) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,7] parameter(0)
p1 = f32[8,7] parameter(1)
s00 = f32[2,7] slice(f32[8,7] p0), slice={[0:2], [0:7]}
s01 = f32[6,7] slice(f32[8,7] p0), slice={[2:8], [0:7]}
s10 = f32[2,7] slice(f32[8,7] p1), slice={[0:2], [0:7]}
s11 = f32[6,7] slice(f32[8,7] p1), slice={[2:8], [0:7]}
add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10)
add1 = f32[6,7] add(f32[6,7] s11, f32[6,7] s01)
ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesFromDifferentIndices) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:4], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[4:8], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:4], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[4:8], [0:9]}
add0 = f32[4,9] add(f32[4,9] s01, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s00, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, DifferentOperator) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
mul = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
add = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(mul, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SameOperatorDifferentAttributes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
cmp1 = pred[2,9] compare(f32[2,9] s00, f32[2,9] s10), direction=GT
cmp2 = pred[6,9] compare(f32[6,9] s01, f32[6,9] s11), direction=LT
ROOT tuple = (pred[2,9], pred[6,9]) tuple(cmp1, cmp2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithMultiUsers) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11)
mul0 = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10)
mul1 = f32[6,9] multiply(f32[6,9] s01, f32[6,9] s11)
ROOT tuple = (f32[2,9], f32[6,9], f32[2,9], f32[6,9]) tuple(add0, add1, mul0, mul1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
const HloInstruction* slice2;
const HloInstruction* slice3;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice2->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice3->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, NonElementWise) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8] parameter(0)
s00 = f32[2] slice(f32[8] p0), slice={[0:2]}
s01 = f32[6] slice(f32[8] p0), slice={[2:8]}
bc0 = f32[2,9] broadcast(f32[2] s00), dimensions={0}
bc1 = f32[6,9] broadcast(f32[6] s01), dimensions={0}
ROOT tuple = (f32[2,9], f32[6,9]) tuple(bc0, bc1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, SlicesWithNontrivialStrides) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:7:2], [0:9]}
s01 = f32[4,9] slice(f32[8,9] p0), slice={[1:8:2], [0:9]}
s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:7:2], [0:9]}
s11 = f32[4,9] slice(f32[8,9] p1), slice={[1:8:2], [0:9]}
add0 = f32[4,9] add(f32[4,9] s00, f32[4,9] s10)
add1 = f32[4,9] add(f32[4,9] s01, f32[4,9] s11)
ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst, GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(7, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(2, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(1, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(2, 1));
}
TEST_F(SliceSinkerTest, NotAllSliceOperand) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[2,9] parameter(1)
p2 = f32[6,9] parameter(2)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] p1)
abs1 = f32[6,9] abs(f32[6,9] p2)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
TEST_F(SliceSinkerTest, Cascade) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
p1 = f32[8,9] parameter(1)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]}
s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]}
abs0 = f32[2,9] abs(f32[2,9] s10)
abs1 = f32[6,9] abs(f32[6,9] s11)
add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0)
add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1)
ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_TRUE(result);
HloInstruction* inst = module->entry_computation()->root_instruction();
const HloInstruction* slice0;
const HloInstruction* slice1;
EXPECT_THAT(
inst,
GmockMatch(m::Tuple(
m::Slice(&slice0, m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))),
m::Slice(&slice1,
m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))))));
EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}
TEST_F(SliceSinkerTest, SameOpcodeDifferentResultElementTypes) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = f32[8,9] parameter(0)
s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]}
s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]}
convert0 = s32[2,9] convert(f32[2,9] s00)
convert1 = s64[6,9] convert(f32[6,9] s01)
ROOT tuple = (s32[2,9], s64[6,9]) tuple(convert0, convert1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
SliceSinker slice_sinker;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
EXPECT_FALSE(result);
}
}
} |
1,934 | cpp | tensorflow/tensorflow | scan_loop_accumulator_input_unification | third_party/xla/xla/service/scan_loop_accumulator_input_unification.cc | third_party/xla/xla/service/scan_loop_accumulator_input_unification_test.cc | #ifndef XLA_SERVICE_SCAN_LOOP_ACCUMULATOR_INPUT_UNIFICATION_H_
#define XLA_SERVICE_SCAN_LOOP_ACCUMULATOR_INPUT_UNIFICATION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that unifies a scan loop's accumulator tuple element with the matching
// read-only input element: all reads of the input are redirected to the
// accumulator and the accumulator's init slot is seeded with the input's
// initial value, so the two tuple positions share one buffer.
class ScanLoopAccumulatorInputUnification : public HloModulePass {
 public:
  ~ScanLoopAccumulatorInputUnification() override = default;
  explicit ScanLoopAccumulatorInputUnification() = default;
  absl::string_view name() const override {
    return "scan_loop_accumulator_input_unification";
  }
  using HloPassInterface::Run;
  // Runs the pass on `module`; returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/scan_loop_accumulator_input_unification.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// A while-tuple element is read-only when both the loop init and the loop
// result carry exactly one dataflow value, and it is the same value.
bool LoopIndexIsReadOnly(const HloAliasAnalysis& alias_analysis,
                         HloInstruction* while_instr, int64_t idx) {
  const HloDataflowAnalysis& dataflow_analysis =
      alias_analysis.dataflow_analysis();
  if (dataflow_analysis.GetValueSet(while_instr->while_init(), {idx})
          .values()
          .size() > 1) {
    return false;
  }
  if (dataflow_analysis.GetValueSet(while_instr, {idx}).values().size() > 1) {
    return false;
  }
  return dataflow_analysis.GetUniqueValueAt(while_instr, {idx}) ==
         dataflow_analysis.GetUniqueValueAt(while_instr->while_init(), {idx});
}
// Finds the (accumulator, input) pairs of the scan loop `while_instr` that
// can be unified. An accumulator candidate is a single-use get-tuple-element
// of the loop parameter whose user is a shape-covering dynamic-update-slice
// feeding the loop root at the same tuple index. Its matching input is the
// corresponding read-only tuple element threaded in from the enclosing loop.
std::vector<std::pair<HloInstruction*, HloInstruction*>>
FindAccumulatorInputPairs(const HloAliasAnalysis& alias_analysis,
                          HloInstruction* while_instr,
                          const WhileLoopConfig& config) {
  HloComputation* computation = while_instr->while_body();
  HloInstruction* body_param = computation->parameter_instruction(0);
  // Step 1: collect accumulator candidates.
  std::vector<HloInstruction*> possible_acc;
  for (int64_t param_idx = 0;
       param_idx < while_instr->while_init()->operand_count(); ++param_idx) {
    for (HloInstruction* gte : body_param->users()) {
      if (!Match(gte, match::GetTupleElement().WithTupleIndex(param_idx))) {
        continue;
      }
      if (gte->operand(0) != body_param) {
        continue;
      }
      // The accumulator GTE must have exactly one user inside the body.
      if (gte->user_count() > 1 || gte->user_count() == 0) {
        continue;
      }
      HloInstruction* gte_user = gte->users().at(0);
      if (MatchShapeCoveringDynamicIndexInstruction(
              gte_user, gte, HloOpcode::kDynamicUpdateSlice, config)
              .has_value()) {
        // The dynamic-update-slice must be the root operand at the same index.
        if (computation->root_instruction()->mutable_operand(param_idx) ==
            gte_user) {
          possible_acc.push_back(gte);
          VLOG(3) << "accumulator index: " << param_idx << " = " << gte->name();
        }
      }
    }
  }
  // Returns the operand index of `operand` within `instr`, or -1 if absent.
  auto operand_index = [](HloInstruction* instr,
                          HloInstruction* operand) -> int64_t {
    for (int64_t i = 0; i < instr->operand_count(); ++i) {
      if (operand == instr->operand(i)) {
        return i;
      }
    }
    return -1;
  };
  // Finds the GTE that reads element `idx` of `tuple`, or nullptr.
  auto find_gte_instr = [](HloInstruction* tuple,
                           int64_t idx) -> HloInstruction* {
    for (HloInstruction* instr : tuple->parent()->MakeInstructionPostOrder()) {
      HloInstruction* operand;
      if (Match(instr, match::GetTupleElement()
                           .WithOperand(0, match::Op(&operand))
                           .WithTupleIndex(idx))) {
        if (operand != tuple) {
          continue;
        }
        return instr;
      }
    }
    return nullptr;
  };
  auto check_single_user_not_null = [](HloInstruction* instr) -> bool {
    if (instr == nullptr || instr->user_count() != 1) {
      return false;
    }
    return true;
  };
  // Step 2: for each accumulator, walk its tuple position through the outer
  // body to find the input tuple element that initializes it.
  std::vector<std::pair<HloInstruction*, HloInstruction*>> acc_input_pairs;
  HloComputation* outer_while_body = while_instr->parent();
  for (HloInstruction* acc : possible_acc) {
    VLOG(3) << "Looking for corresponding input for " << acc->name();
    HloInstruction* acc_gte_outer_body =
        find_gte_instr(while_instr, acc->tuple_index());
    if (acc_gte_outer_body == nullptr) {
      continue;
    }
    int64_t idx =
        operand_index(outer_while_body->root_instruction(), acc_gte_outer_body);
    VLOG(3) << "Accumulator output of the scan in the outer body = "
            << acc_gte_outer_body->name() << ", index = " << idx;
    if (idx == -1) {
      continue;
    }
    HloInstruction* input_gte_outer =
        find_gte_instr(outer_while_body->parameter_instruction(0), idx);
    if (!check_single_user_not_null(input_gte_outer)) {
      continue;
    }
    if (input_gte_outer->users().at(0) != while_instr->while_init()) {
      continue;
    }
    VLOG(3) << "Input parameter outer body = " << input_gte_outer->name()
            << ", index = " << input_gte_outer->tuple_index();
    int64_t input_idx_inner =
        operand_index(while_instr->while_init(), input_gte_outer);
    HloInstruction* input_gte_inner =
        find_gte_instr(computation->parameter_instruction(0), input_idx_inner);
    // BUG FIX: the scan body may not contain a GTE for this tuple index
    // (find_gte_instr returns nullptr), or the GTE may be dead; guard before
    // the tuple_index()/users().at(0) dereferences below, mirroring the
    // check applied to input_gte_outer above.
    if (input_gte_inner == nullptr || input_gte_inner->user_count() == 0) {
      continue;
    }
    if (!LoopIndexIsReadOnly(alias_analysis, while_instr, input_idx_inner)) {
      continue;
    }
    VLOG(3) << "Input parameter scan body = " << input_gte_inner->name()
            << ", index = " << input_gte_inner->tuple_index();
    // NOTE(review): the input's user is matched as a dynamic-update-slice
    // even though the tuple element is read-only — confirm whether
    // kDynamicSlice was intended here.
    HloInstruction* gte_user = input_gte_inner->users().at(0);
    if (MatchShapeCoveringDynamicIndexInstruction(
            gte_user, input_gte_inner, HloOpcode::kDynamicUpdateSlice, config)
            .has_value()) {
      acc_input_pairs.emplace_back(acc, input_gte_inner);
    }
  }
  return acc_input_pairs;
}
// For each unrollable scan loop whose parent computation is called by a while
// instruction, finds accumulator/input pairs and unifies them: reads of the
// input are redirected to the accumulator, and the accumulator's init slot is
// replaced by the input's init value. Returns true iff any pair was unified.
absl::StatusOr<bool> UnifyAccumulatorWithInput(
    const HloAliasAnalysis& alias_analysis,
    std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops) {
  std::unique_ptr<CallGraph> call_graph =
      CallGraph::Build(&alias_analysis.dataflow_analysis().module());
  // True when `comp` is called by a while instruction (i.e. the scan loop is
  // nested inside another loop).
  auto is_while_body = [&](HloComputation* comp) {
    std::vector<HloInstruction*> callers =
        call_graph->GetComputationCallers(comp);
    return !callers.empty() && callers.at(0)->opcode() == HloOpcode::kWhile;
  };
  std::vector<HloInstruction*> changed_loops;
  bool unified = false;
  for (auto& [while_instr, loop_config] : unrollable_loops) {
    if (!is_while_body(while_instr->parent())) {
      continue;
    }
    auto acc_input_pairs =
        FindAccumulatorInputPairs(alias_analysis, while_instr, loop_config);
    for (const auto& [acc, input] : acc_input_pairs) {
      // Skip when the accumulator's init already comes straight from the
      // outer parameter.
      if (Match(while_instr->while_init()->mutable_operand(acc->tuple_index()),
                match::GetTupleElement(match::Parameter()))) {
        continue;
      }
      VLOG(3) << while_instr->name() << " -> " << "<accumulator_@"
              << acc->tuple_index() << ": " << acc->name() << ", " << "input_@"
              << input->tuple_index() << ": " << input->name() << ">";
      // Redirect reads of the input to the accumulator and seed the
      // accumulator's init slot with the input's initial value.
      TF_RETURN_IF_ERROR(input->ReplaceAllUsesWith(acc));
      TF_RETURN_IF_ERROR(while_instr->while_init()->ReplaceOperandWith(
          acc->tuple_index(),
          while_instr->while_init()->mutable_operand(input->tuple_index())));
      // The input GTE is now dead; remove it eagerly.
      if (input->user_count() == 0) {
        TF_RETURN_IF_ERROR(while_instr->while_body()->RemoveInstruction(input));
      }
      unified = true;
    }
  }
  return unified;
}
}
// Entry point: runs alias analysis, unifies accumulator/input pairs in all
// unrollable loops, and — if anything changed — cleans up dead while
// parameters, tuples and computations left behind by the rewrite.
absl::StatusOr<bool> ScanLoopAccumulatorInputUnification::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(2) << "HLO module before ScanLoopAccumulatorInputUnification:";
  XLA_VLOG_LINES(2, module->ToString());
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module));
  // Candidate scan loops are those the while-loop unroller can fully analyze.
  std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops =
      WhileLoopUnroller::GetUnrollableLoops(module, execution_threads);
  TF_ASSIGN_OR_RETURN(bool changed, UnifyAccumulatorWithInput(
                                        *alias_analysis, unrollable_loops));
  if (changed) {
    // Drop the now-dead tuple elements and simplify the leftover tuples.
    for (auto& [while_instr, loop_config] : unrollable_loops) {
      TF_RETURN_IF_ERROR(TryRemoveDeadWhileParams(while_instr).status());
    }
    TF_RETURN_IF_ERROR(TupleSimplifier{}.Run(module).status());
    TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
    VLOG(2) << "HLO module after ScanLoopAccumulatorInputUnification:";
    XLA_VLOG_LINES(2, module->ToString());
  } else {
    VLOG(2) << "HLO module unchanged after ScanLoopAccumulatorInputUnification";
  }
  return changed;
}
} | #include "xla/service/scan_loop_accumulator_input_unification.h"
#include <memory>
#include <optional>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ScanLoopAccumulatorInputUnificationTest = HloTestBase;
HloInstruction* GetTopLevelWhileInstruction(HloModule* module) {
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
return instr;
}
}
return nullptr;
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
tuple.8 = (s32[], s32[], s32[8]) tuple(constant.3, init, array)
while = (s32[], s32[], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput2) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
add.2 = s32[] multiply(get-tuple-element.47, reshape.4)
reshape.5 = s32[1] reshape(add.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, broadcast2, get-tuple-element.54)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
EXPECT_EQ(instr->while_init()->operand(3)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, AccumulatorAllocateOutside) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, get-tuple-element.54, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.48, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
buffer = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, buffer)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=3
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, InputDifferentShape) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8,10] get-tuple-element(wide.arg_tuple.8), index=3
zero = s32[] constant(0)
dynamic-slice.0 = s32[1,10] dynamic-slice(get-tuple-element.54, get-tuple-element.46, zero), dynamic_slice_sizes={1,10}
reshape.2 = s32[10] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(reshape.2, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(get-tuple-element.47, reshape.3)
reshape.4 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8,10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8,10] parameter(0)
broadcast.5 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8,10]) tuple(constant.3, init, broadcast.5, array)
while = (s32[], s32[], s32[8], s32[8,10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.39 = s32[] get-tuple-element(while), index=1
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, MultipleUsersInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
mult = s32[8] multiply(get-tuple-element.54, get-tuple-element.54)
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
add.2 = s32[] multiply(get-tuple-element.47, reshape.4)
reshape.5 = s32[1] reshape(add.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.54, broadcast2, get-tuple-element.56)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest,
UnifyAccumulatorInputCheckCopy) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=4
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.55, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(reshape.3, reshape.2)
add.2 = s32[] add(add.1, get-tuple-element.47)
reshape.4 = s32[1] reshape(add.2)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, get-tuple-element.55)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, get-tuple-element.55)
while = (s32[], s32[], s32[8], s32[8], s32[10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[10]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.55)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
other_input = s32[10] constant({10,20,30,40,50,60,70,80,90,100})
tuple.8 = (s32[], s32[], s32[8], s32[10]) tuple(constant.3, init, array, other_input)
while = (s32[], s32[], s32[8], s32[10]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.39 = s32[8] get-tuple-element(while), index=2
get-tuple-element.40 = s32[10] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[10]) tuple(get-tuple-element.39, get-tuple-element.40)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool clone_copy_inserted,
CopyInsertion().Run(module_clone.get()));
EXPECT_TRUE(clone_copy_inserted);
HloInstruction* while_instruction =
GetTopLevelWhileInstruction(module_clone.get());
EXPECT_EQ(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
TF_ASSERT_OK_AND_ASSIGN(bool copy_inserted,
CopyInsertion().Run(module.get()));
EXPECT_TRUE(copy_inserted);
VLOG(3) << "After copy_insertion:\n" << module->ToString();
while_instruction = GetTopLevelWhileInstruction(module.get());
EXPECT_NE(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
}
}
} |
1,935 | cpp | tensorflow/tensorflow | operand_upcaster | third_party/xla/xla/service/operand_upcaster.cc | third_party/xla/xla/service/operand_upcaster_test.cc | #ifndef XLA_SERVICE_OPERAND_UPCASTER_H_
#define XLA_SERVICE_OPERAND_UPCASTER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
#include "xla/util.h"
namespace xla {
class OperandUpcaster : public OpExpanderPass {
public:
explicit OperandUpcaster(HloPredicate extra_filter = nullptr)
: OpExpanderPass(std::move(extra_filter)) {}
absl::string_view name() const override { return "operand_upcaster"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/operand_upcaster.h"
#include <optional>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kDot:
return ShapeInference::InferDotOpShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->dot_dimension_numbers(),
std::nullopt,
Cast<HloDotInstruction>(instruction)->sparsity());
case HloOpcode::kConvolution:
return ShapeInference::InferConvolveShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->feature_group_count(), instruction->batch_group_count(),
instruction->window(), instruction->convolution_dimension_numbers(),
std::nullopt);
default:
return std::optional<Shape>(std::nullopt);
}
}
}
bool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) {
auto status_or_inferred_shape = MaybeInferShape(instruction);
if (!status_or_inferred_shape.ok() ||
!status_or_inferred_shape->has_value()) {
return false;
}
if (absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2) {
return true;
}
PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type();
if (instruction->shape().element_type() == inferred_type &&
instruction->operand(0)->shape().element_type() == inferred_type &&
instruction->operand(1)->shape().element_type() == inferred_type) {
return false;
}
return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape,
instruction->shape());
}
absl::StatusOr<HloInstruction*> OperandUpcaster::ExpandInstruction(
HloInstruction* instruction) {
const bool packed_nibble =
absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2;
auto type = instruction->shape().element_type();
if (packed_nibble) {
HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0,
*rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0;
TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0,
MakeScalarLike(lhs_n0, 4)));
HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4)));
lhs_n0 = MakeConvertToHlo(lhs_n0, type);
TF_ASSIGN_OR_RETURN(
lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4)));
lhs_n1 = MakeConvertToHlo(lhs_n1, type);
TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0,
MakeScalarLike(rhs_n0, 4)));
HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4)));
rhs_n0 = MakeConvertToHlo(rhs_n0, type);
TF_ASSIGN_OR_RETURN(
rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4)));
rhs_n1 = MakeConvertToHlo(rhs_n1, type);
HloInstruction* linear_n0 =
instruction->parent()->AddInstruction(instruction->CloneWithNewOperands(
instruction->shape(), {lhs_n0, rhs_n0}));
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
0, PrecisionConfig::DEFAULT);
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
1, PrecisionConfig::DEFAULT);
HloInstruction* linear_n1 =
instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands(
instruction->shape(), {lhs_n1, rhs_n1}));
return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1);
}
for (int i = 0; i < HloDotInstruction::kOperands; ++i) {
auto* operand = instruction->mutable_operand(i);
if (operand->shape().element_type() == type) {
continue;
}
auto upcast_shape = operand->shape();
upcast_shape.set_element_type(type);
auto* convert_inst = instruction->AddInstruction(
HloInstruction::CreateConvert(upcast_shape, operand));
TF_RETURN_IF_ERROR(
instruction->ReplaceOperandWithDifferentShape(i, convert_inst));
}
return nullptr;
}
} | #include "xla/service/operand_upcaster.h"
#include <memory>
#include <tuple>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class OperandUpcasterTest
: public HloTestBase,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};
bool ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) {
return operand_type != result_type &&
primitive_util::HigherPrecisionType(operand_type, result_type) ==
result_type;
}
TEST_P(OperandUpcasterTest, ConvertInserted) {
PrimitiveType lhs_type, rhs_type, result_type;
std::tie(lhs_type, rhs_type, result_type) = GetParam();
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) ||
ShouldUpcast(rhs_type, result_type));
auto original_lhs = op::Parameter(0);
auto original_rhs = op::Parameter(1);
auto upcasted_lhs =
ShouldUpcast(lhs_type, result_type)
? AllOf(op::Convert(original_lhs),
op::Shape(absl::Substitute(
"$0[2,3]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_lhs;
auto upcasted_rhs =
ShouldUpcast(rhs_type, result_type)
? AllOf(op::Convert(original_rhs),
op::Shape(absl::Substitute(
"$0[3,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_rhs;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(upcasted_lhs, upcasted_rhs),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
}
INSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest,
::testing::Values(std::make_tuple(S8, S8, S16),
std::make_tuple(U8, U8, U16)));
INSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest,
::testing::Combine(::testing::Values(S8, U8, S16),
::testing::Values(S8, U8, S16),
::testing::Values(S32)));
INSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest,
::testing::Combine(::testing::Values(U8, U16),
::testing::Values(U8, U16),
::testing::Values(U32)));
INSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, S8, U8),
::testing::Values(BF16, S8, U8),
::testing::Values(BF16)));
INSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, F16),
::testing::Values(BF16, F16),
::testing::Values(F32)));
INSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest,
::testing::Values(std::make_tuple(F32, F32, BF16),
std::make_tuple(S32, S32, U32)));
// Sparse dot: both bf16 operands must be converted to the f32 result type,
// while the sparsity metadata operand (parameter 2) is passed through as-is.
TEST_F(OperandUpcasterTest, SparseDot) {
  absl::string_view kHlo = R"(
  HloModule module

  ENTRY main {
    p0 = bf16[2,16]{1,0} parameter(0)
    p1 = bf16[32,2]{1,0} parameter(1)
    meta = u16[2,2]{1,0} parameter(2)
    ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),
        lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
  EXPECT_TRUE(upcasted);
  auto upcasted_lhs =
      AllOf(op::Convert(op::Parameter(0)), op::Shape("f32[2,16]{1,0}"));
  auto upcasted_rhs =
      AllOf(op::Convert(op::Parameter(1)), op::Shape("f32[32,2]{1,0}"));
  // op::Dot has no 3-operand overload, so match the variadic dot (including
  // the metadata operand) with a raw HloMatcher.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
                        HloOpcode::kDot,
                        {upcasted_lhs, upcasted_rhs, op::Parameter(2)})),
                    op::Shape("f32[2,2]{1,0}")));
}
}
} |
1,936 | cpp | tensorflow/tensorflow | instruction_fusion | third_party/xla/xla/service/gpu/transforms/instruction_fusion.cc | third_party/xla/xla/service/gpu/transforms/instruction_fusion_test.cc | #ifndef XLA_SERVICE_GPU_INSTRUCTION_FUSION_H_
#define XLA_SERVICE_GPU_INSTRUCTION_FUSION_H_
#include <stdint.h>
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
// GPU-specific instruction-fusion pass.  Specializes the generic
// InstructionFusion driver with GPU heuristics: which ops count as
// expensive, per-device fusion budgets, and per-fusion code-duplication
// limits tracked via FusionNodeIndexingEvaluation.
class GpuInstructionFusion : public InstructionFusion {
 public:
  // `may_duplicate`: whether a producer may be duplicated into several
  // consumers.  `d`: target device description used by budget checks.
  GpuInstructionFusion(bool may_duplicate, const se::DeviceDescription& d)
      : InstructionFusion(GpuInstructionFusion::IsExpensive, may_duplicate),
        device_info_(d) {}
  // GPU-specific notion of "expensive to recompute"; overrides the base
  // default for a few math ops (see the .cc implementation).
  static bool IsExpensive(const HloInstruction& instruction);
  using HloPassInterface::Run;
  // Runs fusion over `module`, restricted to `execution_threads`.
  // Returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 protected:
  // Returns the worklist of fusion candidates for `computation`; empty for
  // computations outside the fusible set of the current Run.
  std::unique_ptr<FusionQueue> GetFusionQueue(
      HloComputation* computation) override;
  // Decides whether operand `operand_index` of `consumer` may be fused.
  FusionDecision ShouldFuse(HloInstruction* consumer,
                            int64_t operand_index) override;
  HloInstruction::FusionKind ChooseKind(
      const HloInstruction* producer, const HloInstruction* consumer) override;
 private:
  // Cheap structural checks run before the more costly budget analysis.
  FusionDecision ShouldFuseInexpensiveChecks(HloInstruction* consumer,
                                             int64_t operand_index);
  HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
                                  HloInstruction* producer) override;
  // Computations eligible for fusion during the current Run invocation.
  absl::flat_hash_set<const HloComputation*> fusible_computations_;
  // Cached indexing evaluations, keyed by fusion instruction; updated
  // incrementally as producers are fused in (see FuseInstruction).
  absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
      fusion_node_evaluations_;
  se::DeviceDescription device_info_;
};
}
}
#endif
#include "xla/service/gpu/instruction_fusion.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
bool ElementIsF32OrF16(const Shape& shape) {
PrimitiveType type = shape.element_type();
return type == F32 || type == F16;
}
// A FusionQueue that never yields a candidate, effectively disabling fusion
// for the computation it serves.  Used for computations that are not in the
// fusible set of the current Run.
// Fix: dropped the redundant semicolons after the member-function bodies
// (`{};`), which trigger -Wextra-semi style warnings.
class EmptyFusionQueue : public FusionQueue {
 public:
  std::pair<HloInstruction*, std::vector<int64_t>>
  DequeueNextInstructionAndOperandsToFuseInOrder() override {
    // A null instruction tells the driver there is nothing left to fuse.
    return {nullptr, {}};
  }
  void RemoveInstruction(HloInstruction* instruction) override {}
  const std::vector<bool>* FusionConfiguration() override { return nullptr; }
};
}
// Resets the per-run caches, records which computations are fusible for this
// invocation, and then delegates to the generic fusion driver.
absl::StatusOr<bool> GpuInstructionFusion::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  fusion_node_evaluations_.clear();
  fusible_computations_.clear();
  for (auto* computation :
       GetFusibleComputations(*module, execution_threads)) {
    fusible_computations_.insert(computation);
  }
  return InstructionFusion::Run(module, execution_threads);
}
bool GpuInstructionFusion::IsExpensive(
const HloInstruction& instruction) {
switch (instruction.opcode()) {
case HloOpcode::kDivide:
case HloOpcode::kSqrt:
case HloOpcode::kRsqrt:
case HloOpcode::kExp:
if (ElementIsF32OrF16(instruction.shape())) {
return false;
}
break;
default:
break;
}
return InstructionFusion::IsExpensive(instruction);
}
// Cheap, structural checks that run before the costlier budget analysis in
// ShouldFuse.  Returns an empty FusionDecision to allow fusion, or a message
// explaining why fusing `consumer`'s operand at `operand_index` is rejected.
// The order of the checks determines which rejection message is reported.
FusionDecision GpuInstructionFusion::ShouldFuseInexpensiveChecks(
    HloInstruction* consumer, int64_t operand_index) {
  HloInstruction* producer = consumer->mutable_operand(operand_index);
  // A producer that is already a fusion is never fused further here.
  if (producer->opcode() == HloOpcode::kFusion) {
    return "the producer is a fusion";
  }
  if (consumer->IsCustomFusion()) {
    return "the consumer is a custom fusion";
  }
  // Don't duplicate expensive work into a consumer that would re-evaluate
  // the operand's elements multiple times.
  if (is_expensive(*producer) &&
      ReusesOperandElements(consumer, operand_index)) {
    return "the producer is expensive, and the consumer reuses inputs";
  }
  // Avoid fusing a physically-transposing producer into a reduction input
  // fusion; the combined access pattern would hurt memory coalescing.
  if (IsInputFusibleReduction(*consumer) &&
      IsPhysicallyTransposing(*producer)) {
    return "fusing the producer would break read coalescing";
  }
  RETURN_IF_NOT_FUSIBLE(IsProducerConsumerFusible(*producer, *consumer));
  if (CreatesHeavyComputation(*producer, *consumer)) {
    return "the fusion would create a heavy computation";
  }
  // Finally apply the generic (base-class) fusion policy.
  return InstructionFusion::ShouldFuse(consumer, operand_index);
}
// Full fusion decision: runs the cheap structural checks, then the
// device-budget check, and finally — for existing fusions — the
// code-duplication limit based on the cached indexing evaluation.
// Fix: replaced the find-then-emplace pair (two hash lookups) with a single
// try_emplace, which also constructs the FusionNodeIndexingEvaluation in
// place only when the key is absent.
FusionDecision GpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
                                                int64_t operand_index) {
  RETURN_IF_NOT_FUSIBLE(ShouldFuseInexpensiveChecks(consumer, operand_index));
  auto producer = consumer->operand(operand_index);
  RETURN_IF_NOT_FUSIBLE(
      FusionFitsInBudget(*consumer, *producer, device_info_,
                         /*is_consumer_producer_fusion=*/true));
  // The duplication check below only applies when fusing into an existing
  // fusion instruction.
  if (consumer->opcode() != HloOpcode::kFusion) {
    return {};
  }
  auto it = fusion_node_evaluations_.try_emplace(consumer, consumer).first;
  if (it->second.CodeDuplicationTooHigh(producer)) {
    return "the fusion would result in an overly large code duplication";
  }
  return {};
}
// Delegates the kLoop/kInput fusion-kind decision to the shared GPU helper.
HloInstruction::FusionKind GpuInstructionFusion::ChooseKind(
    const HloInstruction* producer, const HloInstruction* consumer) {
  return ChooseFusionKind(*producer, *consumer);
}
// Performs the actual fusion of `producer` into `fusion_instruction` and
// keeps the cached FusionNodeIndexingEvaluation for the fusion up to date
// (remove the operand being fused, then refresh the cache with the fused-in
// clone of the producer).
// Fix: replaced find-then-emplace (two hash lookups plus a temporary) with a
// single try_emplace that constructs the evaluation in place only if absent.
HloInstruction* GpuInstructionFusion::FuseInstruction(
    HloInstruction* fusion_instruction, HloInstruction* producer) {
  auto evaluation =
      fusion_node_evaluations_.try_emplace(fusion_instruction,
                                           fusion_instruction)
          .first;
  auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
  HloInstruction* new_producer =
      InstructionFusion::FuseInstruction(fusion_instruction, producer);
  evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
  return new_producer;
}
// Returns the base-class fusion queue for computations in the fusible set;
// every other computation gets an EmptyFusionQueue, i.e. no fusion at all.
std::unique_ptr<FusionQueue> GpuInstructionFusion::GetFusionQueue(
    HloComputation* computation) {
  if (!fusible_computations_.contains(computation)) {
    return std::make_unique<EmptyFusionQueue>();
  }
  return InstructionFusion::GetFusionQueue(computation);
}
}
} | #include "xla/service/gpu/instruction_fusion.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
// Test fixture with a ready-made duplicating GpuInstructionFusion pass
// configured for an RTX A6000 device description.
class InstructionFusionTest : public HloTestBase {
 public:
  GpuInstructionFusion duplicating_instruction_fusion_{
      /*may_duplicate=*/true, TestGpuDeviceInfo::RTXA6000DeviceInfo()};
};
// A producer (the convert) must not be fused into a kCustom fusion consumer.
TEST_F(InstructionFusionTest, NoFusionIntoCustomFusionConsumer) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"(
HloModule m

c {
  p0 = bf16[3000,53]{1,0} parameter(0)
  p1 = bf16[22,53]{1,0} parameter(1)
  d = bf16[3000,22]{1,0} dot(p0, p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
  r = bf16[1,1,3000,22]{3,2,1,0} reshape(d)
  ROOT c = bf16[1,1,3000,22]{2,1,3,0} copy(r)
}

ENTRY e {
  p1 = bf16[3000,53]{1,0} parameter(1)
  p0 = bf16[22,53]{1,0} parameter(0)
  cp0 = bf16[22,53]{1,0} convert(p0)
  ROOT f = bf16[1,1,3000,22]{2,1,3,0} fusion(p1, cp0), kind=kCustom, calls=c
})"));

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// log is expensive and broadcast reuses its operand element, so no fusion.
TEST_F(InstructionFusionTest,
       CostlyProducerAndOperandElementReusingConsumerNotFused) {
  HloComputation::Builder builder(TestName());
  HloInstruction* const0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f)));
  HloInstruction* log1 = builder.AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeShape(F32, {}), HloOpcode::kLog, const0));
  HloInstruction* broadcast2 =
      builder.AddInstruction(HloInstruction::CreateBroadcast(
          ShapeUtil::MakeShape(F32, {1}), log1, {}));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(broadcast2, computation->root_instruction());
  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
  // The graph must be unchanged.
  EXPECT_EQ(broadcast2, computation->root_instruction());
}

// A cheap producer (negate) fuses even though the broadcast reuses elements.
TEST_F(InstructionFusionTest,
       NonCostlyProducerAndOperandElementReusingConsumerFused) {
  HloComputation::Builder builder(TestName());
  HloInstruction* const0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0(5)));
  HloInstruction* negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeShape(S32, {}), HloOpcode::kNegate, const0));
  HloInstruction* broadcast2 =
      builder.AddInstruction(HloInstruction::CreateBroadcast(
          ShapeUtil::MakeShape(S32, {1}), negate1, {}));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(broadcast2, computation->root_instruction());
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}

// An expensive producer (exp) still fuses into a reshape, which does not
// reuse operand elements.
TEST_F(InstructionFusionTest,
       CostlyProducerAndNonOperandElementReusingConsumerFused_Reshape) {
  HloComputation::Builder builder(TestName());
  HloInstruction* const0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f)));
  HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeShape(F32, {}), HloOpcode::kExp, const0));
  HloInstruction* reshape2 = builder.AddInstruction(
      HloInstruction::CreateReshape(ShapeUtil::MakeShape(F32, {}), exp1));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(reshape2, computation->root_instruction());
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}

// Same as above, but the non-reusing consumer is a transpose.
TEST_F(InstructionFusionTest,
       CostlyProducerAndNonOperandElementReusingConsumerFused_Transpose) {
  HloComputation::Builder builder(TestName());
  HloInstruction* const0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f)));
  HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeShape(F32, {}), HloOpcode::kExp, const0));
  HloInstruction* transpose2 = builder.AddInstruction(
      HloInstruction::CreateTranspose(ShapeUtil::MakeShape(F32, {}), exp1, {}));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(transpose2, computation->root_instruction());
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion()));
}
// A bitcast-like reshape of a dot result may be fused with its consumer.
TEST_F(InstructionFusionTest, PotentialBitcastReshapeOfDotFused) {
  HloComputation::Builder builder(TestName());
  auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {1, 1}), "0"));
  auto dot1 = builder.AddInstruction(
      CreateCanonicalDot(ShapeUtil::MakeShape(F32, {1, 1}), param0, param0));
  auto reshape2 = builder.AddInstruction(HloInstruction::CreateReshape(
      ShapeUtil::MakeShape(F32, {1, 1, 1}), dot1));
  auto log = builder.AddInstruction(HloInstruction::CreateUnary(
      reshape2->shape(), xla::HloOpcode::kLog, reshape2));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(log, computation->root_instruction());
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// A transpose directly consuming a dot must not be fused with it.
TEST_F(InstructionFusionTest, PotentialBitcastTransposeOfDotUnfused) {
  HloComputation::Builder builder(TestName());
  auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(S32, {1, 1}), "0"));
  auto dot1 = builder.AddInstruction(
      CreateCanonicalDot(ShapeUtil::MakeShape(S32, {1, 1}), param0, param0));
  auto transpose2 = builder.AddInstruction(HloInstruction::CreateTranspose(
      ShapeUtil::MakeShape(S32, {1, 1}), dot1, {0, 1}));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(transpose2, computation->root_instruction());
  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// A broadcast feeding a reduce fuses into a single (input) fusion.
TEST_F(InstructionFusionTest, BroadcastIntoReduce) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    add {
      lhs = f32[] parameter(0)
      rhs = f32[] parameter(1)
      ROOT add = f32[] add(lhs, rhs)
    }

    ENTRY BroadcastIntoReduce {
      constant = f32[] constant(1)
      broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={}
      constant.1 = f32[] constant(0)
      ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
                                                         to_apply=add
    })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_THAT(
      root->fused_expression_root(),
      GmockMatch(m::Reduce(m::Broadcast(m::Constant()), m::Constant())));
}
// A layout-changing copy must not be fused into a reduce consumer.
TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduce) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    add {
      lhs = f32[] parameter(0)
      rhs = f32[] parameter(1)
      ROOT add = f32[] add(lhs, rhs)
    }

    ENTRY entry {
      p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
      copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
      constant.1 = f32[] constant(0)
      ROOT reduce = f32[16] reduce(copy, constant.1), dimensions={0,1,2}, to_apply=add
    })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// Same restriction when the consumer is an existing reduce input fusion.
TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduceFusion) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    add {
      lhs = f32[] parameter(0)
      rhs = f32[] parameter(1)
      ROOT add = f32[] add(lhs, rhs)
    }

    fused_reduce {
      p0.1 = f32[16,16,16,16]{0,1,2,3} parameter(0)
      mul = f32[16,16,16,16]{0,1,2,3} multiply(p0.1, p0.1)
      c0.1 = f32[] constant(0)
      ROOT root = f32[] reduce(mul, c0.1), dimensions={0,1,2,3}, to_apply=add
    }

    ENTRY entry {
      p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
      copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
      fusion = f32[] fusion(copy), kind=kInput, calls=fused_reduce
      ROOT root = (f32[]) tuple(fusion)
    })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// A large reduce-window feeding a gather must not be duplicated by fusion.
TEST_F(InstructionFusionTest, DoNotRepeatLargeReduceWindow) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    add {
      lhs = f32[] parameter(0)
      rhs = f32[] parameter(1)
      ROOT add = f32[] add(lhs, rhs)
    }

    ENTRY entry {
      p0 = s32[512,512,2] parameter(0)
      p1 = f32[1,1,512,512] parameter(1)
      constant_1 = f32[] constant(1)
      reduce-window.1 = reduce-window(p1, constant_1),
        window={size=1x1x9x9}, to_apply=add
      ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3},
        collapsed_slice_dims={}, start_index_map={1,2},
        index_vector_dim=2, slice_sizes={1,1,1,1}
    })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
// A layout-changing copy may fuse with an elementwise consumer.
TEST_F(InstructionFusionTest, FuseLayoutChangingOpWithElementwise) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module
    ENTRY entry {
      p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)
      copy = f32[16,16,16,16]{0,1,2,3} copy(p0)
      ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy)
    })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_THAT(root->fused_expression_root(),
              GmockMatch(m::Add(m::Copy(), m::Copy())));
}

// A bitcast producer fuses into its elementwise consumer.
TEST_F(InstructionFusionTest, BitcastIntoAdd) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    ENTRY BroadcastIntoAdd {
      p0 = f32[4,1,1]{2,1,0} parameter(0)
      p1 = f32[4,1]{1,0} parameter(1)
      bitcast = f32[4,1]{1,0} bitcast(p0)
      ROOT add = f32[4,1] add(bitcast, p1)
    })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_THAT(root->fused_expression_root(),
              GmockMatch(m::Add(m::Bitcast(m::Parameter()), m::Parameter())));
}

// An add must not be fused into a bitcast consumer.
TEST_F(InstructionFusionTest, AddIntoBitcast) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test_module

    ENTRY BroadcastIntoAdd {
      p0 = f32[4,1]{1,0} parameter(0)
      p1 = f32[4,1]{1,0} parameter(1)
      add = f32[4,1] add(p0, p1)
      ROOT bitcast = f32[4,1,1] bitcast(add)
    })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// Neither the convert nor its bitcast user fuse when both feed the tuple.
TEST_F(InstructionFusionTest, ConvertIntoBitcastBothConsumedByTuple) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test

  ENTRY main {
    param_0 = f32[2048,16000]{1,0} parameter(0)
    convert = bf16[2048,16000]{1,0} convert(param_0)
    bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert)
    ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert)
  })")
                    .value();
  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// get-tuple-element producers are never fused.
TEST_F(InstructionFusionTest, DontFuseGTE) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY DontFuseGTE {
    p0 = (f32[10], f32[10]) parameter(0)
    gte0 = f32[10] get-tuple-element(p0), index=0
    gte1 = f32[10] get-tuple-element(p0), index=1
    ROOT add = f32[10] add(gte0, gte1)
  })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}
// f32 division is cheap on GPU, so the divide is duplicated into both
// reduce consumers, yielding two fusions.
TEST_F(InstructionFusionTest, FloatingPointDivIsCheap) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  Add {
    lhs = f32[] parameter(0)
    rhs = f32[] parameter(1)
    ROOT add = f32[] add(lhs, rhs)
  }
  ENTRY TestComputation {
    zero = f32[] constant(0)
    p0 = f32[100] parameter(0)
    p1 = f32[100] parameter(1)
    recip = f32[100] divide(p1, p0)
    sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    sum2 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    ROOT root = (f32[], f32[]) tuple(sum1, sum2)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion())))
      << module->ToString();
}

// s32 division is expensive, so it is not duplicated into its two consumers.
TEST_F(InstructionFusionTest, IntegerDivIsNotCheap) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  Add {
    lhs = s32[] parameter(0)
    rhs = s32[] parameter(1)
    ROOT add = s32[] add(lhs, rhs)
  }
  ENTRY TestComputation {
    zero = s32[] constant(0)
    p0 = s32[100] parameter(0)
    p1 = s32[100] parameter(1)
    recip = s32[100] divide(p1, p0)
    sum1 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    sum2 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    ROOT mul = (s32[], s32[]) tuple(sum1, sum2)
  })")
                    .value();

  EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value())
      << module->ToString();
}

// The dot is not output-fused; the elementwise ops form a kLoop fusion with
// the dot remaining outside as a fusion operand.
TEST_F(InstructionFusionTest, DotOutputFusionImpossible) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY NoOutputFusion {
    alpha = f32[] constant(3)
    broadcast = f32[4,4]{1,0} broadcast(alpha), dimensions={}
    p0 = f32[4,3]{1,0} parameter(0)
    p1 = f32[3,4]{1,0} parameter(1)
    dot = f32[4,4]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    d = f32[4,4]{1,0} multiply(dot, dot)
    ROOT mul = f32[4,4] multiply(d, broadcast)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop);
  EXPECT_THAT(
      root->fused_expression_root(),
      GmockMatch(m::Multiply(m::Multiply(m::Parameter(), m::Parameter()),
                             m::Broadcast(m::Constant()))));
}
// Counts how many instructions with opcode `op` appear anywhere in `module`
// (across all of its computations).
static int Count(const HloModule& module, HloOpcode op) {
  int occurrences = 0;
  for (const auto* computation : module.computations()) {
    for (const auto* instruction : computation->instructions()) {
      occurrences += (instruction->opcode() == op) ? 1 : 0;
    }
  }
  return occurrences;
}
// Multi-output fusion is not performed by this pass: nothing changes.
TEST_F(InstructionFusionTest, MultiOutputFusion) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  ENTRY OutputFusion {
    p0 = f32[4,3]{1,0} parameter(0)
    p1 = f32[4,3]{1,0} parameter(1)
    p2 = f32[4,3]{1,0} parameter(2)
    sub = f32[4,3]{1,0} subtract(p0, p2)
    add = f32[4,3]{1,0} add(sub, p1)
    ROOT tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}) tuple(sub, add)
  })")
                    .value();

  ASSERT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value());
}

// Scalar constants are fused into the consumer computation.
TEST_F(InstructionFusionTest, FuseScalarConstant) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  ENTRY FuseScalarConstant {
    p0 = f32[] parameter(0)
    c0 = f32[] constant(1)
    add1 = f32[] add(p0, c0)
    b0 = f32[2]{0} broadcast(add1), dimensions={}
    c1 = f32[2]{0} constant({1, 2})
    ROOT add2 = f32[2]{0} add(b0, c1)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  // The scalar constant is fused; the non-scalar one stays a parameter.
  EXPECT_THAT(
      root->fused_expression_root(),
      GmockMatch(m::Add(m::Broadcast(m::Add(m::Parameter(), m::Constant())),
                        m::Parameter())));
}

// Fusion must respect the operand-count budget: with 200 parameters, no
// resulting instruction may exceed MaxOperandsAndOutputsPerFusion operands.
TEST_F(InstructionFusionTest, AvoidsLargeFusion) {
  constexpr int64_t kNumParams = 200;
  ASSERT_GT(kNumParams, MaxOperandsAndOutputsPerFusion());

  // Build a chain of adds over kNumParams parameters.
  HloComputation::Builder b(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {10, 100});

  auto param0 =
      b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
  auto sum = param0;
  for (int64_t i = 1; i < kNumParams; ++i) {
    auto param =
        b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p"));
    sum = b.AddInstruction(
        HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, param));
  }
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(b.Build());
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  SCOPED_TRACE(module->ToString());
  for (const HloInstruction* instr : computation->instructions()) {
    EXPECT_LE(instr->operand_count(), MaxOperandsAndOutputsPerFusion())
        << instr->ToString();
  }
}
// The indices/updates producers fuse into the scatter (an input fusion);
// the scatter result's consumer stays outside.
TEST_F(InstructionFusionTest, FuseIntoScatter) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  add {
    lhs = s32[] parameter(0)
    rhs = s32[] parameter(1)
    ROOT add = s32[] add(lhs, rhs)
  }

  ENTRY FuseIntoScatter {
    p0 = s32[3,3] parameter(0)
    p1 = s32[2] parameter(1)
    indices = s32[2] add(p1, p1)
    p2 = s32[2,3] parameter(2)
    updates = s32[2,3] add(p2, p2)
    scatter = s32[3,3] scatter(p0, indices, updates),
        to_apply=add,
        update_window_dims={1},
        inserted_window_dims={0},
        scatter_dims_to_operand_dims={0},
        index_vector_dim=1
    ROOT add = s32[3,3] add(scatter, scatter)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* fusion = nullptr;
  ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
  EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
  EXPECT_THAT(fusion->fused_expression_root(),
              GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}

// The producer of the scatter's first (in-place updated) operand must stay
// outside the fusion; only indices/updates producers fuse in.
TEST_F(InstructionFusionTest, DontFuseIntoFirstOperandOfScatter) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  add {
    lhs = s32[] parameter(0)
    rhs = s32[] parameter(1)
    ROOT add = s32[] add(lhs, rhs)
  }

  ENTRY FuseIntoScatter {
    p0 = s32[3,3] parameter(0)
    operand = s32[3,3] add(p0, p0)
    p1 = s32[2] parameter(1)
    indices = s32[2] add(p1, p1)
    p2 = s32[2,3] parameter(2)
    updates = s32[2,3] add(p2, p2)
    scatter = s32[3,3] scatter(operand, indices, updates),
        to_apply=add,
        update_window_dims={1},
        inserted_window_dims={0},
        scatter_dims_to_operand_dims={0},
        index_vector_dim=1
    ROOT add = s32[3,3] add(scatter, scatter)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* fusion = nullptr;
  ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
  EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
  EXPECT_THAT(fusion->fused_expression_root(),
              GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}

// The slice shares parameter.0 with the scatter's in-place operand, so it
// is kept outside the scatter fusion (passed in as a separate operand).
TEST_F(InstructionFusionTest, ScatterOpShouldNotFuseWithSharedOperand) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  add {
    lhs = f32[] parameter(0)
    rhs = f32[] parameter(1)
    ROOT add = f32[] add(lhs, rhs)
  }

  ENTRY Test {
    parameter.0 = f32[8,8] parameter(0)
    parameter.1 = s32[7] parameter(1)
    indices = s32[7] add(parameter.1, parameter.1)
    slice = f32[7,8] slice(parameter.0), slice={[0:7],[0:8]}
    ROOT scatter = f32[8,8] scatter(parameter.0, indices, slice),
        to_apply=add,
        update_window_dims={1},
        inserted_window_dims={0},
        scatter_dims_to_operand_dims={0},
        index_vector_dim=1
  })")
                    .value();
  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root, GmockMatch(m::Fusion(m::Parameter(), m::Slice(), m::Parameter())));
}
// Non-scalar constants are not fused; they become fusion parameters.
TEST_F(InstructionFusionTest, NonscalarConstantsNotFused) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  add {
    lhs = f32[] parameter(0)
    rhs = f32[] parameter(1)
    ROOT add = f32[] add(lhs, rhs)
  }

  ENTRY BroadcastIntoReduce {
    constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15})
    broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0}
    constant.1 = f32[] constant(0)
    ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},
                                                       to_apply=add
    })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  // The f32[16] constant feeds the fusion as a parameter; only the scalar
  // init constant appears inside.
  auto* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_THAT(
      root->fused_instructions_computation()->root_instruction(),
      GmockMatch(m::Reduce(m::Broadcast(m::Parameter()), m::Constant())));
}

// An elementwise producer fuses into a reverse consumer.
TEST_F(InstructionFusionTest, FuseReverse) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  ENTRY Reverse {
    p0 = f32[50,96,1024]{2,1,0} parameter(0)
    add = f32[50,96,1024]{2,1,0} add(p0, p0)
    ROOT reverse = f32[50,96,1024] reverse(add), dimensions={0}
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_THAT(root->fused_expression_root(),
              GmockMatch(m::Reverse(m::Add(m::Parameter(), m::Parameter()))));
}
// Direct checks of GpuInstructionFusion::IsExpensive per element type.
// NOTE(review): `m` is unused in these four tests; the instructions live
// only in the local builder and are never added to a module.
// f32: divide/sqrt/rsqrt/exp are cheap on GPU; remainder stays expensive.
TEST_F(InstructionFusionTest, GpuIsExpensiveF32) {
  auto m = CreateNewVerifiedModule();
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "param0"));

  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  HloInstruction* div = builder.AddInstruction(
      HloInstruction::CreateBinary(r0f32, HloOpcode::kDivide, param0, one));
  HloInstruction* rem = builder.AddInstruction(
      HloInstruction::CreateBinary(r0f32, HloOpcode::kRemainder, param0, one));
  HloInstruction* sqrt = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32, HloOpcode::kSqrt, param0));
  HloInstruction* rsqrt = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32, HloOpcode::kRsqrt, param0));
  HloInstruction* exp = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, param0));

  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem));
  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*sqrt));
  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rsqrt));
  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*exp));
}

// f64: the f32/f16 carve-out does not apply, so all of these are expensive.
TEST_F(InstructionFusionTest, GpuIsExpensiveF64) {
  auto m = CreateNewVerifiedModule();
  Shape r0f64 = ShapeUtil::MakeShape(F64, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f64, "param0"));

  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  HloInstruction* div = builder.AddInstruction(
      HloInstruction::CreateBinary(r0f64, HloOpcode::kDivide, param0, one));
  HloInstruction* rem = builder.AddInstruction(
      HloInstruction::CreateBinary(r0f64, HloOpcode::kRemainder, param0, one));
  HloInstruction* sqrt = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f64, HloOpcode::kSqrt, param0));
  HloInstruction* rsqrt = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f64, HloOpcode::kRsqrt, param0));
  HloInstruction* exp = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f64, HloOpcode::kExp, param0));

  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*div));
  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem));
  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*sqrt));
  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rsqrt));
  EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*exp));
}

// Scalar s32 divide/remainder are treated as cheap.
TEST_F(InstructionFusionTest, GpuIsExpensiveS32) {
  auto m = CreateNewVerifiedModule();
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0s32, "param0"));

  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  HloInstruction* div = builder.AddInstruction(
      HloInstruction::CreateBinary(r0s32, HloOpcode::kDivide, param0, one));
  HloInstruction* rem = builder.AddInstruction(
      HloInstruction::CreateBinary(r0s32, HloOpcode::kRemainder, param0, one));

  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem));
}

// s32 divide/remainder by a broadcast scalar are also treated as cheap.
TEST_F(InstructionFusionTest, GpuIsExpensiveBroadcastS32) {
  auto m = CreateNewVerifiedModule();
  Shape r1s32 = ShapeUtil::MakeShape(S32, {10});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r1s32, "param0"));

  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  HloInstruction* one_broad =
      builder.AddInstruction(HloInstruction::CreateBroadcast(r1s32, one, {}));
  HloInstruction* div = builder.AddInstruction(HloInstruction::CreateBinary(
      r1s32, HloOpcode::kDivide, param0, one_broad));
  HloInstruction* rem = builder.AddInstruction(HloInstruction::CreateBinary(
      r1s32, HloOpcode::kRemainder, param0, one_broad));

  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div));
  EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem));
}
// f32 exp is cheap, so it is duplicated into both reduce consumers.
TEST_F(InstructionFusionTest, FloatingPointExpIsCheap) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module
  Add {
    lhs = f32[] parameter(0)
    rhs = f32[] parameter(1)
    ROOT add = f32[] add(lhs, rhs)
  }
  ENTRY TestComputation {
    zero = f32[] constant(0)
    p0 = f32[100] parameter(0)
    recip = f32[100] exponential(p0)
    sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    sum2 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add
    ROOT root = (f32[], f32[]) tuple(sum1, sum2)
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion())))
      << module->ToString();
}

// Reducing a small minor dimension still yields an input (not loop) fusion.
TEST_F(InstructionFusionTest, SmallReducedDimensionIsNotLoweredToLoop) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule test_module

  add {
    lhs = s32[] parameter(0)
    rhs = s32[] parameter(1)
    ROOT add = s32[] add(lhs, rhs)
  }

  ENTRY FuseSmallReduction {
    p0 = s32[1048576,4] parameter(0)
    p1 = s32[1048576,4] parameter(1)
    sum = s32[1048576,4] add(p0, p1)
    init = s32[] constant(0)
    ROOT reduce = s32[1048576] reduce(sum, init), dimensions={1}, to_apply=add
  })")
                    .value();

  EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value());

  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_THAT(root, GmockMatch(m::Fusion()));
  EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput);
}
TEST_F(InstructionFusionTest, IotaIntoVariadicReduction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=f
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax
})")
.value();
EXPECT_TRUE(GpuInstructionFusion(false,
TestGpuDeviceInfo::RTXA6000DeviceInfo())
.Run(module.get())
.value());
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())));
EXPECT_THAT(
module->entry_computation()->root_instruction()->fused_expression_root(),
GmockMatch(
m::Reduce(m::Parameter(), m::Iota(), m::Constant(), m::Constant())));
}
TEST_F(InstructionFusionTest, InputReductionFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add.clone.13 {
x.27 = f32[] parameter(0)
y.27 = f32[] parameter(1)
ROOT add.1036 = f32[] add(x.27, y.27)
}
add.clone.14 {
x.28 = f32[] parameter(0)
y.28 = f32[] parameter(1)
ROOT add.1037 = f32[] add(x.28, y.28)
}
add {
x = bf16[] parameter(0)
convert.448 = f32[] convert(x)
y = bf16[] parameter(1)
convert.449 = f32[] convert(y)
add.597 = f32[] add(convert.448, con |
1,937 | cpp | tensorflow/tensorflow | tree_reduction_rewriter | third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc | third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter_test.cc | #ifndef XLA_SERVICE_GPU_TREE_REDUCTION_REWRITER_H_
#define XLA_SERVICE_GPU_TREE_REDUCTION_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuTreeReductionRewriter : public HloModulePass {
public:
explicit GpuTreeReductionRewriter(se::GpuComputeCapability gpu_version)
: gpu_version_(gpu_version) {}
~GpuTreeReductionRewriter() override = default;
absl::string_view name() const override {
return "gpu-tree-reduction-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::GpuComputeCapability gpu_version_;
};
}
}
#endif
#include "xla/service/gpu/tree_reduction_rewriter.h"
#include <cmath>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class ReductionRewriterVisitor : public DfsHloRewriteVisitor {
public:
explicit ReductionRewriterVisitor(se::GpuComputeCapability gpu_version)
: gpu_version_(gpu_version) {}
absl::Status HandleReduce(HloInstruction *hlo) override {
if (hlo->GetModule()
->config()
.debug_options()
.xla_gpu_mlir_emitter_level() < 4 &&
IsMinMaxReduction(hlo)) {
VLOG(1) << "Not performing tree expansion on min/max-reduction: "
<< hlo->ToString() << " since min/max operations are associative";
return absl::OkStatus();
}
if (!IsReductionFromOrToContiguousDimensions(*hlo)) {
return absl::OkStatus();
}
return RewriteReduction(hlo);
}
private:
bool IsMinMaxReduction(HloInstruction *hlo) {
HloComputation *called = hlo->called_computations()[0];
if (std::optional<ReductionKind> reduction_kind =
MatchReductionComputation(called)) {
return reduction_kind == ReductionKind::MAX ||
reduction_kind == ReductionKind::MIN;
}
return false;
}
bool ShouldSwapInnerAndOuterReducedMinorDimension(uint64_t k,
uint64_t n_div_k,
uint64_t n,
int64_t race_free_bound,
bool is_row_reduction) {
CHECK(k >= n_div_k);
if (k > race_free_bound) {
return false;
}
if (is_row_reduction) {
bool maybe_vectorized = n_div_k % 2 == 0 && n % 2 == 0;
if (maybe_vectorized) {
return n_div_k * 2 < k || k % 2 == 0;
}
return n % 2 == 0 || k % 2 != 0;
}
return true;
}
absl::Status RewriteReduction(HloInstruction *hlo) {
ReductionDimensions reduction_dimensions =
GetReductionKindAndContiguousComponents(*hlo);
VLOG(5) << "Input: " << hlo->ToString();
auto *reduce = Cast<HloReduceInstruction>(hlo);
absl::Span<int64_t const> input_shape_dims =
reduce->inputs()[0]->shape().dimensions();
VLOG(3) << "Input dimensions: " << absl::StrJoin(input_shape_dims, ", ");
bool reduce_batch_dimension = hlo->dimensions().size() > 1;
VLOG(3) << "reduce_batch_dimension = " << reduce_batch_dimension;
std::vector<int64_t> reduced_dimensions = *hlo->mutable_dimensions();
absl::c_sort(reduced_dimensions);
CHECK_LE(reduced_dimensions.size(), 2);
int64_t reduced_input_dimension =
reduced_dimensions[reduced_dimensions.size() - 1];
VLOG(3) << "reduced_input_dimension: " << reduced_input_dimension;
if (reduce_batch_dimension &&
input_shape_dims[0] > BatchedReductionRaceFreeBound()) {
VLOG(2) << "Splitting batched dimension reduce into a separate reduction";
VLOG(1) << "Input: " << hlo->ToString();
return RewriteBatchDimensionLargerThanTile(reduce, reduction_dimensions,
reduced_input_dimension);
}
bool is_row_reduction = reduction_dimensions.is_row_reduction;
if (ReductionIsRaceFree(hlo->GetModule()->config(), reduction_dimensions)) {
VLOG(3) << "Base case: dimensions fit";
return absl::OkStatus();
}
VLOG(1) << "Input: " << hlo->ToString();
int64_t n = input_shape_dims[reduced_input_dimension];
VLOG(3) << "n = " << n;
uint64_t n_div_k = static_cast<uint64_t>(std::floor(std::sqrt(n)));
int64_t race_free_bound = ReductionDimensionRaceFreeBound(
hlo->GetModule()->config(), reduction_dimensions);
if (n_div_k > race_free_bound) {
n_div_k = race_free_bound;
}
uint64_t minimum_padding = (n_div_k - n % n_div_k) % n_div_k;
uint64_t best_k = (n + minimum_padding) / n_div_k;
for (uint64_t i = n_div_k - 1; i > n_div_k / 2; --i) {
uint64_t padding = (i - n % i) % i;
if (padding < minimum_padding ||
(padding == minimum_padding && absl::has_single_bit(i))) {
minimum_padding = padding;
best_k = (n + padding) / i;
}
}
uint64_t padded_n = n + minimum_padding;
uint64_t best_n_div_k = padded_n / best_k;
if (ShouldSwapInnerAndOuterReducedMinorDimension(
best_k, best_n_div_k, n, race_free_bound, is_row_reduction)) {
std::swap(best_k, best_n_div_k);
}
bool no_padding_necessary = n == padded_n;
using InstructionVector = absl::InlinedVector<HloInstruction *, 2>;
auto padded = [&]() -> InstructionVector {
if (no_padding_necessary) {
return InstructionVector(reduce->inputs().begin(),
reduce->inputs().end());
}
PaddingConfig padding_config =
MakeNoPaddingConfig(input_shape_dims.size());
padding_config.mutable_dimensions(reduced_input_dimension)
->set_edge_padding_high(padded_n - n);
std::vector<int64_t> padded_dimensions(input_shape_dims.begin(),
input_shape_dims.end());
padded_dimensions[reduced_input_dimension] = padded_n;
absl::InlinedVector<HloInstruction *, 2> out;
out.reserve(reduce->input_count());
for (int i = 0; i < reduce->input_count(); i++) {
HloInstruction *in = reduce->inputs()[i];
Shape padded_shape =
ShapeUtil::MakeShape(in->shape().element_type(), padded_dimensions);
VLOG(3) << "Generated padded shape: " << padded_shape.ToString();
out.push_back(hlo->parent()->AddInstruction(
HloInstruction::CreatePad(padded_shape, in,
reduce->init_values()[i], padding_config),
&in->metadata()));
}
return out;
}();
VLOG(2) << "Generated padding: " << padded[0]->ToString();
absl::InlinedVector<int64_t, 3> reshaped_dimensions;
for (int64_t dim_idx = 0; dim_idx < padded[0]->shape().dimensions_size();
dim_idx++) {
if (dim_idx == reduced_input_dimension) {
reshaped_dimensions.push_back(best_k);
reshaped_dimensions.push_back(padded_n / best_k);
} else {
reshaped_dimensions.push_back(padded[0]->shape().dimensions(dim_idx));
}
}
absl::InlinedVector<int64_t, 3> inner_reduce_dimensions =
reshaped_dimensions;
int64_t inner_reduced_dimension = is_row_reduction
? inner_reduce_dimensions.size() - 1
: reduced_input_dimension + 1;
VLOG(2) << "inner_reduced_dimension = " << inner_reduced_dimension;
inner_reduce_dimensions.erase(inner_reduce_dimensions.begin() +
inner_reduced_dimension);
if (reduce_batch_dimension) {
inner_reduce_dimensions.erase(inner_reduce_dimensions.begin());
}
std::vector<int64_t> dims_to_reduce = {inner_reduced_dimension};
if (reduce_batch_dimension) {
dims_to_reduce.push_back(0);
inner_reduced_dimension -= 1;
}
InstructionVector reshaped_padded_inputs;
absl::InlinedVector<Shape, 2> inner_reduce_shapes;
for (int i = 0; i < padded.size(); i++) {
HloInstruction *p = padded[i];
Shape reshaped_shape =
ShapeUtil::MakeShape(p->shape().element_type(), reshaped_dimensions);
HloInstruction *reshaped_padded_input = hlo->parent()->AddInstruction(
HloInstruction::CreateBitcast(reshaped_shape, p), &p->metadata());
VLOG(2) << "Generated reshape: " << reshaped_padded_input->ToString();
reshaped_padded_inputs.push_back(reshaped_padded_input);
Shape inner_reduce_shape = ShapeUtil::MakeShape(p->shape().element_type(),
inner_reduce_dimensions);
inner_reduce_shapes.push_back(inner_reduce_shape);
}
HloInstruction *inner_reduce = hlo->parent()->AddInstruction(
HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(inner_reduce_shapes),
reshaped_padded_inputs, reduce->init_values(), dims_to_reduce,
hlo->to_apply()),
&reduce->metadata());
VLOG(1) << "Generated inner reduction: " << inner_reduce->ToString();
absl::InlinedVector<int64_t, 3> outer_reduce_dimensions =
inner_reduce_dimensions;
VLOG(3) << "outer_reduce_dimensions = "
<< absl::StrJoin(outer_reduce_dimensions, ", ");
int64_t outer_reduced_dimension = is_row_reduction
? outer_reduce_dimensions.size() - 1
: reduced_input_dimension;
outer_reduce_dimensions.erase(outer_reduce_dimensions.begin() +
outer_reduced_dimension);
std::unique_ptr<HloInstruction> outer_reduce = HloInstruction::CreateReduce(
hlo->shape(), inner_reduce, reduce->init_values(),
{outer_reduced_dimension}, hlo->to_apply());
VLOG(1) << "Generated outer reduction: " << outer_reduce->ToString();
return ReplaceWithNewInstruction(hlo, std::move(outer_reduce));
}
absl::Status RewriteBatchDimensionLargerThanTile(
HloReduceInstruction *hlo,
const ReductionDimensions &reduction_dimensions,
int64_t reduced_input_dimension) {
CHECK(reduction_dimensions.is_row_reduction);
absl::InlinedVector<Shape, 2> tuple_shapes;
for (HloInstruction *input : hlo->inputs()) {
tuple_shapes.push_back(
ShapeUtil::DeleteDimension(reduced_input_dimension, input->shape()));
}
HloInstruction *inner_reduce =
hlo->parent()->AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(tuple_shapes), hlo->inputs(),
hlo->init_values(), {reduced_input_dimension}, hlo->to_apply()));
VLOG(1) << "Inner reduction: " << inner_reduce->ToString();
std::unique_ptr<HloInstruction> out = HloInstruction::CreateReduce(
hlo->shape(), inner_reduce, hlo->init_values(), {0}, hlo->to_apply());
VLOG(1) << "Generated: " << out->ToString();
return ReplaceWithNewInstruction(hlo, std::move(out));
}
se::GpuComputeCapability gpu_version_;
};
absl::StatusOr<bool> GpuTreeReductionRewriter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
VLOG(5) << "Rewriter input: " << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
ReductionRewriterVisitor(gpu_version_)
.RunOnModule(module, execution_threads));
VLOG(5) << "Rewriter output: " << module->ToString();
return changed;
}
}
} | #include "xla/service/gpu/tree_reduction_rewriter.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class TreeReductionRewriterTest : public HloTestBase {
public:
void CheckTreeRewriter(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
#if TENSORFLOW_USE_ROCM
gpu::GpuTreeReductionRewriter{se::RocmComputeCapability {
"908"
}},
#else
gpu::GpuTreeReductionRewriter{se::CudaComputeCapability{8, 1}},
#endif
expected);
}
};
TEST_F(TreeReductionRewriterTest, RowReductionSingleDimensionNoBatched) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[50021] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, RowReductionWeirdOutputLayout) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[2,4,17000]{2,1,0} parameter(0)
zero = f32[] constant(0)
ROOT out = f32[2,4]{0,1} reduce(input, zero), dimensions={2}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest,
RowReductionSingleDimensionNoBatchedDivisible) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[50048] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, RowReductionNoBatched) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[100,10,65536] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100,10] reduce(input, zero), dimensions={2}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest,
RowReductionSingleDimensionNoBatchedLargeInput) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1048576] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, RowReductionBatchedDimensionFits) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[8,100,65536] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100] reduce(input, zero), dimensions={0,2}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, RowReductionBatchedDimensionDoesNotFit) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[32,100,90000] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100] reduce(input, zero), dimensions={0,2}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, ColumnReductionSimple) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[16384,100] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, ColumnReductionSimpleNoDivisible) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[10303,100] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, ColumnReductionOtherIndex) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[16384,2,2,2] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[2,2,2] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, ColumnReductionVeryLargeInput) {
const char* hlo = R"(
HloModule ReduceWithPadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1048576,5] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[5] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, VariadicReductionLargeRow) {
const char* hlo = R"(
HloModule Reduce_R1x2_to_R0x2_argmax
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[2,100003] parameter(0)
idxs = u32[2,100003] iota(), iota_dimension=0
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[2], u32[2]) reduce(
input, idxs, zero, zero_idx),
dimensions={1},
to_apply=%argmax
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, VariadicReductionLargeBatchSize) {
const char* hlo = R"(
HloModule Reduce_R1x2_to_R0x2_argmax
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[20,2,100] parameter(0)
idxs = u32[20,2,100] iota(), iota_dimension=0
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[2], u32[2]) reduce(
input, idxs, zero, zero_idx),
dimensions={0,2},
to_apply=%argmax
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, KeepInnerReductionVectorized) {
const char* hlo = R"(
HloModule KeepInnerRowReductionVectorized
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1024,73984] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1024] reduce(input, zero), dimensions={1}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, PreferLargeVectorizedDimension) {
const char* hlo = R"(
HloModule PreferLargeVectorizedDimension
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1024,98304] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1024] reduce(input, zero), dimensions={1}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, SwapIfNonAlignedBeforePadding) {
const char* hlo = R"(
HloModule SwapIfNonAlignedBeforePadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1024,19739] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1024] reduce(input, zero), dimensions={1}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, DontSwapIfNonAlignedBeforePadding) {
const char* hlo = R"(
HloModule DontSwapIfNonAlignedBeforePadding
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[1024,19459] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[1024] reduce(input, zero), dimensions={1}, to_apply=add
}
)";
CheckTreeRewriter(hlo,
R"(
)");
}
}
} |
1,938 | cpp | tensorflow/tensorflow | while_util | third_party/xla/xla/service/while_util.cc | third_party/xla/xla/service/while_util_test.cc | #ifndef XLA_SERVICE_WHILE_UTIL_H_
#define XLA_SERVICE_WHILE_UTIL_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/call_inliner.h"
#include "xla/xla_data.pb.h"
namespace xla {
class WhileUtil {
public:
struct MakeInstructionsLiveInResult {
HloInstruction* new_while_instr;
HloInstruction* replacement_instr;
std::vector<HloInstruction*> while_body_live_in_values;
CallInliner::InlinedInstructionMap while_body_instruction_map;
CallInliner::InlinedInstructionMap while_condition_instruction_map;
};
static absl::StatusOr<MakeInstructionsLiveInResult> MakeInstructionsLiveIn(
HloInstruction* while_instr,
absl::Span<HloInstruction* const> instructions);
using LoopStateTy = std::vector<HloInstruction*>;
using LoopBodyGeneratorTy = absl::FunctionRef<absl::StatusOr<LoopStateTy>(
HloInstruction* ,
const LoopStateTy& )>;
static absl::StatusOr<LoopStateTy> MakeCountedLoop(
HloComputation* computation, int32_t trip_count,
const LoopStateTy& init_values, LoopBodyGeneratorTy loop_body_generator,
const OpMetadata& metadata);
struct OwningLoopStateTy {
std::vector<std::unique_ptr<HloInstruction>> instructions_to_add;
WhileUtil::LoopStateTy while_results;
};
static absl::StatusOr<OwningLoopStateTy> MakeCountedLoop(
HloModule* module, int32_t trip_count,
const WhileUtil::LoopStateTy& init_values,
WhileUtil::LoopBodyGeneratorTy loop_body_generator,
const OpMetadata& metadata);
static std::vector<HloInstruction*> GetInvariantGTEsForWhileBody(
const HloComputation& while_body);
static absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
GetGTEsMapForWhileConditional(const HloComputation& while_conditional);
};
}
#endif
#include "xla/service/while_util.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
static absl::StatusOr<
std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileCondition(HloComputation* narrow_condition, const Shape& wide_shape) {
const Shape& narrow_shape =
narrow_condition->parameter_instruction(0)->shape();
HloComputation* wide_while_cond = [&]() {
HloComputation::Builder builder(StrCat("wide.", narrow_condition->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.",
narrow_condition->parameter_instruction(0)->name())));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
return narrow_condition->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_while_cond->parameter_instruction(0),
narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
wide_while_cond->parameter_instruction(0)->name()));
HloInstruction* call_narrow_cond = wide_while_cond->AddInstruction(
HloInstruction::CreateCall(ShapeUtil::MakeShape(PRED, {}),
{truncated_parameter}, narrow_condition));
wide_while_cond->set_root_instruction(call_narrow_cond);
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_narrow_cond));
return {{wide_while_cond, std::move(inlined_instructions_map)}};
}
static absl::StatusOr<
std::pair<HloComputation*, CallInliner::InlinedInstructionMap>>
WidenWhileBody(HloComputation* narrow_body, const Shape& wide_shape) {
const Shape& narrow_shape = narrow_body->parameter_instruction(0)->shape();
HloComputation* wide_while_body = [&]() {
HloComputation::Builder builder(StrCat("wide.", narrow_body->name()));
builder.AddInstruction(HloInstruction::CreateParameter(
0, wide_shape,
absl::StrCat("wide.", narrow_body->parameter_instruction(0)->name())));
return narrow_body->parent()->AddEmbeddedComputation(builder.Build());
}();
HloInstruction* wide_parameter = wide_while_body->parameter_instruction(0);
HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(
wide_parameter, narrow_shape.tuple_shapes_size(),
absl::StrCat("renarrowed.",
wide_while_body->parameter_instruction(0)->name()));
HloInstruction* call_narrow_body =
wide_while_body->AddInstruction(HloInstruction::CreateCall(
narrow_shape, {truncated_parameter}, narrow_body));
std::vector<HloInstruction*> live_through_values;
for (int i = narrow_shape.tuple_shapes_size();
i < wide_shape.tuple_shapes_size(); i++) {
live_through_values.push_back(wide_while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(wide_shape.tuple_shapes(i),
wide_parameter, i),
absl::StrCat(wide_while_body->name(), ".through.",
i - narrow_shape.tuple_shapes_size())));
}
wide_while_body->set_root_instruction(
TupleUtil::AppendSuffix(call_narrow_body, live_through_values));
TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
CallInliner::Inline(call_narrow_body));
return {{wide_while_body, std::move(inlined_instructions_map)}};
}
absl::StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
HloInstruction* while_instr,
absl::Span<HloInstruction* const> instructions) {
CHECK(while_instr->shape().IsTuple());
int elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
Shape new_while_shape = while_instr->shape();
for (auto* instruction : instructions) {
*new_while_shape.add_tuple_shapes() = instruction->shape();
}
HloComputation* new_while_condition;
CallInliner::InlinedInstructionMap inlined_condition_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_condition, inlined_condition_instructions_map),
WidenWhileCondition(while_instr->while_condition(), new_while_shape));
HloComputation* new_while_body;
CallInliner::InlinedInstructionMap inlined_instructions_map;
TF_ASSIGN_OR_RETURN(
std::tie(new_while_body, inlined_instructions_map),
WidenWhileBody(while_instr->while_body(), new_while_shape));
HloInstruction* new_while_init =
TupleUtil::AppendSuffix(while_instr->mutable_operand(0), instructions);
HloComputation* containing_computation = while_instr->parent();
HloInstruction* new_while = containing_computation->AddInstruction(
HloInstruction::CreateWhile(new_while_shape, new_while_condition,
new_while_body, new_while_init));
HloInstruction* replacement_instr = TupleUtil::ExtractPrefix(
new_while, while_instr->shape().tuple_shapes_size());
TF_RETURN_IF_ERROR(while_instr->ReplaceAllUsesWith(replacement_instr));
TF_RETURN_IF_ERROR(containing_computation->RemoveInstruction(while_instr));
HloInstruction* while_body_param = new_while_body->parameter_instruction(0);
std::vector<HloInstruction*> live_in_instructions;
for (int64_t i = elements_in_old_while_shape;
i < new_while_shape.tuple_shapes_size(); i++) {
live_in_instructions.push_back(new_while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(
instructions[i - elements_in_old_while_shape]->shape(),
while_body_param, i),
absl::StrCat(new_while_body->name(), ".in.",
i - elements_in_old_while_shape)));
}
WhileUtil::MakeInstructionsLiveInResult result;
result.new_while_instr = new_while;
result.replacement_instr = replacement_instr;
result.while_body_live_in_values = std::move(live_in_instructions);
result.while_body_instruction_map = std::move(inlined_instructions_map);
result.while_condition_instruction_map =
std::move(inlined_condition_instructions_map);
return std::move(result);
}
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopConditionComputation(const Shape& loop_state_shape,
int32_t trip_count) {
Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> cond_computation,
CreateComputationWithSignature(
{&loop_state_shape}, scalar_pred, "while_cond"));
HloInstruction* trip_count_constant =
cond_computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(trip_count)));
HloInstruction* param = cond_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
MakeGetTupleElementHlo(param, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * compare,
MakeCompareHlo(ComparisonDirection::kLt, indvar, trip_count_constant));
cond_computation->set_root_instruction(compare);
return std::move(cond_computation);
}
static absl::StatusOr<std::unique_ptr<HloComputation>>
MakeCountedLoopBodyComputation(
const Shape& loop_state_shape,
absl::FunctionRef<absl::StatusOr<WhileUtil::LoopStateTy>(
HloInstruction*, const WhileUtil::LoopStateTy&)>
loop_body_generator) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloComputation> body_computation,
CreateComputationWithSignature(
{&loop_state_shape}, loop_state_shape, "while_body"));
HloInstruction* one = body_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
HloInstruction* param = body_computation->parameter_instruction(0);
TF_ASSIGN_OR_RETURN(HloInstruction * indvar,
MakeGetTupleElementHlo(param, 0));
TF_ASSIGN_OR_RETURN(HloInstruction * next_indvar,
MakeBinaryHlo(HloOpcode::kAdd, indvar, one));
std::vector<HloInstruction*> loop_body_generator_args;
for (int i = 1, e = loop_state_shape.tuple_shapes_size(); i < e; i++) {
TF_ASSIGN_OR_RETURN(HloInstruction * tuple_element,
MakeGetTupleElementHlo(param, i));
loop_body_generator_args.push_back(tuple_element);
}
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> next_state,
loop_body_generator(indvar, loop_body_generator_args));
next_state.insert(next_state.begin(), next_indvar);
HloInstruction* next_state_tuple =
body_computation->AddInstruction(HloInstruction::CreateTuple(next_state));
body_computation->set_root_instruction(next_state_tuple);
return std::move(body_computation);
}
// Builds the initial loop-state tuple: a zero s32 induction variable followed
// by `init_values`. Returns both the owned zero constant and the owned tuple
// instruction because the tuple holds a raw pointer to the constant -- the
// caller must keep both alive (e.g. by adding both to a computation).
static std::pair<std::unique_ptr<HloInstruction>,
                 std::unique_ptr<HloInstruction>>
MakeInitTupleFromInitValues(const WhileUtil::LoopStateTy& init_values) {
  std::vector<HloInstruction*> init_values_with_indvar;
  init_values_with_indvar.reserve(init_values.size() + 1);
  std::unique_ptr<HloInstruction> zero =
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0));
  init_values_with_indvar.push_back(zero.get());
  absl::c_copy(init_values, std::back_inserter(init_values_with_indvar));
  return std::make_pair(std::move(zero),
                        HloInstruction::CreateTuple(init_values_with_indvar));
}
// Computes the tuple shape of the loop state: an s32 scalar induction
// variable followed by the shapes of `init_values`. Shapes missing a layout
// get the default layout assigned.
static Shape MakeLoopStateShapeWithLayout(
    const WhileUtil::LoopStateTy& init_values) {
  std::vector<Shape> component_shapes;
  component_shapes.reserve(init_values.size() + 1);
  component_shapes.push_back(ShapeUtil::MakeShape(S32, {}));
  for (HloInstruction* init_value : init_values) {
    Shape component = init_value->shape();
    if (!component.has_layout()) {
      LayoutUtil::SetToDefaultLayout(&component);
    }
    component_shapes.push_back(std::move(component));
  }
  return ShapeUtil::MakeTupleShape(component_shapes);
}
// Builds a while loop that runs `trip_count` iterations, threading
// `init_values` through `loop_body_generator`. The condition and body
// computations are added to `module`, but the created instructions are
// returned *unattached* in `instructions_to_add`; the caller must add them to
// a computation. `while_results` holds raw pointers into that owned set: one
// get-tuple-element per user-visible loop-state element (the induction
// variable at tuple index 0 is not exposed).
absl::StatusOr<WhileUtil::OwningLoopStateTy>
WhileUtil::MakeCountedLoop(HloModule* module, int32_t trip_count,
                           const WhileUtil::LoopStateTy& init_values,
                           WhileUtil::LoopBodyGeneratorTy loop_body_generator,
                           const OpMetadata& metadata) {
  CHECK_GE(trip_count, 0);
  Shape loop_state_shape = MakeLoopStateShapeWithLayout(init_values);
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> cond,
      MakeCountedLoopConditionComputation(loop_state_shape, trip_count));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloComputation> body,
      MakeCountedLoopBodyComputation(loop_state_shape, loop_body_generator));
  // The init tuple references the owned zero constant, so both must be kept
  // alive and transferred to the caller.
  std::unique_ptr<HloInstruction> owned_indvar;
  std::unique_ptr<HloInstruction> owned_init_tuple;
  std::tie(owned_indvar, owned_init_tuple) =
      MakeInitTupleFromInitValues(init_values);
  std::unique_ptr<HloInstruction> owned_while = HloInstruction::CreateWhile(
      loop_state_shape, module->AddEmbeddedComputation(std::move(cond)),
      module->AddEmbeddedComputation(std::move(body)), owned_init_tuple.get());
  owned_while->set_metadata(metadata);
  HloInstruction* while_instr = owned_while.get();
  std::vector<std::unique_ptr<HloInstruction>> owned;
  owned.push_back(std::move(owned_indvar));
  owned.push_back(std::move(owned_init_tuple));
  owned.push_back(std::move(owned_while));
  // Expose the user-visible loop state: tuple indices 1..N of the while
  // result (index 0 is the internal induction variable).
  std::vector<HloInstruction*> while_results;
  for (int64_t i = 0, e = init_values.size(); i < e; i++) {
    std::unique_ptr<HloInstruction> user_state =
        HloInstruction::CreateGetTupleElement(init_values[i]->shape(),
                                              while_instr, i + 1);
    while_results.push_back(user_state.get());
    owned.push_back(std::move(user_state));
  }
  return WhileUtil::OwningLoopStateTy{std::move(owned), while_results};
}
// Convenience overload: builds the counted loop via the module-level overload
// and immediately transfers ownership of every created instruction into
// `computation`, returning just the user-visible loop results.
absl::StatusOr<WhileUtil::LoopStateTy> WhileUtil::MakeCountedLoop(
    HloComputation* computation, int32_t trip_count,
    const WhileUtil::LoopStateTy& init_values,
    WhileUtil::LoopBodyGeneratorTy loop_body_generator,
    const OpMetadata& metadata) {
  TF_ASSIGN_OR_RETURN(
      auto owning_loop_state,
      MakeCountedLoop(computation->parent(), trip_count, init_values,
                      loop_body_generator, metadata));
  for (std::unique_ptr<HloInstruction>& owned_instr :
       owning_loop_state.instructions_to_add) {
    computation->AddInstruction(std::move(owned_instr));
  }
  return owning_loop_state.while_results;
}
std::vector<HloInstruction*> WhileUtil::GetInvariantGTEsForWhileBody(
const HloComputation& while_body) {
std::vector<HloInstruction*> result;
const HloInstruction::InstructionVector root_operands =
while_body.root_instruction()->operands();
for (int i = 0; i < root_operands.size(); i++) {
HloInstruction* instr = root_operands[i];
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->tuple_index() == i &&
instr->operand(0) == while_body.parameter_instruction(0)) {
result.push_back(instr);
}
}
return result;
}
// Groups every get-tuple-element user of the condition's state parameter by
// the tuple index it extracts.
absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
WhileUtil::GetGTEsMapForWhileConditional(
    const HloComputation& while_conditional) {
  absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
      gtes_by_index;
  const HloInstruction* state_param =
      while_conditional.parameter_instruction(0);
  for (HloInstruction* user : state_param->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      continue;
    }
    gtes_by_index[user->tuple_index()].push_back(user);
  }
  return gtes_by_index;
}
} | #include "xla/service/while_util.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
// Fixture providing a canned module with a trivial while loop (identity body,
// always-true condition) plus three entry parameters of distinct types.
class WhileUtilTest : public HloTestBase {
 protected:
  // Parses the canned module and returns it along with out-params for the
  // entry computation and its three parameters.
  absl::StatusOr<std::unique_ptr<VerifiedHloModule>> GetParsedModule(
      HloComputation** entry_computation, HloInstruction** param0,
      HloInstruction** param1, HloInstruction** param2) {
    const char* const hlo_string = R"(
HloModule ModuleWithWhile
while_body {
  ROOT p_body = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
}
while_condition {
  p_cond = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
  ROOT result = pred[] constant(true)
}
ENTRY entry {
  p_entry_0 = f32[32,32]{1,0} parameter(0)
  p_entry_1 = s32[32,32]{1,0} parameter(1)
  p_entry_2 = s64[32,32]{1,0} parameter(2)
  while_init = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p_entry_0, p_entry_0)
  ROOT while = (f32[32,32]{1,0}, f32[32,32]{1,0}) while(while_init), condition=while_condition, body=while_body
}
)";
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    *entry_computation = module->entry_computation();
    *param0 = (*entry_computation)->parameter_instruction(0);
    *param1 = (*entry_computation)->parameter_instruction(1);
    *param2 = (*entry_computation)->parameter_instruction(2);
    return std::move(module);
  }
};
// Making zero instructions live-in should still rewrite the while into the
// new form: the old while result is reconstructed as a tuple of GTEs on the
// new while, and the body's root forwards the reconstructed parameter.
// Fix: the address-of expressions were corrupted by HTML-entity decoding
// ("¶m0" etc.); restored to `&param0`/`&param1`/`&param2`.
TEST_F(WhileUtilTest, MakeZeroInstructionsLiveOp) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr, {}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  // The old root is replaced with a tuple of GTEs over the new while.
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(param_reconstructed, 0),
                        op::GetTupleElement(param_reconstructed, 1)));
}
// Making two instructions live-in should widen the loop state by two
// elements: the body root forwards the reconstructed original state plus
// pass-through GTEs for the two new elements.
// Fix: restored `&param0`/`&param1`/`&param2` (corrupted to "¶m0" etc. by
// HTML-entity decoding).
TEST_F(WhileUtilTest, MakeTwoInstructionsLive) {
  HloInstruction *param0, *param1, *param2;
  HloComputation* entry_computation;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      GetParsedModule(&entry_computation, &param0, &param1, &param2));
  HloInstruction* while_instr = entry_computation->root_instruction();
  ASSERT_EQ(while_instr->opcode(), HloOpcode::kWhile);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {param0, param1}));
  HloInstruction* new_while_instr = make_live_in_result.new_while_instr;
  XLA_VLOG_LINES(3, module->ToString());
  // The old two-element result is still reconstructable from the new while.
  EXPECT_THAT(
      entry_computation->root_instruction(),
      op::Tuple(op::GetTupleElement(::testing::Eq(new_while_instr), 0),
                op::GetTupleElement(::testing::Eq(new_while_instr), 1)));
  auto first_half_param_reconstructed =
      op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
                op::GetTupleElement(op::Parameter(0), 1));
  // Elements 2 and 3 are the newly live-in values, passed through unchanged.
  EXPECT_THAT(new_while_instr->while_body()->root_instruction(),
              op::Tuple(op::GetTupleElement(first_half_param_reconstructed, 0),
                        op::GetTupleElement(first_half_param_reconstructed, 1),
                        op::GetTupleElement(op::Parameter(0), 2),
                        op::GetTupleElement(op::Parameter(0), 3)));
}
// gte.0 is forwarded unchanged at its own tuple index, so it is the sole
// invariant GTE; gte.1 is replaced by `add` and must not be reported.
TEST_F(WhileUtilTest, GetInvariantGTEsForWhileBody) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
  param.b = (s32[], s32[]) parameter(0)
  gte.0 = s32[] get-tuple-element(param.b), index=0
  gte.1 = s32[] get-tuple-element(param.b), index=1
  add = s32[] add(gte.0, gte.1)
  ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
  param.c = (s32[], s32[]) parameter(0)
  ROOT constant = pred[] constant(true)
}
ENTRY main {
  init = (s32[], s32[]) parameter(0)
  ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* while_body = module->GetComputationWithName("body");
  ASSERT_NE(while_body, nullptr)
      << "Expected exactly one while_body computation";
  std::vector<HloInstruction*> gte_list =
      WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
  ASSERT_EQ(gte_list.size(), 1);
  EXPECT_EQ((*gte_list.begin())->name(), "gte.0");
}
// Even when the condition has side effects (infeed), MakeInstructionsLiveIn
// must remove the old while so that exactly one while remains in main.
TEST_F(WhileUtilTest, AlwaysRemovePreviousWhileBody) {
  const char* const hlo_string = R"(
HloModule WhileWithSideEffects
body {
  param.b = (s32[], s32[]) parameter(0)
  gte.0 = s32[] get-tuple-element(param.b), index=0
  gte.1 = s32[] get-tuple-element(param.b), index=1
  add = s32[] add(gte.0, gte.1)
  ROOT tuple = (s32[], s32[]) tuple(gte.0, add)
}
cond {
  param.c = (s32[], s32[]) parameter(0)
  token0 = token[] after-all()
  infeed = (pred[], token[]) infeed(token0)
  ROOT condition = pred[] get-tuple-element(infeed), index=0
}
ENTRY main {
  init = (s32[], s32[]) parameter(0)
  to_make_live_in = f32[100] parameter(1)
  ROOT while = (s32[], s32[]) while(init), condition=cond, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* main = module->GetComputationWithName("main");
  HloInstruction* while_instr = main->root_instruction();
  HloInstruction* to_make_live_in = main->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(
      WhileUtil::MakeInstructionsLiveInResult make_live_in_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr,
                                        {to_make_live_in}));
  // The stale while must be gone; only the rewritten one may remain.
  auto is_while = [](const HloInstruction* instr) {
    return instr->opcode() == HloOpcode::kWhile;
  };
  EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
}
}
} |
1,939 | cpp | tensorflow/tensorflow | hlo_graph_dumper | third_party/xla/xla/service/hlo_graph_dumper.cc | third_party/xla/xla/service/hlo_graph_dumper_test.cc | #ifndef XLA_SERVICE_HLO_GRAPH_DUMPER_H_
#define XLA_SERVICE_HLO_GRAPH_DUMPER_H_
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/xla.pb.h"
namespace xla {
// Output format for a rendered HLO graph.
enum class RenderedGraphFormat {
  kDot,   // Raw Graphviz DOT text.
  kHtml,  // Self-contained HTML page.
  kUrl,   // URL produced by the renderer installed via
          // RegisterGraphToURLRenderer.
};
// Knobs controlling how much detail the graph renderers emit.
struct HloRenderOptions {
  // Include each instruction's backend config in the rendered graph.
  bool show_backend_config = false;
  // Render the bodies of fusion computations.
  bool show_fusion_subcomputations = true;
  // Render while condition/body subcomputations.
  bool show_while_subcomputations = true;
  // Use caller-supplied colors (see ColorStats) instead of the defaults.
  bool override_node_colors = false;
};
// A color plus a statistics string that callers can attach to individual
// instructions via the `color_map` arguments of the render functions below.
struct ColorStats {
  std::string color;
  std::string stats;
};
// Renders `computation` in the requested `format`; `label` becomes the graph
// title.
absl::StatusOr<std::string> RenderGraph(
    const HloComputation& computation, absl::string_view label,
    const DebugOptions& debug_options, RenderedGraphFormat format,
    HloRenderOptions hlo_render_options = {},
    std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
        color_map = std::nullopt);
// Renders every computation of `module` into a single HTML page.
absl::StatusOr<std::string> RenderAllComputationsToHtml(
    const HloModule& module);
// Renders the instructions within `radius` edges of `node`; traversal does
// not cross instructions listed in `boundary`.
absl::StatusOr<std::string> RenderNeighborhoodAround(
    const HloInstruction& node, int radius, RenderedGraphFormat format,
    HloRenderOptions hlo_render_options = {},
    const absl::flat_hash_set<const HloInstruction*>& boundary = {},
    std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
        color_map = std::nullopt);
// Renders the paths from `from` to `to`; `max_nodes` bounds the rendering.
absl::StatusOr<std::string> RenderAllPathsFromTo(
    const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,
    RenderedGraphFormat format, HloRenderOptions hlo_render_options = {});
// Records a fusion step (consumer, optional producer) under `label` for the
// fusion-explorer visualization.
void RegisterFusionState(const HloComputation& computation,
                         absl::string_view label,
                         const HloInstruction& consumer,
                         const HloInstruction* producer = nullptr);
// Installs the callback used to turn DOT text into a URL for
// RenderedGraphFormat::kUrl.
void RegisterGraphToURLRenderer(
    std::function<absl::StatusOr<std::string>(absl::string_view dot)> renderer);
// Wraps `computation` in a fusion-explorer HTML page.
absl::StatusOr<std::string> WrapFusionExplorer(
    const HloComputation& computation);
}
#endif
#include "xla/service/hlo_graph_dumper.h"
#include <cstdint>
#include <unordered_map>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/shape.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/dnn.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/lib/gtl/map_util.h"
#include "tsl/lib/io/zlib_compression_options.h"
#include "tsl/lib/io/zlib_outputbuffer.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
// Classifies how a node should be treated by the renderer (see NodeFilter).
enum NodeFilterResult {
  kNormalNode,
  kHideNode,
  // Draw the node so it is easy to find in the final graph.
  kHighlightNode,
  // De-emphasize the node because some of its operands are omitted.
  kSomeOperandsOmitted,
  // Like kSomeOperandsOmitted, but also don't draw edges to its operands.
  kOmitNodeOperands,
  // De-emphasize the node because some of its users are omitted.
  kSomeUsersOmitted,
};
// Wraps a predicate that decides, per instruction, whether and how a node is
// rendered. The default filter shows every node normally.
class NodeFilter {
 public:
  NodeFilter() : filter_([](const HloInstruction*) { return kNormalNode; }) {}
  explicit NodeFilter(
      std::function<NodeFilterResult(const HloInstruction* instr)> filter,
      std::optional<int> num_rendered = std::nullopt)
      : filter_(std::move(filter)), num_rendered_(num_rendered) {}
  // True unless the node is hidden entirely.
  bool Show(const HloInstruction* instr) const {
    return filter_(instr) != kHideNode;
  }
  // True if the node should be drawn highlighted.
  bool Highlight(const HloInstruction* instr) const {
    return filter_(instr) == kHighlightNode;
  }
  // True if no operand edges should be drawn for the node.
  bool OmitOperands(const HloInstruction* instr) const {
    return filter_(instr) == kOmitNodeOperands;
  }
  // True if at least some of the node's operands are not rendered.
  bool SomeOrAllOperandsOmitted(const HloInstruction* instr) const {
    auto result = filter_(instr);
    return result == kOmitNodeOperands || result == kSomeOperandsOmitted;
  }
  // True if the node should be drawn de-emphasized (grayed out).
  bool Deemphasized(const HloInstruction* instr) const {
    auto result = filter_(instr);
    return result == kOmitNodeOperands || result == kSomeOperandsOmitted ||
           result == kSomeUsersOmitted;
  }
  // Number of nodes the filter intends to render, if known.
  std::optional<int> GetNumRendered() const { return num_rendered_; }

 private:
  std::function<NodeFilterResult(const HloInstruction* instr)> filter_;
  std::optional<int> num_rendered_;
};
bool IsSmall(const HloInstruction* instr) {
if (ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE_TYPE) ||
ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) {
return true;
}
return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096;
}
// Node color schemes; mapped to concrete Graphviz attributes by
// NodeColorsForScheme below.
enum ColorScheme {
  kBlue,
  kBrown,
  kDarkBlue,
  kDarkGreen,
  kDarkOrange,
  kDarkRed,
  kGray,
  kGreen,
  kOrange,
  kPurple,
  kRed,
  kWhite,
  kYellow,
  // Causes the node's border to be a dashed line.
  kDashedBorder,
};
// Graphviz node attributes (style, fill, stroke, font color) for one scheme.
struct NodeColors {
  std::string style;
  std::string fill_color;
  std::string stroke_color;
  std::string font_color;
};
// Maps a ColorScheme to concrete Graphviz node colors. Every enum value is
// handled, so there is deliberately no default case (the compiler will flag
// newly added schemes).
NodeColors NodeColorsForScheme(ColorScheme color) {
  switch (color) {
    case kBlue:
      return NodeColors{"filled", "#bbdefb", "#8aacc8", "black"};
    case kBrown:
      return NodeColors{"filled", "#bcaaa4", "#8c7b75", "black"};
    case kDarkBlue:
      return NodeColors{"filled", "#1565c0", "#003c8f", "white"};
    case kDarkGreen:
      return NodeColors{"filled", "#2e7d32", "#005005", "white"};
    case kDarkOrange:
      // This is more of a "medium" orange, made to look darker by the
      // contrast with the lighter kOrange below.
      return NodeColors{"filled", "#ffb74d", "#c88719", "black"};
    case kDarkRed:
      return NodeColors{"filled", "#b71c1c", "#7f0000", "white"};
    case kGray:
      return NodeColors{"filled", "#cfd8dc", "#9ea7aa", "black"};
    case kGreen:
      return NodeColors{"filled", "#c8e6c9", "#97b498", "black"};
    case kOrange:
      return NodeColors{"filled", "#ffe0b2", "#cbae82", "black"};
    case kPurple:
      return NodeColors{"filled", "#e1bee7", "#af8eb5", "black"};
    case kRed:
      return NodeColors{"filled", "#ffcdd2", "#cb9ca1", "black"};
    case kWhite:
      return NodeColors{"filled", "white", "#9e9e9e", "black"};
    case kYellow:
      return NodeColors{"filled", "#fff9c4", "#cbc693", "black"};
    case kDashedBorder:
      // "filled,dashed" looks the same as "dashed" when the fill is white.
      return NodeColors{"filled,dashed", "white", "#757575", "#757575"};
  }
}
// Maps a statistic value onto a heat scale: neutral gray at exactly zero,
// then nine progressively redder buckets of width 10, saturating at full red
// for values of 90 and above.
std::string NodeFillColorForStatistic(const Statistic& statistic) {
  auto stat_val = statistic.stat_val();
  if (stat_val == 0) {
    return "#f5f5f5";
  }
  static constexpr const char* kHeatColors[] = {
      "#f7d4cc", "#f8b2a3", "#f9a28f", "#fa917b", "#fb8066",
      "#fc7052", "#fd5f3d", "#fd4e29", "#fe3e14"};
  for (int bucket = 0; bucket < 9; ++bucket) {
    // Bucket i covers values below (i + 1) * 10.
    if (stat_val < (bucket + 1) * 10) {
      return kHeatColors[bucket];
    }
  }
  return "#ff2d00";
}
// Dark text on the lighter fills (stat < 60), light text on the hotter ones.
std::string NodeFontColorForStatistic(const Statistic& statistic) {
  return statistic.stat_val() < 60 ? "black" : "white";
}
// Formats the Graphviz attribute list (style/fontcolor/color/fillcolor) for
// the given color scheme.
std::string NodeColorAttributes(ColorScheme color) {
  NodeColors node_colors = NodeColorsForScheme(color);
  return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
                   node_colors.style, node_colors.font_color,
                   node_colors.stroke_color, node_colors.fill_color);
}
// Escapes characters special inside a Graphviz HTML-like label so arbitrary
// instruction text can be embedded safely.
// Fix: the replacement table had been corrupted by HTML-entity decoding into
// identity mappings (plus an unbalanced quote); restored the entity escapes.
std::string HtmlLikeStringSanitize(absl::string_view s) {
  return absl::StrReplaceAll(s,
                             {{"<", "<"}, {">", ">"}, {"\"", """}});
}
// True iff `instr` lives inside a fusion computation and is a broadcast of an
// effective-scalar constant.
bool IsFusedBroadcastOfConstantEffectiveScalar(const HloInstruction* instr) {
  namespace m = match;
  return instr->parent()->IsFusionComputation() &&
         Match(instr, m::Broadcast(m::ConstantEffectiveScalar()));
}
// If `computation` is "trivial" -- its root applies a single binary op to two
// effective-scalar parameters -- returns a short human-readable description
// of that op ("add", "less-than", ...); otherwise returns nullopt.
// Fixes: (1) the opcode guard compared against a default-constructed
// HloOpcode() instead of HloOpcode::kCompare, so comparison_direction() could
// be read on non-compare ops and reversed-operand compares were never
// rejected; (2) restored `&param0`/`&param1`, which had been corrupted to
// "¶m0"/"¶m1" by HTML-entity decoding.
optional<std::string> MatchTrivialComputation(
    const HloComputation* computation) {
  namespace m = match;
  // Exactly two parameters plus the root instruction.
  if (computation->instruction_count() != 3) {
    return nullopt;
  }
  HloInstruction* root = computation->root_instruction();
  const HloInstruction *param0, *param1;
  if (!Match(root, m::Op()
                       .WithNumOperands(2)
                       .WithShape(m::Shape().IsEffectiveScalar())
                       .WithBinaryOperandsAnyOrder(
                           m::Parameter(&param0, 0)
                               .WithShape(m::Shape().IsEffectiveScalar()),
                           m::Parameter(&param1, 1)
                               .WithShape(m::Shape().IsEffectiveScalar())))) {
    return nullopt;
  }
  // If the operands appear in reversed order (param1 <op> param0), only a
  // commutative op can still be described by its plain name; ordered
  // comparisons are not commutative, so bail out for them.
  if (root->operand(0) == param1) {
    CHECK_EQ(root->operand(1), param0);
    if (root->opcode() == HloOpcode::kCompare) {
      switch (root->comparison_direction()) {
        case ComparisonDirection::kLe:
        case ComparisonDirection::kGe:
        case ComparisonDirection::kGt:
        case ComparisonDirection::kLt:
          return nullopt;
        default:
          break;
      }
    }
  }
  switch (root->opcode()) {
    case HloOpcode::kAdd:
      return "add";
    case HloOpcode::kMultiply:
      return "multiply";
    case HloOpcode::kMinimum:
      return "min";
    case HloOpcode::kMaximum:
      return "max";
    case HloOpcode::kXor:
      return "xor";
    case HloOpcode::kAnd:
      return "and";
    case HloOpcode::kOr:
      return "or";
    case HloOpcode::kCompare: {
      switch (root->comparison_direction()) {
        case ComparisonDirection::kLe:
          return "less-or-equal";
        case ComparisonDirection::kGe:
          return "greater-or-equal";
        case ComparisonDirection::kGt:
          return "greater-than";
        case ComparisonDirection::kLt:
          return "less-than";
        case ComparisonDirection::kEq:
          return "equal-to";
        case ComparisonDirection::kNe:
          return "not-equal-to";
      }
    }
    default:
      return nullopt;
  }
}
// Renders one HloComputation (and, transitively, its subcomputations) as
// Graphviz DOT text. Usage: construct, then call Dump() once.
class HloDotDumper {
 public:
  HloDotDumper(
      const HloComputation* computation, absl::string_view label,
      const DebugOptions& debug_options, HloRenderOptions hlo_render_options,
      NodeFilter filter,
      std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
          color_map = std::nullopt)
      : computation_(computation),
        label_(label),
        debug_options_(debug_options),
        hlo_render_options_(hlo_render_options),
        filter_(std::move(filter)),
        color_map_(color_map) {}
  std::string Dump();
  // Returns a CSS selector for the rendered node/cluster corresponding to
  // `instr`, or nullopt if the instruction was not rendered. Fusions map to
  // their subcomputation's cluster; other instructions map to their node.
  std::optional<std::string> CssIdForInstruction(const HloInstruction& instr) {
    if (instr.opcode() == HloOpcode::kFusion) {
      auto it = cluster_ids_.find(instr.called_computations()[0]);
      if (it == cluster_ids_.end()) {
        return std::nullopt;
      }
      return StrCat("#a_clust", it->second, " path");
    }
    auto it = node_ids_.find(&instr);
    if (it == node_ids_.end()) {
      return std::nullopt;
    }
    return StrCat("#node", it->second, " polygon");
  }

 private:
  // DOT identifiers are derived from pointer values, so they are unique
  // within one dump.
  std::string InstructionId(const HloInstruction* instruction) {
    return StrCat(reinterpret_cast<uint64_t>(instruction));
  }
  std::string SubcomputationId(const HloComputation* computation) {
    return StrCat("cluster_", reinterpret_cast<uint64_t>(computation));
  }
  std::string Header();
  std::string Footer();
  bool ShouldShowSubcomputation(const HloComputation* subcomp);
  bool ShouldShowFusionSubcomputation(const HloInstruction* instr);
  bool ShouldMergeIntoUsers(const HloInstruction* instr) const;
  std::string DumpSubcomputation(const HloComputation* subcomp,
                                 const HloInstruction* parent_instr);
  std::string DumpComputation(const HloComputation* comp);
  std::string DumpRootTag();
  std::string DumpInstruction(const HloInstruction* instr);
  ColorScheme GetInstructionColor(const HloInstruction* instr);
  std::string GetInstructionNodeShape(const HloInstruction* instr);
  std::string GetInstructionNodeLabel(const HloInstruction* instr);
  std::string GetInstructionNodeMetadata(const HloInstruction* instr);
  std::string GetInstructionNodeBackendConfig(const HloInstruction* instr);
  std::string GetInstructionNodeExtraInfo(const HloInstruction* instr);
  std::string GetInstructionNodeInlinedOperands(const HloInstruction* instr);
  void AddInstructionIncomingEdges(const HloInstruction* instr);
  const HloInstruction* GetNodeForEdge(const HloInstruction* instr);
  std::string GetInstructionTrivialComputationStr(const HloInstruction* instr);

  const HloComputation* computation_;
  const std::string label_;
  const DebugOptions& debug_options_;
  const HloRenderOptions hlo_render_options_;
  const NodeFilter filter_;
  const std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
      color_map_;
  // Sequential ids assigned as nodes/edges/clusters are emitted; Header()
  // uses these tables to generate per-element hover CSS rules.
  int64_t next_node_id_ = 1;
  absl::flat_hash_map<const HloInstruction*, int64_t> node_ids_;
  // Id of the synthetic "root tag" node (see DumpRootTag).
  int64_t root_node_id_;
  int64_t next_edge_id_ = 1;
  // (from, to) -> edge id; `to == nullptr` denotes an edge to the root tag.
  std::unordered_multimap<
      std::pair<const HloInstruction*, const HloInstruction*>, int64_t,
      absl::Hash<std::pair<const HloInstruction*, const HloInstruction*>>>
      edge_ids_;
  int64_t next_cluster_id_ = 1;
  absl::flat_hash_map<const HloComputation*, int64_t> cluster_ids_;
  // Edge lines are buffered and emitted last, in Footer().
  std::vector<std::string> edges_;
  // Assigns a stable color per distinct sharding.
  absl::flat_hash_map<HloSharding, ColorScheme> sharding_colors_;
  int64_t next_shard_color_ = 0;
};
// Renders the full DOT graph. The body is generated first because
// DumpComputation/DumpRootTag populate the node and edge tables that
// Header() consults when emitting its CSS hover rules.
std::string HloDotDumper::Dump() {
  std::string body = DumpComputation(computation_);
  StrAppend(&body, DumpRootTag());
  std::string graph = Header();
  StrAppend(&graph, body, Footer());
  return graph;
}
// Emits the DOT preamble: graph attributes, the title, and an embedded CSS
// stylesheet with hover rules that highlight an edge's endpoints (blue for
// the source, red for the destination) when a node is hovered.
std::string HloDotDumper::Header() {
  // %s placeholders are (graph label, edge CSS rules).
  // NOTE(review): the @import URL below appears truncated after "https:" --
  // likely lost to comment stripping; the full font URL should be restored.
  constexpr char fmt[] = R"(digraph G {
rankdir = TB;
compound = true;
label = <<b>%s</b>>;
labelloc = t;
tooltip = " ";
stylesheet=<
data:text/css,
@import url(https:
svg text {
font-family: 'Roboto';
font-size: 12px;
}
%s
>
)";
  VLOG(3) << "Generating Header";
  std::string graph_label =
      StrCat(label_, "<br/>Computation ", computation_->name());
  if (computation_->IsFusionComputation()) {
    StrAppend(&graph_label, " (in fusion instruction ",
              computation_->FusionInstruction()->name(), ")");
  }
  std::vector<std::string> edge_css_rules;
  std::string kBlue = "#1976d2";
  std::string kRed = "#d32f2f";
  for (const auto& kv : edge_ids_) {
    const HloInstruction* from_node = kv.first.first;
    const HloInstruction* to_node = kv.first.second;
    int64_t edge_id = kv.second;
    // Hovering over `elem_type<elem_id>` recolors edge `edge_id`'s label,
    // path, and arrowhead.
    auto add_hover_css_rule = [&](std::string elem_type, int64_t elem_id,
                                  std::string color) {
      edge_css_rules.push_back(
          StrFormat("  #%s%d:hover ~ #edge%d text { fill: %s; }\n"
                    "  #%s%d:hover ~ #edge%d path { "
                    "stroke: %s; stroke-width: .2em; }\n"
                    "  #%s%d:hover ~ #edge%d polygon { "
                    "fill: %s; stroke: %s; stroke-width: .2em; }\n",
                    elem_type, elem_id, edge_id, color,
                    elem_type, elem_id, edge_id, color,
                    elem_type, elem_id, edge_id, color, color));
    };
    int64_t from_node_id = tsl::gtl::FindWithDefault(node_ids_, from_node, -1);
    if (from_node_id == -1) {
      LOG(FATAL) << from_node->name() << " was added to edges but not to nodes";
    }
    // A null `to_node` denotes an edge into the synthetic root tag.
    int64_t to_node_id = to_node
                             ? tsl::gtl::FindWithDefault(node_ids_, to_node, -1)
                             : root_node_id_;
    if (to_node != nullptr && to_node_id == -1) {
      LOG(FATAL) << to_node->name() << " was added to edges but not to nodes";
    }
    add_hover_css_rule("node", from_node_id, kBlue);
    add_hover_css_rule("node", to_node_id, kRed);
    if (to_node) {
      VLOG(3) << "Adding css for edge " << edge_id << " from node "
              << from_node->name() << " to node " << to_node->name();
    } else {
      VLOG(3) << "Adding css for edge " << edge_id << " from node "
              << from_node->name() << " to root tag";
    }
    if (to_node) {
      // Edges that cross a fusion boundary also highlight the whole cluster:
      // leaving via the fused root colors the source cluster blue; entering
      // via a fused parameter colors the destination cluster red.
      if (from_node->IsFused() &&
          from_node->parent()->root_instruction() == from_node) {
        int64_t cluster_id = cluster_ids_.at(from_node->parent());
        add_hover_css_rule("clust", cluster_id, kBlue);
      }
      if (to_node->IsFused() && to_node->opcode() == HloOpcode::kParameter) {
        int64_t cluster_id = cluster_ids_.at(to_node->parent());
        add_hover_css_rule("clust", cluster_id, kRed);
      }
    }
  }
  // '#' must be percent-encoded inside the data: stylesheet URL.
  return StrFormat(
      fmt, graph_label,
      absl::StrReplaceAll(StrJoin(edge_css_rules, "\n"), {{"#", "%23"}}));
}
std::string HloDotDumper::Footer() {
return StrCat(StrJoin(edges_, "\n"), "\n}");
}
// Precondition: `instr` is a fusion instruction. Delegates to the generic
// subcomputation check on its fused-instructions computation.
bool HloDotDumper::ShouldShowFusionSubcomputation(const HloInstruction* instr) {
  CHECK_EQ(instr->opcode(), HloOpcode::kFusion);
  return ShouldShowSubcomputation(instr->fused_instructions_computation());
}
// Decides whether `subcomp` gets its own cluster in the rendered graph.
bool HloDotDumper::ShouldShowSubcomputation(const HloComputation* subcomp) {
  if (subcomp->IsFusionComputation()) {
    const HloInstruction* fusion = subcomp->FusionInstruction();
    // Hidden if the fusion itself is hidden/elided, or fusion bodies are
    // disabled by the render options.
    if (!filter_.Show(fusion) || filter_.SomeOrAllOperandsOmitted(fusion) ||
        !hlo_render_options_.show_fusion_subcomputations) {
      return false;
    }
  }
  // Trivial computations (e.g. scalar add) are summarized inline instead of
  // drawn as clusters.
  if (!subcomp->IsFusionComputation() && MatchTrivialComputation(subcomp)) {
    return false;
  }
  if (subcomp->WhileCallInstruction() != nullptr &&
      !hlo_render_options_.show_while_subcomputations) {
    return false;
  }
  // Only worth drawing if at least one contained instruction is visible.
  return absl::c_any_of(
      subcomp->instructions(),
      [&](const HloInstruction* instr) { return filter_.Show(instr); });
}
std::string HloDotDumper::DumpSubcomputation(
const HloComputation* subcomp, const HloInstruction* parent_instr) {
VLOG(2) << "Dumping subcomputation " << subcomp->name();
if (parent_instr->opcode() != HloOpcode::kFusion) {
const HloInstruction* from = GetNodeForEdge(subcomp->root_instruction());
VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, parent_instr}, next_edge_id_++});
constexpr char edge_fmt[] =
R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)";
edges_.push_back(StrFormat(
edge_fmt, InstructionId(from), InstructionId(parent_instr),
SubcomputationId(subcomp), subcomp->name(), parent_instr->name()));
}
if (cluster_ids_.find(subcomp) != cluster_ids_.end()) {
return "";
}
cluster_ids_[subcomp] = next_cluster_id_++;
std::string id = SubcomputationId(subcomp);
std::string subcomp_label, style;
if (parent_instr->opcode() == HloOpcode::kFusion) {
subcomp_label =
StrFormat("Fused expression for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(parent_instr->ToCategory()));
std::string extra_info = GetInstructionNodeExtraInfo(parent_instr);
if (!extra_info.empty()) {
StrAppend(&subcomp_label, "<br/>", extra_info);
}
std::string node_backend_config =
GetInstructionNodeBackendConfig(parent_instr);
if (!node_backend_config.empty()) {
StrAppend(&subcomp_label, "<br/>", node_backend_config);
}
bool highlight = filter_.Highlight(parent_instr);
std::string fillcolor;
std::string strokecolor;
if (!highlight && (parent_instr->module_has_statistics() ||
parent_instr->has_statistics())) { | #include "xla/service/hlo_graph_dumper.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::HasSubstr;
using HloGraphDumperTest = HloTestBase;
// Name of the currently running gtest test case; used below as the HloModule
// name.
std::string TestName() {
  const auto* test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();
  return test_info->name();
}
// Builds a chain of adds, fuses it, then fuses part of the fusion again, and
// checks that a full render names every instruction at every nesting level
// and that a neighborhood render around an inner instruction includes it.
TEST_F(HloGraphDumperTest, NestedFusion) {
  HloComputation::Builder b("b");
  // Build param0 + param1 + ... + param4 as a left-leaning chain of adds.
  auto shape = ShapeUtil::MakeShape(F32, {10, 100});
  std::vector<HloInstruction*> params;
  for (int i = 0; i <= 4; ++i) {
    params.push_back(b.AddInstruction(
        HloInstruction::CreateParameter(i, shape, StrCat("param", i))));
  }
  std::vector<HloInstruction*> sums;
  sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
      shape, HloOpcode::kAdd, params[0], params[1])));
  for (int i = 0; i <= 2; ++i) {
    sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
        shape, HloOpcode::kAdd, sums[i], params[i + 2])));
  }
  HloModuleConfig config;
  HloModule m(TestName(), config);
  m.AddEntryComputation(b.Build());
  HloComputation* root_computation = m.entry_computation();
  // Fuse all the adds, then fuse the first two adds inside that fusion.
  auto* outer_fusion = root_computation->CreateFusionInstruction(
      {sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop);
  std::vector<HloInstruction*> fused_sums;
  for (auto* instr : outer_fusion->fused_instructions_computation()
                         ->MakeInstructionPostOrder()) {
    if (instr->opcode() == HloOpcode::kAdd) {
      fused_sums.push_back(instr);
    }
  }
  auto* inner_fusion =
      outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
          {fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop);
  TF_ASSERT_OK_AND_ASSIGN(
      std::string graph,
      RenderGraph(*root_computation, "", DebugOptions(),
                  RenderedGraphFormat::kDot));
  // Every instruction at every nesting level must appear in the output.
  for (const HloComputation* computation :
       {root_computation,
        inner_fusion->fused_instructions_computation(),
        outer_fusion->fused_instructions_computation()}) {
    for (const HloInstruction* instruction : computation->instructions()) {
      EXPECT_THAT(graph, HasSubstr(instruction->name()));
    }
  }
  // A neighborhood render centered on the innermost add must include it.
  const HloInstruction* inner_sum = nullptr;
  for (const HloInstruction* instruction :
       inner_fusion->fused_instructions_computation()->instructions()) {
    if (instruction->opcode() == HloOpcode::kAdd) {
      inner_sum = instruction;
      break;
    }
  }
  ASSERT_NE(inner_sum, nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph,
                          RenderNeighborhoodAround(*inner_sum, 1,
                                                   RenderedGraphFormat::kDot));
  EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name()));
}
// A graph whose only node is a constant should still render, with the given
// label in the output.
TEST_F(HloGraphDumperTest, Constant) {
  HloComputation::Builder b("b");
  auto instruction = b.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42)));
  instruction->SetAndSanitizeName("i_am_a_constant_root_instruction");
  HloModuleConfig config;
  HloModule m(TestName(), config);
  HloComputation* root_computation = m.AddEntryComputation(b.Build());
  TF_ASSERT_OK_AND_ASSIGN(
      std::string graph,
      RenderGraph(*root_computation, "an_empty_graph", DebugOptions(),
                  RenderedGraphFormat::kDot));
  EXPECT_THAT(graph, HasSubstr("an_empty_graph"));
}
TEST_F(HloGraphDumperTest, TupleConstant) {
  // A tuple-shaped constant followed by a GTE extracting its first element.
  Shape tuple_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})});
  HloComputation::Builder builder("b");
  auto* tuple_constant = builder.AddInstruction(
      HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape)));
  auto* element = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::MakeShape(F32, {3, 2}), tuple_constant, 0));
  HloModuleConfig config;
  HloModule module(TestName(), config);
  HloComputation* entry = module.AddEntryComputation(builder.Build(element));
  TF_ASSERT_OK_AND_ASSIGN(
      std::string dot,
      RenderGraph(*entry, "tuple_constant", DebugOptions(),
                  RenderedGraphFormat::kDot));
  // Both the label and the tuple-shaped constant must be rendered.
  EXPECT_THAT(dot, HasSubstr("tuple_constant"));
  EXPECT_THAT(dot, HasSubstr("constant (f32[3,2], s32[4,5])"));
}
// Verifies that the comparison direction attribute (direction=LT) survives
// into the rendered dot output.
TEST_F(HloGraphDumperTest, Compare) {
  const char* hlo_string = R"(
  HloModule comp
  ENTRY comp {
    param.0 = f32[10] parameter(0)
    param.1 = f32[10] parameter(1)
    ROOT lt = pred[10] compare(param.0, param.1), direction=LT
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      std::string graph,
      RenderGraph(*module->entry_computation(), "tuple_constant",
                  DebugOptions(), RenderedGraphFormat::kDot));
  EXPECT_THAT(graph, HasSubstr("direction=LT"));
}
// Renders a module whose parameters carry statistics-visualization
// attributes.  This test only asserts that rendering succeeds (via the
// TF_ASSERT_OK_AND_ASSIGN); it makes no expectations on the dot contents.
TEST_F(HloGraphDumperTest, HasStatisticsViz) {
  const char* hlo_string = R"(
  HloModule comp
  ENTRY comp {
    param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5}
    param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4}
    ROOT lt = pred[10] compare(param.0, param.1), direction=LT
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      std::string graph,
      RenderGraph(*module->entry_computation(), "tuple_constant",
                  DebugOptions(), RenderedGraphFormat::kDot));
}
// Regression test: rendering must handle conditionals whose branch roots are
// constants; the entry root is a tuple wrapping the conditional result.
// Only checks that rendering succeeds.
TEST_F(HloGraphDumperTest, RootIsConstant) {
  const char* hlo_string = R"(
HloModule indexed_conditional
%then_branch (empty: ()) -> f32[] {
 %empty = () parameter(0)
 ROOT %then = f32[] constant(1)
}
%else_branch (empty.1: ()) -> f32[] {
 %empty.1 = () parameter(0)
 ROOT %else = f32[] constant(2)
}
ENTRY %conditional_select (constant: pred[]) -> (f32[]) {
 %constant = pred[] parameter(0)
 %emptytuple = () tuple()
 %conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch
 ROOT %t = (f32[]) tuple(f32[] %conditional)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      std::string graph,
      RenderGraph(*module->entry_computation(), "tuple_constant",
                  DebugOptions(), RenderedGraphFormat::kDot));
}
TEST_F(HloGraphDumperTest, OverrideColors) {
  // Two parameters compared element-wise; each parameter is assigned a custom
  // color and formatted statistic through the node-color override map.
  const char* hlo_string = R"(
  HloModule comp
  ENTRY comp {
    param.0 = f32[10] parameter(0)
    param.1 = f32[10] parameter(1)
    ROOT lt = pred[10] compare(param.0, param.1), direction=LT
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ColorStats param0_stats;
  param0_stats.color = "#A9C343";
  param0_stats.stats = absl::StrFormat("%.3f", 1.11);
  ColorStats param1_stats;
  param1_stats.color = "#BC8A3F";
  param1_stats.stats = absl::StrFormat("%.3f", 2.22);
  absl::flat_hash_map<const HloInstruction*, ColorStats> node_colors;
  node_colors[module->entry_computation()->GetInstructionWithName("param.0")] =
      param0_stats;
  node_colors[module->entry_computation()->GetInstructionWithName("param.1")] =
      param1_stats;
  HloRenderOptions render_options;
  render_options.override_node_colors = true;
  TF_ASSERT_OK_AND_ASSIGN(
      std::string dot,
      RenderGraph(*module->entry_computation(), "tuple_constant",
                  DebugOptions(), RenderedGraphFormat::kDot, render_options,
                  node_colors));
  // Both colors and both formatted statistics must appear in the rendering.
  EXPECT_THAT(dot, HasSubstr("#A9C343"));
  EXPECT_THAT(dot, HasSubstr("1.110"));
  EXPECT_THAT(dot, HasSubstr("#BC8A3F"));
  EXPECT_THAT(dot, HasSubstr("2.220"));
}
}
} |
1,940 | cpp | tensorflow/tensorflow | call_inliner | third_party/xla/xla/service/call_inliner.cc | third_party/xla/xla/service/call_inliner_test.cc | #ifndef XLA_SERVICE_CALL_INLINER_H_
#define XLA_SERVICE_CALL_INLINER_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that replaces kCall instructions with an inlined copy of the
// called computation's body.
class CallInliner : public HloModulePass {
 public:
  // Maps an instruction of the callee to its clone in the caller.
  using InlinedInstructionMap =
      absl::flat_hash_map<HloInstruction*, HloInstruction*>;
  // Inlines a single kCall instruction in place (no pass run needed) and
  // returns the callee-to-clone mapping.
  static absl::StatusOr<InlinedInstructionMap> Inline(HloInstruction* call);
  // single_call_site: only inline callees with exactly one call site.
  // update_domain: re-run sharding-domain isolation on inlined instructions.
  explicit CallInliner(bool single_call_site = false,
                       bool update_domain = false)
      : single_call_site_(single_call_site), update_domain_(update_domain) {}
  ~CallInliner() override = default;
  absl::string_view name() const override { return "call-inliner"; }
  using HloPassInterface::Run;
  // Returns true iff any call was inlined.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Whether `instruction` is a kCall eligible for inlining (overridable).
  virtual bool IsInlineableCallOp(HloInstruction* instruction) const;
 private:
  bool single_call_site_;
  bool update_domain_;
};
}
#endif
#include "xla/service/call_inliner.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_domain_isolator.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// DFS visitor that clones each instruction of a called computation into the
// caller of a kCall instruction.  Parameters resolve to the call's operands,
// control dependencies are mirrored onto the clones, and on FinishVisit the
// call is replaced by the clone of the callee's root.
class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
 public:
  // `call` must be a kCall instruction; clones are added to its parent
  // computation.
  explicit SubcomputationInsertionVisitor(HloInstruction* call)
      : call_(call), outer_(call->parent()) {
    CHECK_EQ(HloOpcode::kCall, call_->opcode());
  }
  // Clones `hlo` into the caller.  Operands and control predecessors are
  // resolved through the clone map; DFS order guarantees they were visited
  // (and therefore mapped) before `hlo`.
  absl::Status DefaultAction(HloInstruction* hlo) override {
    std::vector<HloInstruction*> new_operands;
    for (HloInstruction* operand : hlo->operands()) {
      TF_ASSIGN_OR_RETURN(HloInstruction * new_operand, Resolve(operand));
      new_operands.push_back(new_operand);
    }
    VLOG(1) << "Cloning HLO and adding to caller: " << hlo->ToString();
    auto new_hlo = hlo->CloneWithNewOperands(hlo->shape(), new_operands);
    HloInstruction* new_hlo_pointer =
        outer_->AddInstruction(std::move(new_hlo));
    TF_RETURN_IF_ERROR(NoteMapping(hlo, new_hlo_pointer));
    // Mirror control edges of the original onto the clone.
    for (HloInstruction* control_predecessor : hlo->control_predecessors()) {
      TF_ASSIGN_OR_RETURN(HloInstruction * new_control_predecessor,
                          Resolve(control_predecessor));
      TF_RETURN_IF_ERROR(
          new_control_predecessor->AddControlDependencyTo(new_hlo_pointer));
    }
    return absl::OkStatus();
  }
  // Parameters are not cloned: parameter N maps to the call's operand N.
  absl::Status HandleParameter(HloInstruction* parameter) override {
    TF_RETURN_IF_ERROR(NoteMapping(
        parameter, call_->mutable_operand(parameter->parameter_number())));
    return absl::OkStatus();
  }
  // Replaces all uses of the call with the clone of the callee's root.
  absl::Status FinishVisit(HloInstruction* root) override {
    TF_ASSIGN_OR_RETURN(HloInstruction * new_root, Resolve(root));
    VLOG(1) << "Replacing all uses of " << call_->ToString()
            << " with new root " << new_root->ToString();
    return outer_->ReplaceInstruction(call_, new_root);
  }
  // Hands the callee-to-clone map to the caller of the visitor.
  CallInliner::InlinedInstructionMap ConsumeInstructionMap() {
    return std::move(subcomputation_hlo_to_new_hlo_);
  }
 private:
  // Looks up the clone of `subcomputation_hlo`; NotFound if unmapped.
  absl::StatusOr<HloInstruction*> Resolve(HloInstruction* subcomputation_hlo) {
    auto it = subcomputation_hlo_to_new_hlo_.find(subcomputation_hlo);
    if (it == subcomputation_hlo_to_new_hlo_.end()) {
      return NotFound(
          "Could not find mapping from subcomputation HLO %s to a cloned HLO.",
          subcomputation_hlo->ToString());
    }
    return it->second;
  }
  // Records the mapping; each callee instruction may be mapped only once.
  absl::Status NoteMapping(HloInstruction* subcomputation_hlo,
                           HloInstruction* new_hlo) {
    auto result = subcomputation_hlo_to_new_hlo_.insert(
        std::make_pair(subcomputation_hlo, new_hlo));
    TF_RET_CHECK(result.second)
        << "A mapping for the subcomputation HLO is already present.";
    return absl::OkStatus();
  }
  HloInstruction* call_;        // The kCall being inlined.
  HloComputation* outer_;       // The caller computation receiving clones.
  CallInliner::InlinedInstructionMap subcomputation_hlo_to_new_hlo_;
};
}
absl::StatusOr<CallInliner::InlinedInstructionMap>
CallInliner::Inline(HloInstruction* call) {
  // Only kCall instructions can be inlined here.
  TF_RET_CHECK(call->opcode() == HloOpcode::kCall)
      << "Instruction was not a call op: " << call->opcode();
  const auto& called = call->called_computations();
  TF_RET_CHECK(called.size() == 1);
  HloComputation* body = called.front();
  // Walk the callee, cloning every instruction into the caller; the visitor
  // replaces the call with the cloned root when the walk finishes.
  SubcomputationInsertionVisitor cloner(call);
  TF_RETURN_IF_ERROR(body->Accept(&cloner));
  return cloner.ConsumeInstructionMap();
}
bool CallInliner::IsInlineableCallOp(HloInstruction* instruction) const {
  // Only kCall ops qualify, and never ones living inside an async
  // computation.
  if (instruction->opcode() != HloOpcode::kCall) {
    return false;
  }
  return !instruction->parent()->IsAsyncComputation();
}
// Builds the call graph once, then for every computation on an included
// execution thread inlines each inlineable kCall (post order).  With
// single_call_site_ set, only callees with exactly one caller are inlined.
// Runs HloDCE afterwards to delete the now-unreferenced callees.  Returns
// true iff anything was inlined.
absl::StatusOr<bool> CallInliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  bool did_mutate = false;
  TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node)
                                                -> absl::Status {
    if (!HloInstruction::IsThreadIncluded(
            node.computation()->execution_thread(), execution_threads)) {
      return absl::OkStatus();
    }
    VLOG(1) << "Visiting node: " << node.ToString();
    for (HloInstruction* instruction :
         node.computation()->MakeInstructionPostOrder()) {
      if (IsInlineableCallOp(instruction)) {
        const auto& callees = instruction->called_computations();
        TF_RET_CHECK(callees.size() == 1);
        if (!single_call_site_ || call_graph->GetNode(instruction->to_apply())
                                          .caller_callsites()
                                          .size() == 1) {
          TF_ASSIGN_OR_RETURN(CallInliner::InlinedInstructionMap inline_map,
                              Inline(instruction));
          // Optionally re-isolate sharding domains around the inlined
          // instructions.
          if (update_domain_) {
            HloDomainIsolator isolator(
                []() { return ShardingDomainCreator{}; });
            for (const auto& [call_inst, inlined_inst] : inline_map) {
              TF_RETURN_IF_ERROR(isolator.UpdateDomains(inlined_inst).status());
            }
          }
          did_mutate = true;
        }
      }
    }
    return absl::OkStatus();
  }));
  if (did_mutate) {
    // Inlining leaves the original callee computations dead; clean them up.
    TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
  }
  return did_mutate;
}
} | #include "xla/service/call_inliner.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using CallInlinerTest = HloTestBase;
// A callee with a control edge between two constants is inlined; the clone of
// that control edge must appear in the caller.
TEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {
  HloComputation::Builder inner(TestName() + ".inner");
  HloInstruction* zero = inner.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(24.0f)));
  HloInstruction* one = inner.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  TF_ASSERT_OK(zero->AddControlDependencyTo(one));
  auto module = CreateNewVerifiedModule();
  HloComputation* inner_computation =
      module->AddEmbeddedComputation(inner.Build());
  HloComputation::Builder outer(TestName() + ".outer");
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  outer.AddInstruction(
      HloInstruction::CreateCall(r0f32, {}, inner_computation));
  auto computation = module->AddEntryComputation(outer.Build());
  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  // The root becomes the cloned 42-constant, with the cloned 24-constant as
  // its sole control predecessor.
  EXPECT_THAT(computation->root_instruction(), op::Constant());
  EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
            42);
  ASSERT_EQ(1, computation->root_instruction()->control_predecessors().size());
  auto prior = computation->root_instruction()->control_predecessors()[0];
  EXPECT_THAT(prior, op::Constant());
  EXPECT_EQ(prior->literal().GetFirstElement<float>(), 24);
}
// Calls nested inside a while's condition/body computations must also be
// inlined by the pass.
TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder just_false(TestName() + ".false");
  just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* false_computation =
      module->AddEmbeddedComputation(just_false.Build());
  // call_false wraps a call to false_computation; it serves as both the
  // while condition and the while body.
  HloComputation::Builder call_false_builder(TestName() + ".call_false");
  call_false_builder.AddInstruction(
      HloInstruction::CreateParameter(0, pred, "param"));
  call_false_builder.AddInstruction(
      HloInstruction::CreateCall(pred, {}, false_computation));
  HloComputation* call_false =
      module->AddEmbeddedComputation(call_false_builder.Build());
  HloComputation::Builder outer(TestName() + ".outer");
  HloInstruction* init_value = outer.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  outer.AddInstruction(
      HloInstruction::CreateWhile(pred, call_false, call_false, init_value));
  auto computation = module->AddEntryComputation(outer.Build());
  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  // After inlining, condition and body roots are the constant directly.
  EXPECT_THAT(
      computation->root_instruction()->while_condition()->root_instruction(),
      op::Constant());
  EXPECT_THAT(computation->root_instruction()->while_body()->root_instruction(),
              op::Constant());
}
// Exercises the static CallInliner::Inline entry point directly (no pass
// run); control edges between cloned instructions must be preserved.
TEST_F(CallInlinerTest, InlineWithoutRunningPass) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder just_false(TestName() + ".false");
  auto* true_constant = just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<bool>({true})));
  auto* false_constant = just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));
  HloComputation* false_computation =
      module->AddEmbeddedComputation(just_false.Build());
  HloComputation::Builder call_false_builder(TestName() + ".call_false");
  HloInstruction* call = call_false_builder.AddInstruction(
      HloInstruction::CreateCall(pred, {}, false_computation));
  auto computation = module->AddEntryComputation(call_false_builder.Build());
  TF_ASSERT_OK(CallInliner::Inline(call).status());
  EXPECT_THAT(computation->root_instruction(), op::Constant());
  EXPECT_THAT(computation->root_instruction()->control_successors(),
              ElementsAre(op::Constant()));
}
// Inlines chained calls to trivial computations (parameter ignored, constant
// root); the entry root collapses to a constant.
TEST_F(CallInlinerTest, InlineWithEmptyComputation) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder empty(TestName() + ".empty");
  empty.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
  empty.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  HloComputation* empty_computation =
      module->AddEmbeddedComputation(empty.Build());
  HloComputation::Builder empty2(TestName() + ".empty");
  empty2.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
  empty2.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  HloComputation* empty2_computation =
      module->AddEmbeddedComputation(empty2.Build());
  HloComputation::Builder entry("entry");
  auto zero = entry.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  // Three calls: two to empty_computation, one (feeding another call) to
  // empty2_computation.
  entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {zero}, empty_computation));
  HloInstruction* call1 = entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {zero}, empty2_computation));
  entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {call1}, empty_computation));
  auto computation = module->AddEntryComputation(entry.Build());
  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  EXPECT_THAT(computation->root_instruction(), op::Constant());
}
// A callee with a side effect (outfeed) must still be inlineable; only the
// mutation flag is checked.
TEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {
  const Shape f32 = ShapeUtil::MakeShape(F32, {});
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder outfeeder(TestName() + ".outfeeder");
  auto value = outfeeder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());
  outfeeder.AddInstruction(
      HloInstruction::CreateOutfeed(f32, value, token, ""));
  auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());
  HloComputation::Builder outer(TestName() + ".outer");
  outer.AddInstruction(HloInstruction::CreateCall(
      outfeed_computation->root_instruction()->shape(), {},
      outfeed_computation));
  module->AddEntryComputation(outer.Build());
  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
}
// With single_call_site=true, only computation %b (one call site) is
// inlined; the two calls to %a (two call sites) are left alone.
TEST_F(CallInlinerTest, InlineSingleUseCalleesOnly) {
  const absl::string_view hlo_string = R"(
  HloModule inline_module
  a {
    ROOT tuple = () tuple()
  }
  b {
    ROOT tuple.1 = () tuple()
  }
  ENTRY inline {
    a = () call(), to_apply=a
    b = () call(), to_apply=a
    c = () call(), to_apply=b
    ROOT tuple = ((), (), ()) tuple(a, b, c)
  })";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  CallInliner call_inliner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  // Expected entry: two surviving calls, the inlined tuple, and the root.
  ASSERT_EQ(module->entry_computation()->instruction_count(), 4);
  auto inst = module->entry_computation()->instructions().begin();
  EXPECT_THAT(*inst, op::Call());
  ++inst;
  EXPECT_THAT(*inst, op::Call());
  ++inst;
  EXPECT_THAT(*inst, op::Tuple());
  ++inst;
  EXPECT_THAT(*inst, op::Tuple());
}
// Runs the pass twice on clones of the same module: once unrestricted (the
// main-thread call is inlined, async wrapper remains) and once restricted to
// "secondary_thread" (only the call inside the async callee is inlined; the
// main-thread call survives).
TEST_F(CallInlinerTest, InliningPerformedInsideSpecifiedThreadsOnly) {
  const std::string hlo_string = R"(
  HloModule inline_specified_threads_only

  %secondary_inner () -> u32[] {
    ROOT %co.2 = u32[] constant(2)
  }, execution_thread="secondary_thread"

  %secondary_outer () -> u32[] {
    %co.1 = u32[] constant(1)
    %call.1 = u32[] call(), to_apply=%secondary_inner
    ROOT %add.1 = add(%co.1, %call.1)
  }, execution_thread="secondary_thread"

  %main_inner () -> u32[] {
    %co.0 = u32[] constant(0)
    %async-start = ((), u32[], u32[]) call-start(), async_execution_thread="secondary_thread", to_apply=secondary_outer
    %async-done = u32[] call-done(((), u32[], u32[]) %async-start)
    ROOT %add.2 = add(%co.0, %async-done)
  }

  ENTRY %main_outer (p0: u32[]) -> u32[] {
    %p.0 = u32[] parameter(0)
    %call.0 = u32[] call(), to_apply=%main_inner
    ROOT %add.3 = add(%p.0, %call.0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto module_clone = module->Clone("");
  // Unrestricted run: the main-thread call is inlined; the async wrapper and
  // its (now inlined) secondary-thread body are checked structurally.
  {
    VLOG(1) << "Module BEFORE CallInliner\n" << module->ToString();
    CallInliner call_inliner;
    TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
    VLOG(1) << "Module AFTER CallInliner\n" << module->ToString();
    EXPECT_TRUE(mutated);
    EXPECT_THAT(
        module->entry_computation()->root_instruction(),
        op::Add(op::Parameter(0),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
                        op::AsyncDone())));
    EXPECT_THAT(module->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->operand(1)
                    ->async_wrapped_instruction()
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
                        op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
  }
  VLOG(1) << "Restricting CallInliner to the secondary thread.";
  // Restricted run: the main-thread call must survive; inlining happens only
  // inside the secondary-thread computations.
  {
    CallInliner call_inliner;
    TF_ASSERT_OK_AND_ASSIGN(
        bool mutated,
        call_inliner.Run(module_clone.get(), {"secondary_thread"}));
    VLOG(1) << "Module AFTER CallInliner\n" << module_clone->ToString();
    EXPECT_TRUE(mutated);
    EXPECT_THAT(module_clone->entry_computation()->root_instruction(),
                op::Add(op::Parameter(0), op::Call()));
    EXPECT_THAT(module_clone->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
                        op::AsyncDone()));
    EXPECT_THAT(module_clone->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->called_computations()
                    .at(0)
                    ->root_instruction()
                    ->operand(1)
                    ->async_wrapped_instruction()
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
                        op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
  }
}
}
} |
1,941 | cpp | tensorflow/tensorflow | copy_insertion | third_party/xla/xla/service/copy_insertion.cc | third_party/xla/xla/service/copy_insertion_test.cc | #ifndef XLA_SERVICE_COPY_INSERTION_H_
#define XLA_SERVICE_COPY_INSERTION_H_
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that inserts kCopy instructions so that buffer assignment is
// legal: it resolves interference, removes copies it proves unnecessary, and
// adds special-case copies (e.g. for entry parameters/constants and aliased
// input/output buffers).
class CopyInsertion : public HloModulePass {
 public:
  absl::string_view name() const override { return "copy-insertion"; }
  // Default limit (0) for region-based live range analysis.
  static constexpr int64_t kUseRegionAnalysisLimit = 0;
  // can_share_buffer: optional callback overriding the dataflow analysis'
  // buffer-sharing decision.
  explicit CopyInsertion(
      const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr,
      int64_t use_region_based_live_range_analysis = kUseRegionAnalysisLimit)
      : can_share_buffer_(can_share_buffer),
        use_region_based_live_range_analysis_(
            use_region_based_live_range_analysis) {}
  using HloPassInterface::Run;
  // Returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Removes copies whose elimination does not introduce live-range
  // interference.
  absl::Status RemoveUnnecessaryCopies(
      HloModule* module, bool check_live_range_ordering = false,
      const absl::flat_hash_set<absl::string_view>& execution_threads = {});
  // Adds copies required regardless of interference (entry
  // parameters/constants at the root, aliasing constraints).
  absl::Status AddSpecialCaseCopies(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads = {});
 protected:
  virtual absl::Status AddSpecialCaseCopies(
      const CallGraph& call_graph,
      const absl::flat_hash_set<absl::string_view>& execution_threads,
      HloModule* module);
  virtual absl::Status AddCopiesForConditional(
      const HloAliasAnalysis& alias_analysis, HloInstruction* conditional);
  HloDataflowAnalysis::CanShareBuffer can_share_buffer_;
 private:
  absl::Status AddCopiesToResolveInterference(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads);
  int64_t use_region_based_live_range_analysis_;
};
}
#endif
#include "xla/service/copy_insertion.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/status_macros.h"
#include "xla/util.h"
namespace xla {
namespace {
using absl::StrAppend;
bool IsReadonlyEntryParameterValue(const HloValue& value) {
const HloComputation* computation = value.defining_instruction()->parent();
return value.defining_instruction()->opcode() == HloOpcode::kParameter &&
computation == computation->parent()->entry_computation() &&
!computation->parent()->input_output_alias_config().ParameterHasAlias(
value.defining_instruction()->parameter_number(), value.index());
}
// True iff the value is produced by a kConstant instruction.
bool IsConstantValue(const HloValue& value) {
  return HloOpcode::kConstant == value.defining_instruction()->opcode();
}
// Read-only values are constants and un-aliased entry parameters.
bool ValueIsReadOnly(const HloValue& value) {
  if (IsConstantValue(value)) {
    return true;
  }
  return IsReadonlyEntryParameterValue(value);
}
// Flags controlling which special-case copies are added for a computation.
struct SpecialCaseCopyPolicy {
  // Insert copies so distinct root shape indices use distinct buffers.
  bool copy_root_replicated_buffers = false;
  // Insert copies so parameters and constants are not live-out at the root.
  bool copy_parameters_and_constants = false;
};
SpecialCaseCopyPolicy GetSpecialCaseCopyPolicy(const CallGraphNode& node,
                                               HloModule* module,
                                               HloComputation* computation) {
  // Only the entry computation needs special-case copies; all policy flags
  // stay false for every other computation.
  SpecialCaseCopyPolicy policy;
  const bool is_entry = computation == module->entry_computation();
  policy.copy_parameters_and_constants = is_entry;
  policy.copy_root_replicated_buffers = is_entry;
  return policy;
}
bool ShouldCopyRootValue(const HloValue& value,
                         const SpecialCaseCopyPolicy& policy) {
  // A root value needs a copy only when the policy protects parameters and
  // constants and the value actually is read-only.
  return policy.copy_parameters_and_constants && ValueIsReadOnly(value);
}
// Deep-copies `from` and `to` at the shape indices marked true in
// `indices_to_copy`, then adds a control edge from each copy made for `from`
// to the corresponding copy made for `to` (same shape index).  Returns the
// pair (from_deep_copy, to_deep_copy).  `from` and `to` must have compatible
// shapes.
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
DeepCopyAndAddControlEdges(HloInstruction* from, HloInstruction* to,
                           const ShapeTree<bool>& indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(from->shape(), to->shape()));
  // Copy trees record, per shape index, the kCopy instruction created (or
  // nullptr where no copy was requested).
  ShapeTree<HloInstruction*> from_copy_tree(from->shape(),
                                            nullptr);
  TF_ASSIGN_OR_RETURN(HloInstruction * from_deep_copy,
                      from->parent()->DeepCopyInstruction(
                          from, &indices_to_copy, &from_copy_tree));
  ShapeTree<HloInstruction*> to_copy_tree(to->shape(), nullptr);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * to_deep_copy,
      to->parent()->DeepCopyInstruction(to, &indices_to_copy, &to_copy_tree));
  // Pair up copies index-by-index and order them with control edges.
  for (const auto& pair : from_copy_tree) {
    const ShapeIndex& index = pair.first;
    HloInstruction* from_copy = pair.second;
    HloInstruction* to_copy = to_copy_tree.element(index);
    if (from_copy == nullptr) {
      TF_RET_CHECK(to_copy == nullptr);
      continue;
    }
    TF_RET_CHECK(to_copy != nullptr);
    TF_RETURN_IF_ERROR(from_copy->AddControlDependencyTo(to_copy));
  }
  return std::make_pair(from_deep_copy, to_deep_copy);
}
// Fills `indices_to_copy` with, per shape index of `xla_while`, whether a
// copy is needed there: true when either the init operand or the while result
// has more than one reaching value at that index, or when their unique values
// differ.  Returns true iff any index needs a copy.
bool IndicesToCopyForWhile(const HloDataflowAnalysis& dataflow,
                           const HloInstruction* xla_while,
                           ShapeTree<bool>* indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(), xla_while->shape()));
  bool any_copies = false;
  const HloInstruction* init = xla_while->operand(0);
  for (auto& pair : *indices_to_copy) {
    const ShapeIndex& index = pair.first;
    bool& should_copy = pair.second;
    if (dataflow.GetValueSet(init, index).values().size() > 1 ||
        dataflow.GetValueSet(xla_while, index).values().size() > 1) {
      // Ambiguous dataflow at this index; copy conservatively.
      should_copy = true;
    } else {
      // Copy when the value flowing out of the while differs from the one
      // flowing in.
      should_copy = dataflow.GetUniqueValueAt(xla_while, index) !=
                    dataflow.GetUniqueValueAt(init, index);
    }
    any_copies |= should_copy;
  }
  return any_copies;
}
// Fills `indices_to_copy` with, per shape index of `xla_conditional`, whether
// the index carries a phi value defined by the conditional itself (those need
// copies).  Each index is expected to have exactly one reaching value.
// Returns true iff any index needs a copy.
bool IndicesToCopyForConditional(const HloDataflowAnalysis& dataflow,
                                 const HloInstruction* xla_conditional,
                                 ShapeTree<bool>* indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(),
                               xla_conditional->shape()));
  bool any_copies = false;
  for (auto& pair : *indices_to_copy) {
    const ShapeIndex& index = pair.first;
    bool& should_copy = pair.second;
    CHECK_EQ(dataflow.GetValueSet(xla_conditional, index).values().size(), 1);
    auto value = dataflow.GetValueSet(xla_conditional, index).values()[0];
    // Phi values merged at the conditional must be materialized by a copy.
    should_copy =
        value->is_phi() && value->defining_instruction() == xla_conditional;
    any_copies |= should_copy;
  }
  return any_copies;
}
// Inserts the copies a kWhile needs: deep-copies the init operand at the
// indices computed by IndicesToCopyForWhile, and inside the body deep-copies
// the parameter and the root at the same indices, ordering each parameter
// copy before the matching root copy with control edges.
absl::Status AddCopiesForWhile(const HloAliasAnalysis& alias_analysis,
                               HloInstruction* xla_while) {
  VLOG(2) << "Adding copies for kWhile instruction " << xla_while->name();
  TF_RET_CHECK(xla_while->opcode() == HloOpcode::kWhile);
  ShapeTree<bool> indices_to_copy(xla_while->shape());
  if (!IndicesToCopyForWhile(alias_analysis.dataflow_analysis(), xla_while,
                             &indices_to_copy)) {
    VLOG(2) << "No copies necessary for kWhile instruction "
            << xla_while->name();
    return absl::OkStatus();
  }
  VLOG(2) << "Adding copies for " << xla_while->name() << " at indices:";
  for (auto& pair : indices_to_copy) {
    if (pair.second) {
      VLOG(2) << " " << pair.first;
    }
  }
  // Copy the init operand and feed the copy into the while.
  HloInstruction* while_init = xla_while->mutable_operand(0);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * while_init_copy,
      xla_while->parent()->DeepCopyInstruction(while_init, &indices_to_copy));
  TF_RETURN_IF_ERROR(while_init->ReplaceUseWith(xla_while, while_init_copy));
  HloComputation* body = xla_while->while_body();
  HloInstruction* param = body->parameter_instruction(0);
  HloInstruction* root = body->root_instruction();
  TF_RET_CHECK(param != root);
  // Snapshot users before rewriting: the deep copy itself becomes a user.
  std::vector<HloInstruction*> param_users = param->users();
  TF_ASSIGN_OR_RETURN(auto pair,
                      DeepCopyAndAddControlEdges(param, root, indices_to_copy));
  HloInstruction* param_copy = pair.first;
  HloInstruction* root_copy = pair.second;
  for (HloInstruction* user : param_users) {
    TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, param_copy));
  }
  body->set_root_instruction(root_copy);
  return absl::OkStatus();
}
absl::Status AddCopiesForInPlaceOperation(
    const HloAliasAnalysis& alias_analysis, HloInstruction* in_place_op,
    int64_t operand_number) {
  VLOG(2) << "Adding copies for in-place operation " << in_place_op->name();
  // Deep-copy the targeted operand and make the in-place op consume the copy
  // instead, leaving the original buffer untouched.
  HloInstruction* source = in_place_op->mutable_operand(operand_number);
  TF_ASSIGN_OR_RETURN(HloInstruction * source_copy,
                      in_place_op->parent()->DeepCopyInstruction(source));
  return source->ReplaceUseWith(in_place_op, operand_number, source_copy);
}
// For every entry parameter that participates in an input/output alias,
// deep-copies the parameter at the aliased indices and deep-copies the root
// at the aliased output indices, then adds a control edge from each parameter
// copy to the matching output copy so reads of the input finish before the
// aliased output is written.  No-op for modules without aliases or when the
// entry thread is excluded.
absl::Status AddCopiesForAliasedInputOutputs(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  HloComputation* entry = module->entry_computation();
  if (!HloInstruction::IsThreadIncluded(entry->execution_thread(),
                                        execution_threads)) {
    return absl::OkStatus();
  }
  HloInstruction* root = entry->root_instruction();
  ShapeTree<bool> output_indices_to_copy(root->shape());
  // Per parameter number: the copy tree recorded when that parameter was
  // deep-copied (unset for parameters without aliases).
  std::vector<std::optional<ShapeTree<HloInstruction*>>> copied_parameters(
      entry->num_parameters());
  bool has_alias = false;
  for (auto* param : entry->parameter_instructions()) {
    bool param_has_alias = false;
    ShapeTree<bool> param_indices_to_copy(param->shape());
    // Mark both sides of every alias involving this parameter.
    module->input_output_alias_config().ForEachAlias(
        [&](const ShapeIndex& output_index,
            const HloInputOutputAliasConfig::Alias& alias) {
          if (alias.parameter_number == param->parameter_number()) {
            param_has_alias = true;
            *(param_indices_to_copy.mutable_element(alias.parameter_index)) =
                true;
            *(output_indices_to_copy.mutable_element(output_index)) = true;
          }
        });
    if (!param_has_alias) {
      continue;
    }
    TF_RET_CHECK(param->parameter_number() < entry->num_parameters());
    TF_RET_CHECK(!copied_parameters[param->parameter_number()]);
    has_alias = true;
    // Snapshot users before rewriting: the deep copy itself becomes a user.
    std::vector<HloInstruction*> users = param->users();
    ShapeTree<HloInstruction*> param_copy_tree(param->shape(),
                                               nullptr);
    TF_ASSIGN_OR_RETURN(HloInstruction * copied,
                        entry->DeepCopyInstruction(
                            param, &param_indices_to_copy, &param_copy_tree));
    if (param == root) {
      entry->set_root_instruction(copied);
      root = copied;
    }
    for (HloInstruction* user : users) {
      TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, copied));
    }
    copied_parameters[param->parameter_number()] = param_copy_tree;
  }
  if (!has_alias) {
    return absl::OkStatus();
  }
  // Deep-copy the root at all aliased output indices.
  ShapeTree<HloInstruction*> output_copy_tree(root->shape(),
                                              nullptr);
  TF_ASSIGN_OR_RETURN(HloInstruction * root_copied,
                      root->parent()->DeepCopyInstruction(
                          root, &output_indices_to_copy, &output_copy_tree));
  // Order each parameter copy before its aliased output copy.
  TF_RETURN_IF_ERROR(module->input_output_alias_config().ForEachAliasWithStatus(
      [&](const ShapeIndex& output_index,
          const HloInputOutputAliasConfig::Alias& alias) -> absl::Status {
        if (!copied_parameters[alias.parameter_number]) {
          return absl::OkStatus();
        }
        HloInstruction* from =
            copied_parameters[alias.parameter_number]->element(
                alias.parameter_index);
        HloInstruction* to = output_copy_tree.element(output_index);
        TF_RET_CHECK(from != nullptr);
        TF_RET_CHECK(to != nullptr);
        TF_RETURN_IF_ERROR(from->AddControlDependencyTo(to));
        return absl::OkStatus();
      }));
  entry->set_root_instruction(root_copied);
  return absl::OkStatus();
}
absl::Status StripControlDependenciesFrom(HloInstruction* instruction) {
  // Detach every outgoing control edge.
  while (!instruction->control_successors().empty()) {
    HloInstruction* successor = instruction->control_successors().front();
    TF_RETURN_IF_ERROR(instruction->RemoveControlDependencyTo(successor));
  }
  // Detach every incoming control edge.
  while (!instruction->control_predecessors().empty()) {
    HloInstruction* predecessor = instruction->control_predecessors().front();
    TF_RETURN_IF_ERROR(predecessor->RemoveControlDependencyTo(instruction));
  }
  return absl::OkStatus();
}
// Tracks, per computation, the instructions that participate in a live-range
// region. Iteration order over computations is the order in which they were
// first inserted (recorded in computation_vector_), giving deterministic
// traversal despite the underlying flat_hash_map.
class LiveRangeRegions {
 public:
  struct InstructionInfo {
    InstructionInfo() : value_definition(nullptr), is_definition(false) {}
    // Instruction defining the value this entry refers to (nullptr if unset).
    HloInstruction* value_definition;
    // True if the keyed instruction itself defines the value.
    bool is_definition;
    std::string ToString() const {
      return absl::StrCat(
          "is_definition: ", std::to_string(is_definition),
          ", value_definition: ",
          value_definition ? value_definition->name() : "nullptr");
    }
  };
  typedef HloInstructionMap<InstructionInfo> InstructionMap;
  typedef std::pair<HloInstruction*, InstructionInfo> InstructionEntry;
  typedef absl::flat_hash_map<const HloComputation*, InstructionMap>
      ComputationMap;
  // Returns the instruction map for `computation`, creating an empty one (and
  // recording the computation's insertion order) on first access. try_emplace
  // performs a single lookup instead of the previous find + operator[] pair.
  InstructionMap& operator[](const HloComputation* computation) {
    auto [it, inserted] = computation_map_.try_emplace(computation);
    if (inserted) {
      computation_vector_.push_back(computation);
    }
    return it->second;
  }
  // Const access; `computation` must already be tracked.
  const InstructionMap& operator[](const HloComputation* computation) const {
    ComputationMap::const_iterator p = computation_map_.find(computation);
    CHECK(p != computation_map_.end());
    return p->second;
  }
  // Iterates over computations in first-insertion order.
  absl::InlinedVector<const HloComputation*, 5>::const_iterator begin() const {
    return computation_vector_.begin();
  }
  absl::InlinedVector<const HloComputation*, 5>::const_iterator end() const {
    return computation_vector_.end();
  }
  int64_t size() const {
    // The vector and map must stay in sync by construction.
    CHECK_EQ(computation_vector_.size(), computation_map_.size());
    return computation_vector_.size();
  }
  bool empty() const { return size() == 0; }
  const HloComputation* Computation(int64_t index) const {
    return computation_vector_[index];
  }
  // Returns true if `instr`'s parent computation is tracked and contains an
  // entry for `instr`.
  bool contains(HloInstruction* instr) const {
    CHECK_NE(instr, nullptr);
    auto* computation = instr->parent();
    auto p = computation_map_.find(computation);
    if (p == computation_map_.end()) {
      return false;
    }
    // Bind by const reference: the previous `auto instr_map = (*p).second;`
    // deep-copied the entire InstructionMap on every membership query.
    const auto& instr_map = p->second;
    return instr_map.find(instr) != instr_map.end();
  }
  std::string ToString() const {
    std::string result;
    for (const auto* computation : computation_vector_) {
      StrAppend(&result, "computation: ", computation->name(), "\n");
      for (const auto& entry : computation_map_.at(computation)) {
        StrAppend(&result, " entry: ", entry.first->name(), ", ",
                  entry.second.ToString(), "\n");
      }
    }
    return result;
  }

 private:
  ComputationMap computation_map_;
  // Computations in the order first added; keeps iteration deterministic.
  absl::InlinedVector<const HloComputation*, 5> computation_vector_;
};
namespace {
class Relation {
public:
enum RuntimeOrder {
kNoOverlap = 0,
kSameInstr = 1,
kBeforeStart = 2,
kBeforeStartOrSameInstr = kBeforeStart | kSameInstr,
kAfterEnd = 4,
kAfterEndOrSameInstr = kAfterEnd | kSameInstr,
kBeforeStartOrAfterEnd = kBeforeStart | kAfterEnd,
kBeforeOrAfterOrOverlap = kBeforeStart | kAfterEnd | kSameInstr,
};
Relation() : intercept_def_use_(false) {}
explicit Relation(RuntimeOrder order, bool intercept_def_use = false)
: intercept_def_use_(intercept_def_use) {
orders_.push_back(order);
}
Relation(const Relation& that)
: intercept_def_use_(that.intercept_def_use_), orders_(that.orders_) {}
bool operator==(const Relation& that) const {
return intercept_def_use_ == that.intercept_def_use_ &&
absl::c_equal(orders_, that.orders_);
}
bool UseImpliesInterception() const {
CHECK_EQ(orders_.size(), 1);
return UseImpliesInterception(orders_[0]);
}
bool DefinitionImpliesInterception() const {
CHECK_EQ(orders_.size(), 1);
return DefinitionImpliesInterception(orders_[0]);
}
bool InterceptDefUse() const { return intercept_def_use_; }
void UpdateInterception(bool value) {
CHECK_EQ(orders_.size(), 1);
intercept_def_use_ = value;
}
Relation::RuntimeOrder GetRuntimeOrder() const {
if (orders_.empty()) {
return Relation::kNoOverlap;
}
CHECK_EQ(orders_.size(), 1);
return orders_[0];
}
bool RuntimeOrderOverlap() const {
return absl::c_any_of(orders_, ImpliesOverlap);
}
bool RuntimeOrderIsUnordered() const {
return orders_.size() == 1 && orders_[0] == kBeforeStartOrAfterEnd;
}
bool RuntimeOrderIsNoOverlap() const {
return orders_.empty() || (orders_.size() == 1 && orders_[0] == kNoOverlap);
}
bool RuntimeOrderIsRunBefore() const {
return orders_.size() == 1 && orders_[0] == kBeforeStart;
}
bool RuntimeOrderIsRunAfter() const {
return orders_.size() == 1 && orders_[0] == kAfterEnd;
}
std::string ToString() const {
return absl::StrCat("Interception = ", intercept_def_use_, ";",
absl::StrJoin(orders_, ","));
}
static bool DefinitionImpliesInterception(RuntimeOrder definition) {
return (definition == kAfterEnd || definition == kBeforeStartOrAfterEnd);
}
static bool UseImpliesInterception(RuntimeOrder use) {
return (use == kBeforeStart || use == kBeforeStartOrAfterEnd);
}
void UnionRelationFromSameSource(const Relation& rel) {
CHECK_LE(orders_.size(), 1);
CHECK_EQ(rel.orders_.size(), 1);
if (orders_.empty()) {
orders_.push_back(rel.orders_[0]);
} else {
orders_[0] = Union(orders_[0], rel.orders_[0]);
}
intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
}
void UnionRelationFromDifferentSource(const Relation& rel) {
if (rel.orders_.empty()) {
return;
}
CHECK_EQ(rel.orders_.size(), 1);
intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
for (auto& local_order : orders_) {
if (OverwriteIfSubsume(rel.orders_[0], &local_order)) {
return;
}
}
orders_.push_back(rel.orders_[0]);
}
static Relation::RuntimeOrder ReverseRuntimeOrder(RuntimeOrder order) {
switch (order) {
case kNoOverlap:
case kSameInstr:
case kBeforeStartOrAfterEnd:
case kBeforeOrAfterOrOverlap:
return order;
case kBeforeStart:
return kAfterEnd;
case kBeforeStartOrSameInstr:
return kAfterEndOrSameInstr;
case kAfterEnd:
return kBeforeStart;
case kAfterEndOrSameInstr:
return kBeforeStartOrSameInstr;
}
}
private:
bool intercept_def_use_;
absl::InlinedVector<RuntimeOrder, 4> orders_;
static RuntimeOrder Union(RuntimeOrder o1, RuntimeOrder o2) {
return static_cast<Relation::RuntimeOrder>(o1 | o2);
}
static bool ImpliesOverlap(RuntimeOrder o) {
return o >= RuntimeOrder::kBeforeStartOrAfterEnd;
}
static bool Subsume(RuntimeOrder o1, RuntimeOrder o2) {
return Union(o1, o2) == o1;
}
static bool OverwriteIfSubsume(RuntimeOrder o2, RuntimeOrder* o1) {
if (*o1 == o2) {
return true;
}
CHECK_NE(o1, nullptr);
if (Subsume(o2, *o1)) {
*o1 = o2;
return true;
} else if (Subsume(*o1, o2)) { | #include "xla/service/copy_insertion.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ::testing::NotNull;
using ::testing::UnorderedElementsAre;
// Returns the number of kCopy instructions in `computation`.
int64_t CountCopies(const HloComputation& computation) {
  int64_t copies = 0;
  for (const HloInstruction* instruction : computation.instructions()) {
    if (instruction->opcode() == HloOpcode::kCopy) {
      ++copies;
    }
  }
  return copies;
}
// Returns the total number of kCopy instructions across all computations in
// `module`.
int64_t CountCopies(const HloModule& module) {
  int64_t copies = 0;
  for (const HloComputation* computation : module.computations()) {
    copies += CountCopies(*computation);
  }
  return copies;
}
// Returns the number of control edges in `computation`. Each edge is counted
// exactly once, at its source (via control_successors).
int64_t CountControlEdges(const HloComputation& computation) {
  int64_t edges = 0;
  for (const HloInstruction* instruction : computation.instructions()) {
    edges += instruction->control_successors().size();
  }
  return edges;
}
// Returns the total number of control edges across all computations in
// `module`.
int64_t CountControlEdges(const HloModule& module) {
  int64_t edges = 0;
  for (const HloComputation* computation : module.computations()) {
    edges += CountControlEdges(*computation);
  }
  return edges;
}
// Base fixture for copy-insertion tests.
class CopyInsertionTest : public HloTestBase {
 protected:
  // Runs the CopyInsertion pass on `module` and asserts that it succeeds.
  // Logs the module before and after so failures are easy to diagnose.
  void InsertCopies(HloModule* module) {
    CopyInsertion copy_insertion;
    // Fix truncated word "inser" -> "insertion" in both log messages.
    VLOG(3) << "Before copy insertion: " << module->ToString();
    ASSERT_IS_OK(copy_insertion.Run(module).status());
    VLOG(2) << "After copy insertion: " << module->ToString();
  }
  const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
// A module whose root is tuple(parameter) gets a copy inserted on the
// parameter, yielding tuple(copy(parameter)).
TEST_F(CopyInsertionTest, SingleParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* x = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
  HloInstruction* tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({x}));
  EXPECT_THAT(x->users(), UnorderedElementsAre(tuple));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  // Root is now tuple(copy(x)) rather than tuple(x).
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(x)));
}
// A module whose root is tuple(constant) gets exactly one copy inserted,
// yielding tuple(copy(constant)).
TEST_F(CopyInsertionTest, SingleConstant) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({constant}));
  EXPECT_THAT(constant->users(), UnorderedElementsAre(tuple));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(constant)));
}
// Pre-existing copies that change layout must survive the pass: the two
// layout-reversing copies of the constant are kept, while the trailing copy
// of `add` at the root is elided (copy count goes from 3 to 2 and `add`
// becomes the root).
TEST_F(CopyInsertionTest, ExistingCopiesNotRemoved) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant =
      builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}})));
  // Build a copy shape identical to the constant's but with the layout
  // reversed, so copy_1/copy_2 are layout-changing copies.
  auto minor_to_major = LayoutUtil::MinorToMajor(constant->shape());
  Layout reversed_layout =
      LayoutUtil::MakeLayoutFromMajorToMinor(minor_to_major);
  Shape copy_shape = constant->shape();
  *copy_shape.mutable_layout() = reversed_layout;
  HloInstruction* copy_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
  HloInstruction* copy_2 = builder.AddInstruction(
      HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      constant->shape(), HloOpcode::kAdd, copy_1, copy_2));
  builder.AddInstruction(
      HloInstruction::CreateUnary(add->shape(), HloOpcode::kCopy, add));
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(CountCopies(*module), 3);
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_EQ(module->entry_computation()->root_instruction(), add);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Add(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
// Only values that reach the root tuple untransformed (constant2, x) receive
// copies; the add result needs none. Total inserted copies: 2.
TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  HloInstruction* x = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
  HloInstruction* y = builder.AddInstruction(
      HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "y"));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, constant1, y));
  builder.AddInstruction(HloInstruction::CreateTuple({constant2, x, add}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::Copy(constant2), op::Copy(x), op::Add(constant1, y)));
}
// A bitcast of a parameter at the root gets one copy inserted on top of the
// old root.
TEST_F(CopyInsertionTest, BitcastParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* x = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
  HloInstruction* bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), x));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(x->users(), UnorderedElementsAre(bitcast));
  HloInstruction* old_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(old_root));
}
// A bitcast of a constant at the root gets one copy inserted on top of the
// old root.
TEST_F(CopyInsertionTest, BitcastConstant) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant =
      builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR1<float>({1.0, 42.0})));
  HloInstruction* bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2}), constant));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(constant->users(), UnorderedElementsAre(bitcast));
  HloInstruction* old_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(old_root));
}
// A bitcast of a parameter feeding the root tuple: the copy lands on the
// bitcast, giving tuple(copy(bitcast)).
TEST_F(CopyInsertionTest, BitcastTupleElementParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* x = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
  HloInstruction* bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), x));
  builder.AddInstruction(HloInstruction::CreateTuple({bitcast}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(x->users(), UnorderedElementsAre(bitcast));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(bitcast)));
}
// Returning a nested-tuple parameter directly: the pass rebuilds the tuple
// structure and copies every leaf (3 copies), producing a new root.
TEST_F(CopyInsertionTest, NestedTupleParameter) {
  auto builder = HloComputation::Builder(TestName());
  // Param shape: ((f32[], s32[1,2,3]), f32[42]).
  builder.AddInstruction(HloInstruction::CreateParameter(
      0,
      ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
                                      ShapeUtil::MakeShape(S32, {1, 2, 3})}),
           ShapeUtil::MakeShape(F32, {42})}),
      "param0"));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(HloOpcode::kParameter,
            module->entry_computation()->root_instruction()->opcode());
  HloInstruction* old_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 3);
  HloInstruction* new_root = module->entry_computation()->root_instruction();
  EXPECT_NE(old_root, new_root);
  // New root mirrors the nested shape, with a copy at each leaf.
  EXPECT_THAT(
      new_root,
      op::Tuple(
          op::Tuple(
              op::Copy(op::GetTupleElement(op::GetTupleElement(old_root))),
              op::Copy(op::GetTupleElement(op::GetTupleElement(old_root)))),
          op::Copy(op::GetTupleElement(old_root))));
}
// Returning one element (itself a tuple) of a nested-tuple parameter: both
// leaves of the returned sub-tuple are copied (2 copies).
TEST_F(CopyInsertionTest, ElementOfNestedTupleParameter) {
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(HloInstruction::CreateParameter(
      0,
      ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
                                      ShapeUtil::MakeShape(S32, {1, 2, 3})}),
           ShapeUtil::MakeShape(F32, {42})}),
      "param0"));
  // Root is gte(param, 0): the inner (f32[], s32[1,2,3]) tuple.
  auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(param->shape(), {0}), param, 0));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(gte, module->entry_computation()->root_instruction());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::Copy(op::GetTupleElement(op::GetTupleElement(param))),
                op::Copy(op::GetTupleElement(op::GetTupleElement(param)))));
}
// Fixture for copy-insertion tests around while loops. Provides builders for
// loop condition/body computations with various dependence patterns between
// the tuple elements of the loop state (s32[] induction variable, f32[8]
// data, optionally nested). Builder-call order is significant: it determines
// the instruction graph each test asserts against.
class WhileCopyInsertionTest : public CopyInsertionTest {
 protected:
  WhileCopyInsertionTest() : module_(CreateNewVerifiedModule()) {}
  // Builds the loop condition: gte(loop_state, 0) < 10.
  std::unique_ptr<HloComputation> BuildConditionComputation(
      const Shape& loop_state_shape) {
    auto builder = HloComputation::Builder(TestName() + ".Condition");
    auto limit_const = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(10)));
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            limit_const->shape(), loop_state, 0));
    builder.AddInstruction(HloInstruction::CreateCompare(
        condition_result_shape_, induction_variable, limit_const,
        ComparisonDirection::kLt));
    return builder.Build();
  }
  // Body where element 1 depends on element 0:
  // returns (iv + 1, data + broadcast(convert(iv))).
  std::unique_ptr<HloComputation> BuildDependentBodyComputation() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
    auto data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
    auto convert = builder.AddInstruction(
        HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
    auto update = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, convert, {}));
    auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data, update));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    return builder.Build();
  }
  // Body over a 3-element state (iv, data, data):
  // returns (iv + 1, data1, data2) — elements 1 and 2 pass through untouched.
  std::unique_ptr<HloComputation> BuildDependentBodyComputation2() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    const Shape& loop_state_shape = ShapeUtil::MakeTupleShape(
        {induction_variable_shape_, data_shape_, data_shape_});
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
    HloInstruction* data1 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    HloInstruction* data2 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 2));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, data1, data2}));
    return builder.Build();
  }
  // Body where the induction variable (element 0) is read but passed through
  // unchanged: returns (iv, data + broadcast(convert(iv))).
  std::unique_ptr<HloComputation> BuildDependentBodyOneReadOnlyComputation() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
    auto convert = builder.AddInstruction(
        HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
    auto update = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, convert, {}));
    auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data, update));
    builder.AddInstruction(
        HloInstruction::CreateTuple({induction_variable, add1}));
    return builder.Build();
  }
  // Body where the two state elements are updated independently:
  // returns (iv + 1, data + constant). With `nested`, the data element is a
  // (f32[8], f32[8]) tuple and both leaves get the same updated value.
  std::unique_ptr<HloComputation> BuildIndependentBodyComputation(
      bool nested = false) {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    const Shape& loop_state_shape =
        nested ? nested_loop_state_shape_ : loop_state_shape_;
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
    HloInstruction* data = nullptr;
    if (nested) {
      // Drill into the nested tuple to reach the f32[8] leaf.
      data = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
          nested_tuple_shape_, loop_state, 1));
      data = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(data_shape_, data, 0));
    } else {
      data = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    }
    auto update = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
            {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
    auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data, update));
    if (nested) {
      auto nested_tuple =
          builder.AddInstruction(HloInstruction::CreateTuple({add1, add1}));
      builder.AddInstruction(HloInstruction::CreateTuple({add0, nested_tuple}));
    } else {
      builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    }
    return builder.Build();
  }
  // Body over the nested state (iv, (data0, data1)):
  // returns (iv + 1, (data0 + constant, reverse(data1))).
  std::unique_ptr<HloComputation> BuildNestedBodyComputation() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    auto loop_state = builder.AddInstruction(HloInstruction::CreateParameter(
        0, nested_loop_state_shape_, "loop_state"));
    auto gte0 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
        induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        gte0->shape(), HloOpcode::kAdd, gte0, inc));
    auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
        nested_tuple_shape_, loop_state, 1));
    auto gte10 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, gte1, 0));
    auto update10 = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
            {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
    auto add10 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, gte10, update10));
    auto gte11 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, gte1, 1));
    auto rev11 = builder.AddInstruction(
        HloInstruction::CreateReverse(data_shape_, gte11, {0}));
    auto inner_tuple =
        builder.AddInstruction(HloInstruction::CreateTuple({add10, rev11}));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, inner_tuple}));
    return builder.Build();
  }
  // Builds a while instruction in a fresh entry computation with a constant
  // init of (0, {0,...,0}) (nested: (0, ({0,...}, {0,...}))).
  HloInstruction* BuildWhileInstruction(HloComputation* condition,
                                        HloComputation* body,
                                        bool nested = false) {
    auto builder = HloComputation::Builder(TestName() + ".While");
    auto induction_var_init = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
    auto data_init = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
            {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
    if (nested) {
      auto inner_init = builder.AddInstruction(
          HloInstruction::CreateTuple({data_init, data_init}));
      auto loop_state_init = builder.AddInstruction(
          HloInstruction::CreateTuple({induction_var_init, inner_init}));
      auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
          loop_state_init->shape(), condition, body, loop_state_init));
      module_->AddEntryComputation(builder.Build());
      return while_hlo;
    }
    auto loop_state_init = builder.AddInstruction(
        HloInstruction::CreateTuple({induction_var_init, data_init}));
    auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
        loop_state_shape_, condition, body, loop_state_init));
    module_->AddEntryComputation(builder.Build());
    return while_hlo;
  }
  // While loop whose data init element points at a constant.
  HloInstruction* BuildWhileInstruction_InitPointsToConstant() {
    auto builder = HloComputation::Builder(TestName() + ".While");
    auto data_init = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
            {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
    return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
                                               &builder);
  }
  // While loop whose data init element points at an entry parameter.
  HloInstruction* BuildWhileInstruction_InitPointsToParameter() {
    auto builder = HloComputation::Builder(TestName() + ".While");
    auto data_init = builder.AddInstruction(
        HloInstruction::CreateParameter(0, data_shape_, "data_init"));
    return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
                                               &builder);
  }
  // While loop whose nested data init is (one_vec, one_vec) — the same
  // instruction appearing twice, i.e. a non-distinct init.
  HloInstruction* BuildWhileInstruction_InitPointsToNonDistinct() {
    auto builder = HloComputation::Builder(TestName() + ".While");
    auto one = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
    auto one_vec = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, one, {}));
    auto data_init =
        builder.AddInstruction(HloInstruction::CreateTuple({one_vec, one_vec}));
    return BuildWhileInstructionWithCustomInit(nested_loop_state_shape_,
                                               data_init, &builder);
  }
  // While loop whose data init (`data_init`) is also consumed by `add`,
  // which in turn is combined with the loop output — so the init value
  // interferes with the loop's live range. The entry root is rebuilt to
  // (gte0(while), add - gte1(while)).
  HloInstruction* BuildWhileInstruction_InitPointsToInterfering() {
    auto builder = HloComputation::Builder(TestName() + ".While");
    auto one = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
    auto data_init = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, one, {}));
    auto one_vec = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
            {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
    auto add = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data_init, one_vec));
    auto xla_while = BuildWhileInstructionWithCustomInit(loop_state_shape_,
                                                         data_init, &builder);
    auto gte = xla_while->parent()->AddInstruction(
        HloInstruction::CreateGetTupleElement(
            ShapeUtil::GetSubshape(xla_while->shape(), {1}), xla_while, 1));
    auto sub = xla_while->parent()->AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kSubtract, add, gte));
    auto gte0 = xla_while->parent()->AddInstruction(
        HloInstruction::CreateGetTupleElement(
            ShapeUtil::GetSubshape(xla_while->shape(), {0}), xla_while, 0));
    auto tuple = xla_while->parent()->AddInstruction(
        HloInstruction::CreateTuple({gte0, sub}));
    xla_while->parent()->set_root_instruction(tuple);
    return xla_while;
  }
  // Shared helper: builds condition + independent body (nested inferred from
  // the shape), wraps (0, data_init) in a while, and installs the entry
  // computation.
  HloInstruction* BuildWhileInstructionWithCustomInit(
      const Shape& loop_state_shape, HloInstruction* data_init,
      HloComputation::Builder* builder) {
    const bool nested =
        ShapeUtil::Equal(loop_state_shape, nested_loop_state_shape_);
    auto induction_var_init = builder->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
    auto condition = module_->AddEmbeddedComputation(
        BuildConditionComputation(loop_state_shape));
    auto body = module_->AddEmbeddedComputation(
        BuildIndependentBodyComputation(nested));
    auto loop_state_init = builder->AddInstruction(
        HloInstruction::CreateTuple({induction_var_init, data_init}));
    auto while_hlo = builder->AddInstruction(HloInstruction::CreateWhile(
        loop_state_shape, condition, body, loop_state_init));
    module_->AddEntryComputation(builder->Build());
    return while_hlo;
  }
  // Module under construction; created fresh per test instance.
  std::unique_ptr<HloModule> module_;
  // Loop state shapes: (s32[], f32[8]) flat, and (s32[], (f32[8], f32[8]))
  // nested.
  Shape induction_variable_shape_ = ShapeUtil::MakeShape(S32, {});
  Shape data_shape_ = ShapeUtil::MakeShape(F32, {8});
  Shape loop_state_shape_ =
      ShapeUtil::MakeTupleShape({induction_variable_shape_, data_shape_});
  Shape nested_tuple_shape_ =
      ShapeUtil::MakeTupleShape({data_shape_, data_shape_});
  Shape nested_loop_state_shape_ = ShapeUtil::MakeTupleShape(
      {induction_variable_shape_, nested_tuple_shape_});
  Shape condition_result_shape_ = ShapeUtil::MakeShape(PRED, {});
};
// With independently-updated loop state elements, no copies are needed
// inside the body and no control edges are added; only the loop init tuple
// gets its elements copied.
TEST_F(WhileCopyInsertionTest, IndependentTupleElements) {
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body =
      module_->AddEmbeddedComputation(BuildIndependentBodyComputation());
  auto while_hlo = BuildWhileInstruction(condition, body);
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body), 0);
  EXPECT_EQ(CountControlEdges(*module_), 0);
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterWithCopies) {
const std::string& hlo_string = R"(
HloModule DependentTupleElements
%DependentTupleElements.Body (loop_state.1: (s32[], f32[8])) -> (s32[], f32[8]) {
%loop_state.1 = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=1
%convert = f32[] convert(s32[] %get-tuple-element.1)
%broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
%add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
ROOT %tuple = (s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1)
}
%DependentTupleElements.Condition (loop_state: (s32[], f32[8])) -> pred[] {
%loop_state = (s32[], f32[8]{0}) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state), index=0
%constant = s32[] constant(10)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %DependentTupleElements.While () -> (s32[], f32[8]) {
%constant.2 = s32[] constant(0)
%constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
%tuple.1 = (s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3)
ROOT %while.1 = (s32[], f32[8]{0}) while((s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
auto while_hlo = module_->entry_computation()->root_instruction();
HloComputation* outer_while_condition =
module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
HloComputation* outer_while_body =
module_-> |
1,942 | cpp | tensorflow/tensorflow | while_loop_analysis | third_party/xla/xla/service/while_loop_analysis.cc | third_party/xla/xla/service/while_loop_analysis_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_ANALYSIS_H_
#define XLA_SERVICE_WHILE_LOOP_ANALYSIS_H_
#include <optional>
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
std::optional<int64_t> ComputeWhileLoopTripCount(
const HloInstruction *while_op, int64_t max_brute_force_iters = 128);
std::optional<int64_t> ComputeWhileLoopTripCountUpperBound(
const HloInstruction *while_op);
std::vector<const HloInstruction *> GetAuxiliaryLoopInductionVars(
const HloInstruction *while_op);
std::optional<int64_t> GetLoopInductionVarTupleIdx(
const HloInstruction *while_op);
std::optional<int64_t> MatchTrivialLoopTripCount(const HloInstruction *while_op,
int64_t indvar_tuple_idx,
const Literal &indvar_init);
}
#endif
#include "xla/service/while_loop_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
using std::nullopt;
using std::optional;
namespace m = match;
// Returns the unique non-constant operand of `instr`. CHECK-fails if there is
// no non-constant operand, or if two distinct non-constant operands exist.
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
  const HloInstruction* non_constant = nullptr;
  for (const HloInstruction* operand : instr->operands()) {
    if (operand->IsConstant()) {
      continue;  // Constants are ignored.
    }
    if (non_constant != nullptr) {
      // Every non-constant operand must be the same instruction.
      CHECK_EQ(non_constant, operand);
    }
    non_constant = operand;
  }
  CHECK_NE(non_constant, nullptr);
  return non_constant;
}
// If every non-constant operand of `instr` is a get-tuple-element of
// `gte_operand` (possibly behind a single kCopy) reading the *same* tuple
// index, returns that index; otherwise returns nullopt. Note that nullopt
// is also returned when all operands are constants (no index is bound).
static optional<int64_t> GetGTEOperandIndex(const HloInstruction* instr,
                                            const HloInstruction* gte_operand) {
  VLOG(2) << "GetGTEOperandIndex(" << instr->ToString() << ", "
          << gte_operand->ToString() << ")";
  optional<int64_t> tuple_idx;
  for (const HloInstruction* operand : instr->operands()) {
    // Constant operands do not constrain the tuple index.
    if (Match(operand, m::Constant())) {
      continue;
    }
    // Look through one level of copy (e.g. inserted by layout assignment).
    auto possibly_gte_operand = operand;
    if (operand->opcode() == HloOpcode::kCopy) {
      possibly_gte_operand = operand->operand(0);
    }
    if (possibly_gte_operand->opcode() != HloOpcode::kGetTupleElement) {
      return nullopt;
    }
    // The GTE must read specifically from `gte_operand`.
    if (!Match(possibly_gte_operand,
               m::GetTupleElement(m::Op().Is(gte_operand)))) {
      return nullopt;
    }
    int64_t operand_tuple_idx = possibly_gte_operand->tuple_index();
    if (!tuple_idx.has_value()) {
      tuple_idx = operand_tuple_idx;
    } else {
      // All non-constant operands must agree on a single tuple index.
      if (operand_tuple_idx != tuple_idx) {
        return nullopt;
      }
    }
  }
  return tuple_idx;
}
// Identifies "auxiliary" induction variables of `while_op`: tuple elements
// read by exactly one get-tuple-element in the body, written by exactly one
// non-constant operand of the body root at the same index, where the write
// is reachable from the read through a chain of ops with a single
// non-constant input (GTE/add/mul/div/sub). Returns the extracting GTEs.
std::vector<const HloInstruction*> GetAuxiliaryLoopInductionVars(
    const HloInstruction* while_op) {
  std::vector<const HloInstruction*> aux_ind_gte;
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_body = while_op->while_body();
  auto* while_body_param = while_body->parameter_instruction(0);
  VLOG(2) << "Aux Induction Variables for loop:" << while_op->ToShortString();
  VLOG(2) << "the parameter instr:" << while_body_param->ToShortString();
  VLOG(2) << "the parameter user count:" << while_body_param->users().size();
  if (while_body_param == nullptr) return aux_ind_gte;
  // Tuple index -> the unique GTE reading it; nullptr marks an index read
  // by more than one GTE (disqualified).
  std::map<int64_t, const HloInstruction*> extractions;
  for (const HloInstruction* indx_instr : while_body_param->users()) {
    if (indx_instr->opcode() != HloOpcode::kGetTupleElement) {
      continue;
    }
    auto it = extractions.find(indx_instr->tuple_index());
    if (it != extractions.end()) {
      it->second = nullptr;
      VLOG(2) << "two extractions at same index:" << indx_instr->ToString();
    } else {
      extractions.insert(std::make_pair(indx_instr->tuple_index(), indx_instr));
      VLOG(2) << "inserting extraction :" << indx_instr->ToString();
    }
  }
  VLOG(2) << "total extractions size:" << extractions.size() << std::endl;
  if (extractions.empty()) {
    return aux_ind_gte;
  }
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While body root is not a tuple:" << while_body_root->ToString();
    return aux_ind_gte;
  }
  // Tuple index -> the non-constant operand of the body root at that index.
  int64_t index = -1;
  std::map<int64_t, const HloInstruction*> insertions;
  for (const HloInstruction* operand : while_body_root->operands()) {
    index++;
    if (!operand->IsConstant()) {
      auto it = insertions.find(index);
      if (it != insertions.end()) {
        it->second = nullptr;
        VLOG(2) << "two insertions at same index:" << operand->ToString();
      } else {
        insertions.insert(std::make_pair(index, operand));
        VLOG(2) << "inserting insertions:" << operand->ToString();
      }
    }
  }
  if (insertions.empty()) {
    return aux_ind_gte;
  }
  // Pair up distinct (extraction, insertion) instructions per tuple index.
  // `index` is left at the last root-operand index by the loop above.
  std::map<int64_t, std::pair<const HloInstruction*, const HloInstruction*>>
      candidate_pairs;
  for (; index >= 0; --index) {
    const HloInstruction *ext, *inst;
    ext = (extractions.find(index) != extractions.end())
              ? extractions.find(index)->second
              : nullptr;
    inst = (insertions.find(index) != insertions.end())
               ? insertions.find(index)->second
               : nullptr;
    if (ext != nullptr && inst != nullptr) {
      // ext == inst would mean the element is passed through unchanged.
      if (ext != inst) {
        candidate_pairs.insert(
            std::make_pair(index, std::make_pair(ext, inst)));
      }
    }
  }
  VLOG(2) << "total candidate pairs:" << candidate_pairs.size() << std::endl;
  // Restrict reachability edges to ops with exactly one non-constant input
  // so that "reachable" means "updated via a simple arithmetic chain".
  const auto add_dependencies = [](const HloInstruction* hlo,
                                   std::vector<HloInstruction*>* inputs) {
    HloInstruction* non_const_operand = nullptr;
    int num_non_constants = 0;
    for (HloInstruction* operand : hlo->operands()) {
      if (!operand->IsConstant()) {
        num_non_constants++;
        non_const_operand = operand;
      }
    }
    if (num_non_constants == 1 &&
        (hlo->opcode() == HloOpcode::kGetTupleElement ||
         hlo->opcode() == HloOpcode::kAdd ||
         hlo->opcode() == HloOpcode::kMultiply ||
         hlo->opcode() == HloOpcode::kDivide ||
         hlo->opcode() == HloOpcode::kSubtract)) {
      inputs->push_back(non_const_operand);
    }
  };
  std::unique_ptr<HloReachabilityMap> hrm =
      HloReachabilityMap::BuildWithRestrictions(
          while_body,
          absl::FunctionRef<void(const HloInstruction* hlo,
                                 std::vector<HloInstruction*>* inputs)>(
              add_dependencies));
  // Keep pairs where the inserted value is reachable from the extraction.
  for (auto candidates : candidate_pairs) {
    VLOG(2) << "are reachable?:" << (candidates.second.first)->ToString()
            << "*************" << (candidates.second.second)->ToString()
            << std::endl;
    if (hrm->IsReachable(candidates.second.first, candidates.second.second)) {
      aux_ind_gte.push_back(candidates.second.first);
      VLOG(2) << "YES";
    } else {
      VLOG(2) << "NO";
    }
  }
  VLOG(2) << "num auxiliary candidates :" << aux_ind_gte.size();
  return aux_ind_gte;
}
// Identifies the induction variable of `while_op` and returns its tuple
// index. Succeeds only when the loop condition and the body's update of
// that element both read the same tuple index of their parameters and the
// while init is a tuple.
optional<int64_t> GetLoopInductionVarTupleIdx(const HloInstruction* while_op) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  VLOG(2) << "Finding induction variable for loop "
          << while_op->ToShortString();
  // The condition's non-constant operands must all be GTEs of one index.
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_param = while_cond->parameter_instruction(0);
  optional<int64_t> indvar_tuple_idx =
      GetGTEOperandIndex(while_cond_root, while_cond_param);
  if (!indvar_tuple_idx) {
    VLOG(2) << "Induction variable not found in loop condition: "
            << while_cond->root_instruction()->ToString();
    return nullopt;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While body's root is not a tuple instruction: "
            << while_body_root->ToString();
    return nullopt;
  }
  // The body's update of that element must read the same tuple index.
  auto* while_body_inc = while_body_root->operand(*indvar_tuple_idx);
  auto* while_body_param = while_body->parameter_instruction(0);
  optional<int64_t> while_body_indvar_tuple_idx =
      GetGTEOperandIndex(while_body_inc, while_body_param);
  if (!while_body_indvar_tuple_idx) {
    VLOG(2)
        << "Induction variable not found in while body increment instruction: "
        << while_body_inc->ToString();
    return nullopt;
  }
  if (while_body_indvar_tuple_idx != indvar_tuple_idx) {
    VLOG(2) << "Tuple index of induction variable does not match between loop "
               "condition ("
            << *indvar_tuple_idx << ") and while body ("
            << *while_body_indvar_tuple_idx << ")";
    return nullopt;
  }
  // The loop's initial value must come from a tuple instruction.
  auto* while_init = while_op->operand(0);
  if (while_init->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While init expected to be a tuple: " << while_init->ToString();
    return nullopt;
  }
  VLOG(2) << "Induction variable's tuple index: " << *indvar_tuple_idx;
  return indvar_tuple_idx;
}
// Adds two signed 64-bit values, returning nullopt on overflow. The sum is
// computed on unsigned values (wraparound is well defined); signed
// overflow occurred iff `a` and `b` share a sign but the result's sign
// differs from theirs.
std::optional<int64_t> CheckedAdd(int64_t a, int64_t b) {
  const uint64_t ua = static_cast<uint64_t>(a);
  const uint64_t ub = static_cast<uint64_t>(b);
  const int64_t sum = static_cast<int64_t>(ua + ub);
  const bool same_sign = (a >= 0) == (b >= 0);
  if (same_sign && (sum >= 0) != (a >= 0)) {
    return std::nullopt;
  }
  return sum;
}
// Subtracts two signed 64-bit values, returning nullopt on overflow. The
// difference is computed on unsigned values; signed overflow occurred iff
// the operands' signs differ and the result's sign matches `b`'s.
std::optional<int64_t> CheckedSubtract(int64_t a, int64_t b) {
  const uint64_t ua = static_cast<uint64_t>(a);
  const uint64_t ub = static_cast<uint64_t>(b);
  const int64_t diff = static_cast<int64_t>(ua - ub);
  const bool signs_differ = (a >= 0) != (b >= 0);
  if (signs_differ && (diff >= 0) == (b >= 0)) {
    return std::nullopt;
  }
  return diff;
}
// Pattern-matches loops of the form
//   for (i = indvar_init; i < N; i += step)   or   i <= N
// where `step` is a positive scalar integer constant, and returns the trip
// count; nullopt when the loop does not match.
optional<int64_t> MatchTrivialLoopTripCount(const HloInstruction* while_op,
                                            int64_t indvar_tuple_idx,
                                            const Literal& indvar_init) {
  // The induction variable's initial value must be an int64-representable
  // scalar.
  optional<int64_t> indvar_init_val =
      LiteralUtil::LiteralAsScalarInt64(indvar_init);
  if (!indvar_init_val) {
    VLOG(2) << "Pattern-match failed: induction variable init is not a "
               "constant scalar representable as an int64_t: "
            << indvar_init.ToString();
    return nullopt;
  }
  // The body must update the induction variable as `indvar + step`.
  auto* while_body = while_op->while_body();
  auto* while_body_indvar_update =
      while_body->root_instruction()->mutable_operand(indvar_tuple_idx);
  auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
  HloInstruction* trip_count_increase_step_instr = nullptr;
  int64_t trip_count_step = 0;
  if (!Match(while_body_indvar_update,
             m::AddAnyOrder(m::Op().Is(while_body_indvar),
                            m::Op(&trip_count_increase_step_instr)))) {
    if (trip_count_increase_step_instr == nullptr) {
      VLOG(2) << "Pattern-match failed: induction variable is not getting "
                 "updated by an add operation: "
              << while_body_indvar_update->ToString();
      return nullopt;
    }
    if (!trip_count_increase_step_instr->IsConstant() ||
        !ShapeUtil::IsEffectiveScalar(
            trip_count_increase_step_instr->shape())) {
      VLOG(2) << "Pattern-match failed: induction variable is not getting "
                 "incremented by constant: "
              << while_body_indvar_update->ToString();
      return nullopt;
    }
    if (!LiteralUtil::LiteralAsScalarInt64(
             trip_count_increase_step_instr->literal())
             .has_value()) {
      VLOG(2)
          << "Pattern-match failed: trip count step is not an integral type: "
          << trip_count_increase_step_instr->shape().ToString();
      return nullopt;
    }
    // NOTE(review): at this point the add pattern did not match but a
    // constant scalar integer step instruction was still bound; control
    // falls through and that step is used below anyway — confirm this
    // fallback is intentional.
    VLOG(2) << "Pattern-match for trip count step failed: "
            << trip_count_increase_step_instr->ToString();
  }
  trip_count_step = LiteralUtil::LiteralAsScalarInt64(
                        trip_count_increase_step_instr->literal())
                        .value();
  if (trip_count_step <= 0) {
    VLOG(2) << "Pattern-match failed: trip count step is not a natural number: "
            << trip_count_step;
    return nullopt;
  }
  // The condition must compare the induction variable against a constant
  // effective-scalar bound N (in either operand order).
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_indvar = NonConstantOperand(while_cond_root);
  HloInstruction* while_cond_bound = nullptr;
  if (!Match(while_cond_root,
             m::Op().WithBinaryOperandsAnyOrder(
                 m::Op().Is(while_cond_indvar),
                 m::ConstantEffectiveScalar(&while_cond_bound)))) {
    VLOG(2) << "Pattern-match failed: while condition is not of the form "
               "op(i, N) or op(N, i).";
    return nullopt;
  }
  optional<int64_t> while_cond_bound_val =
      LiteralUtil::LiteralAsScalarInt64(while_cond_bound->literal());
  if (!while_cond_bound_val) {
    VLOG(2) << "Pattern-match failed: while condition induction variable is "
               "not a constant scalar representable as an int64_t.";
    return nullopt;
  }
  // Case 1: condition is `i < N`.
  if (Match(while_cond_root,
            m::Op()
                .WithComparisonDirection(ComparisonDirection::kLt)
                .WithOperand(0, m::Op().Is(while_cond_indvar)))) {
    VLOG(2) << "Pattern-match succeeded: loop condition is i < N: "
            << while_cond_root->ToString();
    optional<int64_t> trips =
        CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
    if (trips) {
      // NOTE(review): std::remainder/std::floor are floating-point
      // functions (round-to-nearest remainder, not integer `%`); confirm
      // this is the intended rounding for integer trips/step.
      const int64_t remainder = std::remainder(*trips, trip_count_step);
      const int64_t div = std::floor(*trips / trip_count_step);
      if (remainder == 0) {
        return std::max(int64_t{0}, div);
      }
      trips = CheckedAdd(div, 1);
      if (!trips) {
        VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
        return nullopt;
      }
      if (*trips < *while_cond_bound_val) {
        return std::max(int64_t{0}, *trips);
      }
      return std::max(int64_t{0}, div);
    }
    VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
    return nullopt;
  }
  // Case 2: condition is `i <= N`; trips = floor((N - init)/step) + 1.
  if (Match(while_cond_root,
            m::Op()
                .WithComparisonDirection(ComparisonDirection::kLe)
                .WithOperand(0, m::Op().Is(while_cond_indvar)))) {
    VLOG(2) << "Pattern-match succeeded: loop condition is i <= N: "
            << while_cond_root->ToString();
    optional<int64_t> trips =
        CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
    if (!trips) {
      VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
      return nullopt;
    }
    trips = CheckedAdd(std::floor(*trips / trip_count_step), 1);
    if (!trips) {
      VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
      return nullopt;
    }
    return std::max<int64_t>(0, *trips);
  }
  VLOG(2) << "Pattern-match failed: while condition follows unknown pattern: "
          << while_cond_root->ToString();
  return nullopt;
}
// Computes the exact trip count of `while_op`. Tries the closed-form
// pattern match first; on failure, brute-forces by evaluating the loop
// condition and induction-variable update for up to
// `max_brute_force_iters` iterations with the HLO evaluator.
optional<int64_t> ComputeWhileLoopTripCount(const HloInstruction* while_op,
                                            int64_t max_brute_force_iters) {
  VLOG(2) << "Getting trip count for loop " << while_op->ToString();
  // An identifiable induction variable is a prerequisite for both paths.
  optional<int64_t> indvar_tuple_idx = GetLoopInductionVarTupleIdx(while_op);
  if (!indvar_tuple_idx) {
    return nullopt;
  }
  HloEvaluator evaluator(0);
  auto* while_init = while_op->operand(0);
  auto* indvar_init = while_init->operand(*indvar_tuple_idx);
  absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
  if (!indvar_init_result.ok()) {
    VLOG(2) << "Couldn't evaluate induction variable init, "
            << indvar_init_result.status() << ", " << indvar_init->ToString();
    return nullopt;
  }
  Literal indvar_iter_val = std::move(indvar_init_result).value();
  // Fast path: closed-form trip count.
  if (auto trip_count = MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx,
                                                  indvar_iter_val)) {
    return trip_count;
  }
  // Slow path: iterate condition/update with the evaluator, substituting
  // the current induction-variable value each round.
  auto* while_body = while_op->while_body();
  auto* while_body_indvar_update =
      while_body->root_instruction()->operand(*indvar_tuple_idx);
  auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_indvar = NonConstantOperand(while_cond_root);
  for (int64_t trip_count = 0; trip_count != max_brute_force_iters + 1;
       ++trip_count) {
    absl::StatusOr<Literal> result = evaluator.EvaluateWithSubstitutions(
        while_cond_root, {{while_cond_indvar, &indvar_iter_val}});
    if (!result.ok()) {
      VLOG(2) << "Couldn't evaluate while cond: " << result.status();
      return nullopt;
    }
    // Condition evaluated to false: the loop exits here.
    if (result.value().data<bool>() == absl::Span<const bool>{false}) {
      VLOG(2) << "Loop has static trip count of " << trip_count;
      return trip_count;
    }
    // Advance the induction variable by one iteration.
    absl::StatusOr<Literal> indvar_next_result =
        evaluator.EvaluateWithSubstitutions(
            while_body_indvar_update, {{while_body_indvar, &indvar_iter_val}});
    if (!indvar_next_result.ok()) {
      VLOG(2) << "Couldn't evaluate induction variable update: "
              << indvar_next_result.status();
      return nullopt;
    }
    indvar_iter_val = std::move(indvar_next_result).value();
  }
  VLOG(2) << "Loop has unknown trip count.";
  return nullopt;
}
// Returns the sole user of `inst` if that user is a get-tuple-element;
// otherwise (multiple users, or a non-GTE user) returns nullptr.
static HloInstruction* GetOnlyGTE(HloInstruction* inst) {
  if (inst->user_count() != 1) {
    return nullptr;
  }
  HloInstruction* sole_user = inst->users().back();
  return sole_user->opcode() == HloOpcode::kGetTupleElement ? sole_user
                                                            : nullptr;
}
// Returns an upper bound on the trip count of `while_op`. Falls back from
// the exact trip count to one special case: if the body overwrites the
// sole tuple element read by the condition with a constant, the condition
// is re-evaluated on that constant; if it is then false, the loop can run
// at most once.
optional<int64_t> ComputeWhileLoopTripCountUpperBound(
    const HloInstruction* while_op) {
  auto exact_trip_count = ComputeWhileLoopTripCount(while_op);
  if (exact_trip_count) {
    VLOG(2) << "Loop has exact trip count.";
    return exact_trip_count;
  }
  auto* while_cond = while_op->while_condition();
  auto* while_cond_param = while_cond->parameter_instruction(0);
  // The condition must read exactly one tuple element of its parameter.
  auto* cond_gte = GetOnlyGTE(while_cond_param);
  if (!cond_gte) {
    VLOG(2) << "Induction variable not found in loop condition: "
            << while_cond->root_instruction()->ToString();
    return nullopt;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(3) << "While body's root is not a tuple instruction: "
            << while_body_root->ToString();
    return nullopt;
  }
  // The body must set that element to a constant.
  int64_t indvar_index = cond_gte->tuple_index();
  auto* while_body_indvar = while_body_root->operand(indvar_index);
  if (while_body_indvar->opcode() != HloOpcode::kConstant) {
    VLOG(3) << "While body does not set the IV to a constant: "
            << while_body_indvar->ToString();
    return nullopt;
  }
  // Clone the condition into a throwaway module, narrowing its parameter
  // to a one-element tuple holding just the element it actually reads.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  auto new_param = HloInstruction::CreateParameter(
      0, ShapeUtil::MakeTupleShape({cond_gte->shape()}), "temp");
  replacements[cond_gte] =
      HloInstruction::CreateGetTupleElement(new_param.get(), 0);
  replacements[while_cond_param] = std::move(new_param);
  auto new_module = std::make_unique<HloModule>("temp_mod", HloModuleConfig{});
  auto* new_computation = new_module->AddEmbeddedComputation(
      while_cond->CloneWithReplacements(&replacements));
  // Evaluate the cloned condition on the constant the body produces.
  HloEvaluator evaluator(0);
  Literal fake_input = Literal::CreateFromShape(
      new_computation->parameter_instruction(0)->shape());
  TF_CHECK_OK(fake_input.CopyFrom(while_body_indvar->literal(),
                                  {0},
                                  {}));
  absl::StatusOr<Literal> eval_result =
      evaluator.Evaluate(*new_computation, {std::move(fake_input)});
  if (!eval_result.ok()) {
    VLOG(2) << "Couldn't evaluate while loop condition.";
    return nullopt;
  }
  Literal cond_result_pred = std::move(eval_result.value());
  CHECK(Shape::Equal().IgnoreLayout()(cond_result_pred.shape(),
                                      ShapeUtil::MakeShape(PRED, {})));
  // If the condition rejects the constant, a second iteration is
  // impossible — the bound is 1.
  bool cond_returns_true = cond_result_pred.GetFirstElement<bool>();
  if (!cond_returns_true) {
    VLOG(2) << "Upper bound on the trip count is 1";
    return 1;
  }
  VLOG(2) << "Loop has no known upper bound on the trip count.";
  return nullopt;
}
} | #include "xla/service/while_loop_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Test fixture providing a helper that builds a canonical counted while
// loop in HLO and pattern-matches its trip count.
class WhileLoopAnalysisTest : public HloTestBase {
 protected:
  // Builds `for (i = init; i <dir> limit; i += step)` as a while loop and
  // returns the trip count from MatchTrivialLoopTripCount.
  [[nodiscard]] absl::StatusOr<int64_t> MakeWhileLoopAndGetTripCount(
      int init, int limit, int step, ComparisonDirection dir);
};
// Instantiates the HLO template below with the given init/limit/step and
// comparison direction, parses it, and returns the trip count computed by
// MatchTrivialLoopTripCount for the induction variable at tuple index 1.
absl::StatusOr<int64_t> WhileLoopAnalysisTest::MakeWhileLoopAndGetTripCount(
    int init, int limit, int step, ComparisonDirection dir) {
  std::string hlo_string_template = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val = f32[2] get-tuple-element(p_body), index=0
    index = s32[] get-tuple-element(p_body), index=1
    one = s32[] constant({{STEP}})
    inc = s32[] add(index, one)
    ROOT root = (f32[2], s32[]) tuple(val, inc)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant({{LIMIT}})
    ROOT result = pred[] compare(gte, const), direction={{COMP_DIR}}
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] constant({{INIT}})
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  }
  )";
  // Substitute the template placeholders with concrete values.
  std::string hlo_string =
      absl::StrReplaceAll(hlo_string_template,
                          {{"{{INIT}}", absl::StrCat(init)},
                           {"{{LIMIT}}", absl::StrCat(limit)},
                           {"{{STEP}}", absl::StrCat(step)},
                           {"{{COMP_DIR}}", ComparisonDirectionToString(dir)}});
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
                      ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  // The induction variable's initial value is the "param.1" constant.
  std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
      while_op, 1,
      Cast<HloConstantInstruction>(
          module->GetComputationWithName("entry")->GetInstructionWithName(
              "param.1"))
          ->literal());
  CHECK(trip_count.has_value());
  return *trip_count;
}
// The body overwrites the element the condition reads with constant -1,
// and the condition requires it to equal 42 — so the loop can execute at
// most once and the upper bound must be 1.
TEST_F(WhileLoopAnalysisTest, SingleIterationUpperBound) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val = f32[2] get-tuple-element(p_body), index=0
    const = s32[] constant(-1)
    ROOT root = (f32[2], s32[]) tuple(val, const)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 1);
}
// The body sets the condition's element to constant 42 and the condition
// tests equality with 42, so the condition stays true — no upper bound
// can be derived.
TEST_F(WhileLoopAnalysisTest, NoUpperBound) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val = f32[2] get-tuple-element(p_body), index=0
    const = s32[] constant(42)
    ROOT root = (f32[2], s32[]) tuple(val, const)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  EXPECT_EQ(ComputeWhileLoopTripCountUpperBound(while_op), std::nullopt);
}
// Reference implementation: counts iterations of
// `for (i = init; i <dir> limit; i += step)` for dir in {kLt, kLe}.
// LOG(FATAL)s on any other comparison direction.
int CalculateTripCount(int init, int limit, int step, ComparisonDirection dir) {
  int iterations = 0;
  switch (dir) {
    case ComparisonDirection::kLt:
      for (int i = init; i < limit; i += step) {
        ++iterations;
      }
      break;
    case ComparisonDirection::kLe:
      for (int i = init; i <= limit; i += step) {
        ++iterations;
      }
      break;
    default:
      LOG(FATAL) << "Unknown comparison direction: "
                 << ComparisonDirectionToString(dir);
  }
  return iterations;
}
// Cross-checks MatchTrivialLoopTripCount against the reference counter for
// several (init, limit, step) combinations under both `<` and `<=`,
// including steps that do and do not divide the range evenly.
TEST_F(WhileLoopAnalysisTest, ExactBoundTrivialTripCount) {
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 1, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 2, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 5, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 40, 5, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 1, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 2, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 5, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 40, 5, ComparisonDirection::kLe));
}
// The updates `add(val2, val3)` / `subtract(add, val3)` each have two
// non-constant inputs, so no simple single-input arithmetic chain exists
// and no auxiliary induction variables should be found.
TEST_F(WhileLoopAnalysisTest, NoAIVNoConstChain) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[], s32[]) parameter(0)
    val1 = f32[2] get-tuple-element(p_body), index=0
    val2 = s32[] get-tuple-element(p_body), index=1
    val3 = s32[] get-tuple-element(p_body), index=2
    add = s32[] add(val2, val3)
    sub = s32[] subtract(add, val3)
    ROOT root = (f32[2], s32[], s32[]) tuple(val1, add, sub)
  }
  condition {
    p_cond = (f32[2], s32[], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    param.2 = s32[] parameter(2)
    while_init = (f32[2], s32[], s32[]) tuple(param.0, param.1, param.2)
    ROOT while = (f32[2], s32[], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 0);
}
// Tuple element 1 is updated through a chain of add/subtract/multiply ops,
// each with a single non-constant input — it should be detected as an
// auxiliary induction variable.
TEST_F(WhileLoopAnalysisTest, AIVMultiChain) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val1 = f32[2] get-tuple-element(p_body), index=0
    val2 = s32[] get-tuple-element(p_body), index=1
    const.1 = s32[] constant(42)
    const.2 = s32[] constant(42)
    const.3 = s32[] constant(42)
    add = s32[] add(val2, const.1)
    sub = s32[] subtract(add, const.2)
    mul = s32[] multiply(sub, const.3)
    ROOT root = (f32[2], s32[]) tuple(val1, mul)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 1);
  EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
// The update chain contains `add(val2, val2)` and `divide(mul, add)` with
// more than one non-constant input, so no auxiliary induction variable
// should be detected.
TEST_F(WhileLoopAnalysisTest, NoAIV) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val1 = f32[2] get-tuple-element(p_body), index=0
    val2 = s32[] get-tuple-element(p_body), index=1
    add = s32[] add(val2, val2)
    const.1 = s32[] constant(42)
    mul = s32[] multiply(add, const.1)
    div = s32[] divide(mul, add)
    ROOT root = (f32[2], s32[]) tuple(val1, div)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 0);
}
// The simplest auxiliary-induction-variable shape: element 1 is updated by
// a single `add(val2, const)` — it should be detected directly.
TEST_F(WhileLoopAnalysisTest, AIVNoChain) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val1 = f32[2] get-tuple-element(p_body), index=0
    val2 = s32[] get-tuple-element(p_body), index=1
    const = s32[] constant(42)
    add = s32[] add(val2, const)
    ROOT root = (f32[2], s32[]) tuple(val1, add)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] parameter(1)
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 1);
  EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
}
} |
1,943 | cpp | tensorflow/tensorflow | despecializer | third_party/xla/xla/service/despecializer.cc | third_party/xla/xla/service/despecializer_test.cc | #ifndef XLA_SERVICE_DESPECIALIZER_H_
#define XLA_SERVICE_DESPECIALIZER_H_
#include <iterator>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/hlo_pass_pipeline.h"
namespace xla {
// Pass that strips backend/layout specializations from an HloModule so it
// can be handled in a platform-neutral form. Runs a fixed pipeline
// (descheduling, control-dependency removal, defusion, bf16
// mixed-precision removal, sub-byte size normalization); additional
// deconstruction passes can be appended via the Add* methods.
class Despecializer : public HloModulePass {
 public:
  Despecializer();
  // Appends a pass that rewrites eligible reduce-windows into a reduce
  // followed by a broadcast.
  void AddReduceWindowToReduceBroadcastDeconstruct();
  // Appends a pass that rewrites "AssumeGatherIndicesInBound" custom-calls
  // into plain copies.
  void AddAssumeGatherIndicesInBoundRewriteToCopy();
  absl::string_view name() const override { return "despecializer"; }
  using HloPassInterface::Run;
  // Runs the accumulated pipeline; returns true if the module changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  HloPassPipeline pipeline_;
};
// Pass that replaces every "AssumeGatherIndicesInBound" custom-call with a
// copy of its first operand, dropping the in-bounds assumption.
class AssumeGatherIndicesInBoundRewriteToCopy : public HloModulePass {
 public:
  AssumeGatherIndicesInBoundRewriteToCopy() = default;
  absl::string_view name() const override {
    return "AssumeGatherIndicesInBoundRewriteToCopy";
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
// Pass that rewrites reduce-window instructions matching a specific
// single-dimension, fully-padded pattern into an equivalent reduce
// followed by a broadcast.
class DeconstructReduceWindowToReduceBroadcast : public HloModulePass {
 public:
  DeconstructReduceWindowToReduceBroadcast() = default;
  absl::string_view name() const override {
    return "ReduceWindowToReduceAndBroadcast";
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
// Pass that drops all control dependencies from every instruction in the
// module. Returns true iff any instruction had control predecessors.
class ControlDepRemover : public HloModulePass {
 public:
  ControlDepRemover() = default;
  absl::string_view name() const override { return "control-dep-remover"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(HloModule* module,
                           const absl::flat_hash_set<absl::string_view>&
                               execution_threads) override {
    bool changed = false;
    for (HloComputation* computation : module->computations()) {
      for (HloInstruction* instruction : computation->instructions()) {
        // Record whether anything will change before dropping the deps.
        changed |= !instruction->control_predecessors().empty();
        TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
      }
    }
    return changed;
  }
};
}
#endif
#include "xla/service/despecializer.h"
#include <iterator>
#include <utility>
#include <vector>
#include "xla/service/defuser.h"
#include "xla/service/float_normalization.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/sub_byte_normalization.h"
namespace xla {
// Builds the base despecialization pipeline: drop schedules, drop control
// dependencies, undo fusion, remove bf16 mixed precision, and strip
// sub-byte element-size annotations.
Despecializer::Despecializer() : pipeline_("despecializer") {
  pipeline_.AddPass<HloDescheduler>();
  pipeline_.AddPass<ControlDepRemover>();
  pipeline_.AddPass<Defuser>();
  pipeline_.AddPass<BFloat16MixedPrecisionRemoval>();
  pipeline_.AddPass<SubByteNormalization>(
      SubByteNormalization::REMOVE_ELEMENT_SIZE);
}
// Appends the AssumeGatherIndicesInBound -> copy rewrite to the pipeline.
void Despecializer::AddAssumeGatherIndicesInBoundRewriteToCopy() {
  pipeline_.AddPass<AssumeGatherIndicesInBoundRewriteToCopy>();
}
// Appends the reduce-window -> reduce+broadcast rewrite to the pipeline.
void Despecializer::AddReduceWindowToReduceBroadcastDeconstruct() {
  pipeline_.AddPass<DeconstructReduceWindowToReduceBroadcast>();
}
// Runs the accumulated despecialization pipeline over `module`.
// Returns true if any pass changed the module.
absl::StatusOr<bool> Despecializer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  return pipeline_.Run(module, execution_threads);
}
// Rewrites every "AssumeGatherIndicesInBound" custom-call in the module
// into a plain copy of its first operand. Returns true iff at least one
// rewrite happened.
absl::StatusOr<bool> AssumeGatherIndicesInBoundRewriteToCopy::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Collect all matching custom-calls first so we do not mutate
  // computations while iterating over their instructions.
  std::vector<HloInstruction*> to_rewrite;
  for (HloComputation* comp : module->computations()) {
    for (HloInstruction* instr : comp->instructions()) {
      if (instr->IsCustomCall("AssumeGatherIndicesInBound")) {
        to_rewrite.push_back(instr);
      }
    }
  }
  // Replace each custom-call with a copy of its index operand.
  for (HloInstruction* custom_call : to_rewrite) {
    auto owning_comp = custom_call->parent();
    auto replacement = owning_comp->AddInstruction(
        HloInstruction::CreateUnary(custom_call->shape(), HloOpcode::kCopy,
                                    custom_call->mutable_operand(0)));
    TF_CHECK_OK(owning_comp->ReplaceInstruction(custom_call, replacement));
  }
  return !to_rewrite.empty();
}
// Rewrites eligible shape-preserving reduce-windows into an explicit
// reduce followed by a broadcast. A reduce-window qualifies only when it
// reduces exactly one dimension over the *entire* input extent, i.e. window
// size == 2*dim-1 with symmetric padding of dim-1 on both sides, and has no
// strides, dilations, or reversals. Returns true iff any rewrite happened.
absl::StatusOr<bool> DeconstructReduceWindowToReduceBroadcast::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  // Candidates paired with the index of the single dimension being reduced.
  // Collected first so that rewriting does not invalidate iteration.
  std::vector<std::pair<HloInstruction*, int64_t>> candidate_rw;
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() != HloOpcode::kReduceWindow) {
        continue;
      }
      auto* reduce_window = CastOrNull<HloReduceWindowInstruction>(instruction);
      if (reduce_window == nullptr) {
        continue;
      }
      // Only shape-preserving reduce-windows can become reduce+broadcast.
      if (reduce_window->operand(0)->shape() != reduce_window->shape()) {
        continue;
      }
      const Window& window = reduce_window->window();
      // Reject any stride, reversal, or dilation in any window dimension.
      int64_t num_stride_dilations = absl::c_count_if(
          window.dimensions(), [](const WindowDimension& win_dim) {
            return (
                win_dim.stride() != 1 || win_dim.window_reversal() == true ||
                win_dim.window_dilation() != 1 || win_dim.base_dilation() != 1);
          });
      if (num_stride_dilations != 0) {
        continue;
      }
      // Exactly one dimension may have a window larger than 1.
      int64_t num_dimensions_reduced = absl::c_count_if(
          window.dimensions(),
          [](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
      if (num_dimensions_reduced != 1) {
        continue;
      }
      auto reduce_dim = absl::c_find_if(
          window.dimensions(),
          [](const WindowDimension& win_dim) { return (win_dim.size() != 1); });
      if (reduce_dim == window.dimensions().end()) {
        continue;
      }
      int64_t reduce_dim_index =
          std::distance(window.dimensions().begin(), reduce_dim);
      auto input_dim_size =
          reduce_window->operand(0)->shape().dimensions(reduce_dim_index);
      // The window must span the full dimension from every output position:
      // size 2*dim-1 with dim-1 padding on both sides.
      if (reduce_dim->size() != 2 * input_dim_size - 1) {
        continue;
      }
      if (reduce_dim->padding_low() != input_dim_size - 1) {
        continue;
      }
      if (reduce_dim->padding_high() != input_dim_size - 1) {
        continue;
      }
      VLOG(2) << "Adding Candidate ReduceWindow:" << reduce_window->ToString();
      candidate_rw.push_back(std::make_pair(reduce_window, reduce_dim_index));
    }
  }
  // Rewrite each recorded candidate into reduce(reduce_dim) + broadcast.
  for (const auto& rw : candidate_rw) {
    auto reduce_window = rw.first;
    auto reduce_dim_index = rw.second;
    // Defensive re-validation of the recorded pair.
    if (reduce_window == nullptr || reduce_dim_index < 0 ||
        reduce_dim_index >= reduce_window->operand(0)->shape().rank()) {
      continue;
    }
    // Non-reduced dimensions become both the reduce output shape and the
    // broadcast dimension mapping.
    std::vector<int64_t> reduce_instr_dimensions;
    std::vector<int64_t> broadcast_dimensions;
    const Window& window = reduce_window->window();
    for (int64_t index = 0; index < window.dimensions().size(); ++index) {
      const auto& window_dimension = window.dimensions(index);
      if (window_dimension.size() == 1) {
        reduce_instr_dimensions.push_back(
            reduce_window->operand(0)->shape().dimensions(index));
        broadcast_dimensions.push_back(index);
      }
    }
    Shape reduce_shape = ShapeUtil::MakeShape(
        reduce_window->shape().element_type(), reduce_instr_dimensions);
    // Reuse the reduce-window's init value and its to_apply computation.
    auto reduce_instr =
        reduce_window->AddInstruction(HloInstruction::CreateReduce(
            reduce_shape, reduce_window->mutable_operand(0),
            reduce_window->mutable_operand(1), {reduce_dim_index},
            reduce_window->called_computations()[0]));
    auto broadcast_instr =
        reduce_window->AddInstruction(HloInstruction::CreateBroadcast(
            reduce_window->shape(), reduce_instr, broadcast_dimensions));
    VLOG(2) << "reduce_window:" << reduce_window->ToString();
    VLOG(2) << "reduce:" << reduce_instr->ToString();
    VLOG(2) << "broadcast:" << broadcast_instr->ToString();
    TF_CHECK_OK(reduce_window->parent()->ReplaceInstruction(reduce_window,
                                                            broadcast_instr));
    changed = true;
  }
  return changed;
}
} | #include "xla/service/despecializer.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
// Test fixture owning the Despecializer instance shared by all test cases;
// individual tests opt into extra deconstruction passes before running it.
class DespecializerTest : public HloTestBase {
 protected:
  Despecializer despecializer_;
};
TEST_F(DespecializerTest, ValidRW1) {
  // Window of size 255 = 2*128-1 with pad 127/127 spans the whole last
  // dimension, so the pass must rewrite it into reduce(dim 3) + broadcast.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  // The root is now a broadcast of a single-dimension reduce.
  HloInstruction* broadcast = module->entry_computation()->root_instruction();
  const HloInstruction* reduction = broadcast->operand(0);
  EXPECT_TRUE(broadcast != nullptr && reduction != nullptr);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(reduction->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(reduction->dimensions().size(), 1);
  EXPECT_EQ(reduction->dimensions(0), 3);
  // The three untouched dimensions map straight through the broadcast.
  EXPECT_EQ(broadcast->dimensions().size(), 3);
  EXPECT_EQ(broadcast->dimensions()[0], 0);
  EXPECT_EQ(broadcast->dimensions()[1], 1);
  EXPECT_EQ(broadcast->dimensions()[2], 2);
}
TEST_F(DespecializerTest, ValidRW2) {
  // Window 15 = 2*8-1 with pad 7/7 over dimension 2 (extent 8) is a full
  // reduction there: expect reduce(dim 2) + broadcast over dims {0,1,3}.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x15x1 pad=0_0x0_0x7_7x0_0}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  HloInstruction* broadcast = module->entry_computation()->root_instruction();
  const HloInstruction* reduction = broadcast->operand(0);
  EXPECT_TRUE(broadcast != nullptr && reduction != nullptr);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(reduction->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(reduction->dimensions().size(), 1);
  EXPECT_EQ(reduction->dimensions(0), 2);
  EXPECT_EQ(broadcast->dimensions().size(), 3);
  EXPECT_EQ(broadcast->dimensions()[0], 0);
  EXPECT_EQ(broadcast->dimensions()[1], 1);
  EXPECT_EQ(broadcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW3) {
  // Full-extent window over dimension 1 (255 = 2*128-1, pad 127/127) with a
  // non-default layout; expect reduce(dim 1) + broadcast over {0,2,3}.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,128,32,8]{1,3,2,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,128,32,8]{1,3,2,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x255x1x1 pad=0_0x127_127x0_0x0_0}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  HloInstruction* broadcast = module->entry_computation()->root_instruction();
  const HloInstruction* reduction = broadcast->operand(0);
  EXPECT_TRUE(broadcast != nullptr && reduction != nullptr);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(reduction->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(reduction->dimensions().size(), 1);
  EXPECT_EQ(reduction->dimensions(0), 1);
  EXPECT_EQ(broadcast->dimensions().size(), 3);
  EXPECT_EQ(broadcast->dimensions()[0], 0);
  EXPECT_EQ(broadcast->dimensions()[1], 2);
  EXPECT_EQ(broadcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW4) {
  // Full-extent window over dimension 0 (15 = 2*8-1, pad 7/7); expect
  // reduce(dim 0) + broadcast over the remaining dimensions {1,2,3}.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[8,32,32,128]{3,0,1,2} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[8,32,32,128]{3,0,1,2} reduce-window(param_0.938,constant.381.clone.1), window={size=15x1x1x1 pad=7_7x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  HloInstruction* broadcast = module->entry_computation()->root_instruction();
  const HloInstruction* reduction = broadcast->operand(0);
  EXPECT_TRUE(broadcast != nullptr && reduction != nullptr);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(reduction->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(reduction->dimensions().size(), 1);
  EXPECT_EQ(reduction->dimensions(0), 0);
  EXPECT_EQ(broadcast->dimensions().size(), 3);
  EXPECT_EQ(broadcast->dimensions()[0], 1);
  EXPECT_EQ(broadcast->dimensions()[1], 2);
  EXPECT_EQ(broadcast->dimensions()[2], 3);
}
TEST_F(DespecializerTest, ValidRW5) {
  // Asymmetric padding (0_31) does not match the full-reduction pattern, so
  // the reduce-window must be left untouched.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=1x1x1x32 pad=0_0x0_0x0_0x0_31}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRW6) {
  // Rank-2 case: window 63 = 2*32-1 with pad 31/31 fully reduces dimension 0;
  // expect reduce(dim 0) + broadcast over the single surviving dimension.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32]{1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32]{1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=63x1 pad=31_31x0_0}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  HloInstruction* broadcast = module->entry_computation()->root_instruction();
  const HloInstruction* reduction = broadcast->operand(0);
  EXPECT_TRUE(broadcast != nullptr && reduction != nullptr);
  EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(reduction->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(reduction->dimensions().size(), 1);
  EXPECT_EQ(reduction->dimensions(0), 0);
  EXPECT_EQ(broadcast->dimensions().size(), 1);
  EXPECT_EQ(broadcast->dimensions()[0], 1);
}
TEST_F(DespecializerTest, ValidRWMultiple) {
  // Two dimensions have window size > 1; the pass only handles exactly one
  // reduced dimension, so the reduce-window must survive unchanged.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.1 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938,constant.381.clone.1), window={size=63x1x1x255 pad=31_31x0_0x0_0x127_127}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWStrideDilation) {
  // Strides and base dilations disqualify the rewrite; the reduce-window
  // must remain the root.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,8,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x1x255 pad=0_0x0_0x0_0x127_127 stride=2x1x1x1 lhs_dilate=2x1x1x1}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduceWindow);
}
TEST_F(DespecializerTest, ValidRWShape) {
  // Output shape differs from the input shape (8 -> 2 on dim 2), so this is
  // not a shape-preserving reduce-window and must not be rewritten.
  const std::string& hlo_text = R"(
HloModule ReduceWindow, is_scheduled=true
%add_float_.1445 {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %maximum = bf16[] add(%lhs, %rhs)
}
ENTRY %main {
%param_0.938 = bf16[32,32,8,128]{3,2,1,0} parameter(0)
%constant.381.clone.1 = bf16[] constant(0)
ROOT %reduce-window.2 = bf16[32,32,2,128]{3,2,1,0} reduce-window(param_0.938, constant.381.clone.1), window={size=1x1x7x1 pad=0_0x0_0x0_0x0_0}, to_apply=%add_float_.1445
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  VLOG(2) << despecializer_.Run(module.get()).value();
  despecializer_.AddReduceWindowToReduceBroadcastDeconstruct();
  EXPECT_TRUE(despecializer_.Run(module.get()).value());
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduceWindow);
}
}
} |
1,944 | cpp | tensorflow/tensorflow | zero_sized_hlo_elimination | third_party/xla/xla/service/zero_sized_hlo_elimination.cc | third_party/xla/xla/service/zero_sized_hlo_elimination_test.cc | #ifndef XLA_SERVICE_ZERO_SIZED_HLO_ELIMINATION_H_
#define XLA_SERVICE_ZERO_SIZED_HLO_ELIMINATION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that replaces safely-removable, zero-element, array-shaped
// instructions with empty constants of the same shape.
class ZeroSizedHloElimination : public HloModulePass {
 public:
  using HloPassInterface::Run;
  // Returns true iff at least one instruction was replaced.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  absl::string_view name() const override {
    return "zero_sized_hlo_elimination";
  }
};
}
#endif
#include "xla/service/zero_sized_hlo_elimination.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
// Replaces each eligible zero-element instruction with an empty constant.
absl::StatusOr<bool> ZeroSizedHloElimination::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool replaced_any = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
      // Skip side-effecting ops and non-array shapes; skip constants so a
      // constant is never replaced by another constant.
      if (hlo->HasSideEffect() || !hlo->shape().IsArray() ||
          hlo->opcode() == HloOpcode::kConstant) {
        continue;
      }
      // Only statically-shaped, zero-element arrays that the computation can
      // safely drop are eligible.
      if (!computation->IsSafelyRemovable(hlo) ||
          !ShapeUtil::IsZeroElementArray(hlo->shape()) ||
          !hlo->shape().is_static()) {
        continue;
      }
      Shape shape = hlo->shape();
      // The replacement constant needs a layout; synthesize the default one
      // when the original shape has none.
      if (!LayoutUtil::HasLayout(shape)) {
        LayoutUtil::SetToDefaultLayout(&shape);
      }
      // A zero-element array carries no data, so an empty constant of the
      // same shape is an exact substitute.
      TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
          hlo,
          HloInstruction::CreateConstant(Literal::CreateFromShape(shape))));
      replaced_any = true;
    }
  }
  return replaced_any;
}
} | #include "xla/service/zero_sized_hlo_elimination.h"
#include <memory>
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Fixture providing a computation builder pre-seeded with an f32[3,0]
// (zero-element) parameter that the tests below build on.
class ZeroSizedHloEliminationTest : public HloTestBase {
 protected:
  ZeroSizedHloEliminationTest()
      : HloTestBase(),
        builder_("zero_sized_computation"),
        zero_sized_param_(
            builder_.AddInstruction(HloInstruction::CreateParameter(
                0, ShapeUtil::MakeShape(F32, {3, 0}), "zero sized param"))) {}
  // Builds the accumulated computation into a fresh verified module and runs
  // ZeroSizedHloElimination over it, returning whether anything changed.
  absl::StatusOr<bool> RunZeroSizedElimination() {
    auto module = CreateNewVerifiedModule("zero_sized_elimination_test_module");
    module->AddEntryComputation(builder_.Build());
    return ZeroSizedHloElimination{}.Run(module.get());
  }
  HloComputation::Builder builder_;
  // Zero-element f32[3,0] parameter shared by the tests.
  HloInstruction* zero_sized_param_;
};
TEST_F(ZeroSizedHloEliminationTest, EliminatedZeroSizedOp) {
  // A pure elementwise op on a zero-element array is safely removable and
  // should be folded into an empty constant.
  builder_.AddInstruction(HloInstruction::CreateUnary(
      zero_sized_param_->shape(), HloOpcode::kTanh, zero_sized_param_));
  TF_ASSERT_OK_AND_ASSIGN(const bool changed, RunZeroSizedElimination());
  EXPECT_TRUE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateParameter) {
  // A lone entry parameter is not safely removable, so the pass is a no-op.
  TF_ASSERT_OK_AND_ASSIGN(const bool changed, RunZeroSizedElimination());
  EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateSideEffects) {
  // Send/SendDone carry side effects; none of the ops may be replaced even
  // though the sent value has zero elements.
  HloInstruction* token =
      builder_.AddInstruction(HloInstruction::CreateToken());
  HloInstruction* send = builder_.AddInstruction(
      HloInstruction::CreateSend(zero_sized_param_, token, 0));
  builder_.AddInstruction(HloInstruction::CreateSendDone(send));
  TF_ASSERT_OK_AND_ASSIGN(const bool changed, RunZeroSizedElimination());
  EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, DoesNotEliminateConstant) {
  // A zero-element constant must be left alone — replacing a constant with
  // another constant would make the pass report a change on every run.
  // Fix: CreateR1 is a template on the element type; an empty braced list
  // cannot deduce it, so the type must be spelled out explicitly.
  builder_.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({})));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunZeroSizedElimination());
  EXPECT_FALSE(changed);
}
TEST_F(ZeroSizedHloEliminationTest, ZeroSizedInstructionWithoutLayoutFolded) {
  // Even when the shape carries no layout, the pass should synthesize a
  // default layout for the replacement constant and still fold the add.
  Shape layoutless_shape = ShapeUtil::MakeShape(F32, {4, 0});
  layoutless_shape.clear_layout();
  HloInstruction* lhs = builder_.AddInstruction(
      HloInstruction::CreateParameter(1, layoutless_shape,
                                      "zero sized param 1"));
  HloInstruction* rhs = builder_.AddInstruction(
      HloInstruction::CreateParameter(2, layoutless_shape,
                                      "zero sized param 2"));
  builder_.AddInstruction(HloInstruction::CreateBinary(
      layoutless_shape, HloOpcode::kAdd, lhs, rhs));
  TF_ASSERT_OK_AND_ASSIGN(const bool changed, RunZeroSizedElimination());
  EXPECT_TRUE(changed);
}
}
} |
1,945 | cpp | tensorflow/tensorflow | algebraic_simplifier | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier.cc | third_party/xla/xla/service/gpu/transforms/algebraic_simplifier_test.cc | #ifndef XLA_SERVICE_ALGEBRAIC_SIMPLIFIER_H_
#define XLA_SERVICE_ALGEBRAIC_SIMPLIFIER_H_
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
// Tuning knobs for AlgebraicSimplifier. Each knob is exposed as a
// setter/getter pair; defaults live on the private members at the bottom.
class AlgebraicSimplifierOptions {
 public:
  // Backend hook deciding whether a reshape between two shapes is a pure
  // bitcast (only consulted in layout-sensitive mode).
  using ReshapeIsBitcastCallback =
      std::function<bool(const Shape& from_shape, const Shape& to_shape)>;
  // Backend hook deciding whether a convolution is lowerable.
  using ConvIsLowerableCallback = std::function<bool(HloInstruction* window)>;
  explicit AlgebraicSimplifierOptions(
      ReshapeIsBitcastCallback reshape_is_bitcast_callback = {},
      ConvIsLowerableCallback conv_is_lowerable_callback = {})
      : reshape_is_bitcast_callback_(std::move(reshape_is_bitcast_callback)),
        conv_is_lowerable_callback_(std::move(conv_is_lowerable_callback)) {}
  // Always false when layout-insensitive; otherwise defers to the callback,
  // falling back to ShapeUtil::ReshapeIsBitcast when none was provided.
  bool ReshapeIsBitcast(const Shape& from_shape, const Shape& to_shape) const {
    if (!is_layout_sensitive_) {
      return false;
    }
    if (!reshape_is_bitcast_callback_) {
      return ShapeUtil::ReshapeIsBitcast(from_shape, to_shape);
    }
    return reshape_is_bitcast_callback_(from_shape, to_shape);
  }
  // Defaults to "lowerable" when no callback was provided.
  bool ConvIsLowerable(HloInstruction* reverse_dims) const {
    if (!conv_is_lowerable_callback_) {
      return true;
    }
    return conv_is_lowerable_callback_(reverse_dims);
  }
  void set_conv_is_lowerable_callback(
      ConvIsLowerableCallback conv_is_lowerable_callback) {
    conv_is_lowerable_callback_ = std::move(conv_is_lowerable_callback);
  }
  // Whether the simplifier must preserve layouts (post-layout-assignment).
  void set_is_layout_sensitive(bool is_layout_sensitive) {
    is_layout_sensitive_ = is_layout_sensitive;
  }
  bool is_layout_sensitive() const { return is_layout_sensitive_; }
  // Associative reordering of dot/convolution chains.
  void set_use_associative_reordering(bool use_associative_reordering) {
    use_associative_reordering_ = use_associative_reordering;
  }
  bool use_associative_reordering() const {
    return use_associative_reordering_;
  }
  void set_associative_reordering_threshold(
      double associative_reordering_threshold) {
    associative_reordering_threshold_ = associative_reordering_threshold;
  }
  double associative_reordering_threshold() const {
    return associative_reordering_threshold_;
  }
  // Rewriting dots with trivial dimensions into reductions/multiplies.
  void set_enable_dot_strength_reduction(bool enable_dot_strength_reduction) {
    enable_dot_strength_reduction_ = enable_dot_strength_reduction;
  }
  bool enable_dot_strength_reduction() const {
    return enable_dot_strength_reduction_;
  }
  void set_enable_dot_to_multiply_rewrite(bool enable_dot_to_multiply_rewrite) {
    enable_dot_to_multiply_rewrite_ = enable_dot_to_multiply_rewrite;
  }
  bool enable_dot_to_multiply_rewrite() const {
    return enable_dot_to_multiply_rewrite_;
  }
  void set_enable_move_dot_param_to_rhs(bool enable_move_dot_param_to_rhs) {
    enable_move_dot_param_to_rhs_ = enable_move_dot_param_to_rhs;
  }
  bool enable_move_dot_param_to_rhs() const {
    return enable_move_dot_param_to_rhs_;
  }
  // Whether the backend accepts dots in non-canonical dimension order.
  void set_supports_non_canonical_dots(bool supports_non_canonical_dots) {
    supports_non_canonical_dots_ = supports_non_canonical_dots;
  }
  bool supports_non_canonical_dots() const {
    return supports_non_canonical_dots_;
  }
  // Convolution-related rewrites.
  void set_enable_conv_simplification(bool enable_conv_simplification) {
    enable_conv_simplification_ = enable_conv_simplification;
  }
  bool enable_conv_simplification() const {
    return enable_conv_simplification_;
  }
  void set_enable_conv_operand_swap(bool enable_conv_operand_swap) {
    enable_conv_operand_swap_ = enable_conv_operand_swap;
  }
  bool enable_conv_operand_swap() const { return enable_conv_operand_swap_; }
  void set_enable_scalar_multiply_reduction(
      bool enable_scalar_multiply_reduction) {
    enable_scalar_multiply_reduction_ = enable_scalar_multiply_reduction;
  }
  bool enable_scalar_multiply_reduction() const {
    return enable_scalar_multiply_reduction_;
  }
  // Treat floats as real numbers (enables rewrites that ignore NaN/Inf).
  void set_enable_floats_are_real(bool enable_floats_are_real) {
    enable_floats_are_real_ = enable_floats_are_real;
  }
  bool enable_floats_are_real() const { return enable_floats_are_real_; }
  void set_enable_window_reduce_to_reduce_replacement(
      bool enable_window_reduce_to_reduce_replacement) {
    enable_window_reduce_to_reduce_replacement_ =
        enable_window_reduce_to_reduce_replacement;
  }
  bool enable_window_reduce_to_reduce_replacement() const {
    return enable_window_reduce_to_reduce_replacement_;
  }
  // Gathers at or below this size may be expanded.
  void set_very_small_gather_size(int64_t size) {
    very_small_gather_size_ = size;
  }
  int64_t very_small_gather_size() const { return very_small_gather_size_; }
  void set_cudnn_batchnorm_forward_training_metadata(const std::string& c) {
    metadata_.cudnn_batchnorm_forward_training_metadata = c;
  }
  const std::string& get_cudnn_batchnorm_forward_training_metadata() const {
    return metadata_.cudnn_batchnorm_forward_training_metadata;
  }
  void set_enable_reduce_of_reshape(bool enable_reduce_of_reshape) {
    enable_reduce_of_reshape_ = enable_reduce_of_reshape;
  }
  bool enable_reduce_of_reshape() const { return enable_reduce_of_reshape_; }
  void set_enable_negative_padding_replacement(
      bool enable_negative_padding_replacement) {
    enable_negative_padding_replacement_ = enable_negative_padding_replacement;
  }
  bool enable_negative_padding_replacement() const {
    return enable_negative_padding_replacement_;
  }
  void set_enable_sink_broadcast(bool enable_sink_broadcast) {
    enable_sink_broadcast_ = enable_sink_broadcast;
  }
  bool enable_sink_broadcast() const { return enable_sink_broadcast_; }
  bool unconditionally_simplify_reduce_of_transpose_or_reshape() const {
    return unconditionally_simplify_reduce_of_transpose_or_reshape_;
  }
  void set_unconditionally_simplify_reduce_of_transpose_or_reshape(bool val) {
    unconditionally_simplify_reduce_of_transpose_or_reshape_ = val;
  }
  // Whether min/max rewrites must preserve NaN propagation.
  bool minmax_propagate_nan() const { return minmax_propagate_nan_; }
  void set_minmax_propagate_nan(bool val) { minmax_propagate_nan_ = val; }
  bool enable_unconditional_reduce_of_concat_replacement() const {
    return enable_unconditional_reduce_of_concat_replacement_;
  }
  void set_enable_unconditional_reduce_of_concat_replacement(
      bool enable_unconditional_reduce_of_concat_replacement) {
    enable_unconditional_reduce_of_concat_replacement_ =
        enable_unconditional_reduce_of_concat_replacement;
  }
  // True when simplifying for the CPU backend.
  bool executing_on_cpu() const { return executing_on_cpu_; }
  void set_executing_on_cpu(bool executing_on_cpu) {
    executing_on_cpu_ = executing_on_cpu;
  }

 private:
  // Backend-specific metadata strings consulted by some rewrites.
  struct Metadata {
    std::string cudnn_batchnorm_forward_training_metadata{""};
    Metadata() {}
  };
  ReshapeIsBitcastCallback reshape_is_bitcast_callback_;
  ConvIsLowerableCallback conv_is_lowerable_callback_;
  bool is_layout_sensitive_{false};
  bool enable_dot_strength_reduction_{true};
  bool supports_non_canonical_dots_{true};
  bool enable_dot_to_multiply_rewrite_{true};
  bool enable_move_dot_param_to_rhs_{false};
  bool enable_conv_simplification_{true};
  bool enable_conv_operand_swap_{true};
  bool enable_scalar_multiply_reduction_{false};
  bool enable_floats_are_real_{false};
  bool enable_window_reduce_to_reduce_replacement_{true};
  bool enable_reduce_of_reshape_{true};
  bool enable_negative_padding_replacement_{true};
  bool enable_sink_broadcast_{true};
  bool unconditionally_simplify_reduce_of_transpose_or_reshape_{false};
  int64_t very_small_gather_size_{4};
  bool minmax_propagate_nan_{true};
  bool enable_unconditional_reduce_of_concat_replacement_{true};
  bool use_associative_reordering_{false};
  bool executing_on_cpu_{false};
  double associative_reordering_threshold_{2.0};
  Metadata metadata_;
};
// HLO pass driving AlgebraicSimplifierVisitor over a module's computations.
class AlgebraicSimplifier : public HloModulePass {
 public:
  // Options are copied and apply to the entire run.
  explicit AlgebraicSimplifier(const AlgebraicSimplifierOptions& options)
      : options_(options) {}
  ~AlgebraicSimplifier() override = default;
  absl::string_view name() const override { return "algsimp"; }
  using HloPassInterface::Run;
  // Returns true iff any instruction was simplified.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Creates a constant and normalizes its layout via UpdateLayout before
  // returning it.
  std::unique_ptr<HloInstruction> CreateConstantWithLayoutUpdated(
      Literal literal) {
    auto constant = HloInstruction::CreateConstant(std::move(literal));
    UpdateLayout(constant->mutable_shape());
    return constant;
  }

 protected:
  AlgebraicSimplifierOptions options_;
};
// Rewrite visitor implementing the simplifier's per-opcode rules; a new
// instance is run over each computation.
class AlgebraicSimplifierVisitor : public DfsHloRewriteVisitor {
 public:
  explicit AlgebraicSimplifierVisitor(const AlgebraicSimplifierOptions& options,
                                      AlgebraicSimplifier* simplifier)
      : options_(options), simplifier_(simplifier) {}
  // Per-opcode rewrite hooks (one override per HLO opcode of interest).
  absl::Status HandleAbs(HloInstruction* abs) override;
  absl::Status HandleAdd(HloInstruction* add) override;
  absl::Status HandleAllToAll(HloInstruction* all_to_all) override;
  absl::Status HandleAnd(HloInstruction* logical_and) override;
  absl::Status HandleBitcast(HloInstruction* bitcast) override;
  absl::Status HandleBitcastConvert(HloInstruction* bitcast) override;
  absl::Status HandleBroadcast(HloInstruction* broadcast) override;
  absl::Status HandleCompare(HloInstruction* compare) override;
  absl::Status HandleConcatenate(HloInstruction* concatenate) override;
  absl::Status HandleConstant(HloInstruction* constant) override;
  absl::Status HandleCopy(HloInstruction* copy) override;
  absl::Status HandleConvert(HloInstruction* convert) override;
  absl::Status HandleComplex(HloInstruction* complex) override;
  absl::Status HandleCustomCall(HloInstruction* custom_call) override;
  absl::Status HandleReal(HloInstruction* real) override;
  absl::Status HandleImag(HloInstruction* imag) override;
  absl::Status HandleIota(HloInstruction* instruction) override;
  absl::Status HandleConvolution(HloInstruction* convolution) override;
  absl::Status HandleDivide(HloInstruction* divide) override;
  absl::Status HandleDot(HloInstruction* dot) override;
  absl::Status HandleGather(HloInstruction* gather) override;
  absl::Status HandleGetTupleElement(
      HloInstruction* get_tuple_element) override;
  absl::Status HandleLog(HloInstruction* log) override;
  absl::Status HandleMaximum(HloInstruction* maximum) override;
  absl::Status HandleMinimum(HloInstruction* minimum) override;
  absl::Status HandleClamp(HloInstruction* clamp) override;
  absl::Status HandleMultiply(HloInstruction* multiply) override;
  absl::Status HandleNegate(HloInstruction* negate) override;
  absl::Status HandleNot(HloInstruction* logical_not) override;
  absl::Status HandleOptimizationBarrier(HloInstruction* barrier) override;
  absl::Status HandleOr(HloInstruction* logical_or) override;
  absl::Status HandlePad(HloInstruction* pad) override;
  absl::Status HandlePower(HloInstruction* power) override;
  absl::Status HandleRemainder(HloInstruction* remainder) override;
  absl::Status HandleReshape(HloInstruction* reshape) override;
  absl::Status HandleReduce(HloInstruction* hlo) override;
  absl::Status HandleReduceWindow(HloInstruction* hlo) override;
  absl::Status HandleReverse(HloInstruction* reverse) override;
  absl::Status HandleRsqrt(HloInstruction* rsqrt) override;
  absl::Status HandleSlice(HloInstruction* slice) override;
  absl::Status HandleSqrt(HloInstruction* sqrt) override;
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override;
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override;
  absl::Status HandleScatter(HloInstruction* hlo) override;
  absl::Status HandleSelect(HloInstruction* select) override;
  absl::Status HandleSort(HloInstruction* sort) override;
  absl::Status HandleTranspose(HloInstruction* transpose) override;
  absl::Status HandleSubtract(HloInstruction* sub) override;
  absl::Status HandleMap(HloInstruction* map) override;
  // Runs this visitor over `computation`; returns true iff anything changed.
  bool Run(HloComputation* computation,
           const AlgebraicSimplifierOptions& options,
           AlgebraicSimplifier* simplifier);
  // Maps bitcast output dimensions back to operand dimensions; nullopt when
  // no consistent mapping exists.
  static std::optional<std::vector<std::vector<int64_t>>> ComputeBitcastDimMap(
      const Shape& bitcast_shape, const Shape& operand_shape);
  static std::vector<std::vector<int64_t>> InvertBitcastDimMap(
      const Shape& original_shape, const Shape& bitcast_shape,
      const std::vector<std::vector<int64_t>>& original_map);
  static std::optional<Shape> ReshapeLayoutDimensions(
      const Shape& original_shape, const Shape& result_shape,
      const std::vector<std::vector<int64_t>>& original_map,
      const std::vector<std::vector<int64_t>>& result_map);
  // Backend override points; the defaults accept everything.
  virtual bool IsValidLayout(const Shape& shape) { return true; }
  virtual bool ShouldStrengthReduceDotToReduce(const HloInstruction* hlo) {
    return true;
  }

 protected:
  const AlgebraicSimplifierOptions& options_;

 private:
  absl::StatusOr<bool> RemoveDegenerateDimensionFromDot(HloDotInstruction* dot);
  absl::Status SimplifyTransposeOfBroadcast(
      HloInstruction* transpose, absl::Span<const int64_t> dimensions);
  // Converts `hlo` to `element_type`, inserting a kConvert only when the
  // element types actually differ.
  HloInstruction* AsType(HloInstruction* hlo,
                         const PrimitiveType element_type) {
    if (hlo->shape().element_type() == element_type) {
      return hlo;
    }
    Shape changed_shape =
        ShapeUtil::ChangeElementType(hlo->shape(), element_type);
    simplifier_->UpdateLayout(&changed_shape);
    return computation_->AddInstruction(
        HloInstruction::CreateConvert(changed_shape, hlo));
  }
  absl::StatusOr<HloInstruction*>
  NormalizeDotOperandToBatchMajorAndContractingMinor(
      HloInstruction* dot_operand, absl::Span<const int64_t> batch_dimensions,
      absl::Span<const int64_t> contracting_dimensions);
  absl::StatusOr<bool> RemoveTransposesFromDotOperands(HloDotInstruction* dot);
  absl::StatusOr<bool> MoveDotParamToRhs(HloDotInstruction* dot);
  HloInstruction* AddReduce(HloInstruction* hlo, absl::Span<const int64_t> dims,
                            PrimitiveType type);
  absl::Status ScalarMultiplyReduction(HloInstruction* dot);
  void ReplaceWithBitcast(HloInstruction* instruction,
                          HloInstruction* operand = nullptr);
  bool SwapCopyBitcastCopy(HloInstruction* root_copy);
  bool ReplaceInstructionIfCompatible(HloInstruction* old_instruction,
                                      HloInstruction* new_instruction);
  bool ReplaceInstructionIfCompatible(
      HloInstruction* old_instruction,
      absl::Span<HloInstruction* const> new_instructions);
  bool SameShape(const HloInstruction* lhs, const HloInstruction* rhs) const;
  bool SameShape(const Shape& lhs, const Shape& rhs) const;
  absl::StatusOr<bool> TryToSinkBroadcastAfterOpWithUniqueNonScalarOperand(
      HloInstruction* broadcast);
  absl::StatusOr<HloInstruction*> OptimizeDotOfConcat(HloInstruction* dot);
  absl::StatusOr<HloInstruction*> OptimizeDotOfConcatHelper(
      HloInstruction* dot, HloInstruction* lhs, int64_t lhs_contracting_dim,
      HloInstruction* rhs, int64_t rhs_contracting_dim, bool swapped);
  absl::StatusOr<HloInstruction*> OptimizeDotOfGather(HloInstruction* dot);
  absl::StatusOr<HloInstruction*> OptimizeDotOfReorderContractingDims(
      HloInstruction* dot);
  absl::StatusOr<HloInstruction*> AssociativeReorderDotOperator(
      HloDotInstruction* dot);
  // Lazily builds (and caches per element type) a scalar a+b computation
  // used as the to_apply of generated reduces.
  HloComputation* GetOrCreateScalarAddComputation(PrimitiveType type) {
    HloComputation*& scalar_add_computation = scalar_add_computations_[type];
    if (scalar_add_computation) {
      return scalar_add_computation;
    }
    HloComputation::Builder b("scalar_add_computation");
    Shape shape = ShapeUtil::MakeShape(type, {});
    simplifier_->UpdateLayout(&shape);
    auto scalar_lhs = b.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "scalar_lhs"));
    auto scalar_rhs = b.AddInstruction(
        HloInstruction::CreateParameter(1, shape, "scalar_rhs"));
    auto scalar_op = b.AddInstruction(HloInstruction::CreateBinary(
        shape, HloOpcode::kAdd, scalar_lhs, scalar_rhs));
    scalar_add_computation =
        computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
    return scalar_add_computation;
  }
  // Convolution rewrites; FoldConvInputPad is virtual for backend overrides.
  virtual absl::StatusOr<bool> FoldConvInputPad(HloInstruction* convolution);
  absl::StatusOr<bool> FoldConvFilterPad(HloInstruction* convolution);
  absl::StatusOr<bool> SwapConvOperands(HloInstruction* convolution);
  absl::StatusOr<bool> IsOneDnnRewritableBF16Conv(HloInstruction** convolution);
  absl::StatusOr<bool> SimplifyConvToDot(HloInstruction* convolution);
  absl::StatusOr<bool> SimplifyConvToMultiply(HloInstruction* convolution);
  absl::StatusOr<bool> TrySimplifyScalarSlice(HloInstruction* slice);
  absl::StatusOr<bool> TryToReorderSliceAndReshape(HloInstruction* slice);
  absl::StatusOr<bool> TryToReorderSliceAndReverse(HloInstruction* slice);
  absl::StatusOr<bool> TrySimplifyTautologicalCompare(
      HloInstruction* conjunction);
  absl::StatusOr<bool> TrySimplifyTautologicalBitcastConvert(
      HloInstruction* bitcast);
  absl::Status TryRemoveUpcastAndDowncastSurroundingBinaryOp(
      HloInstruction* convert_instruction);
  // Resets per-computation state before visiting a new computation.
  void ResetState(HloComputation* computation);
  // The computation currently being visited.
  HloComputation* computation_;
  // Cache of scalar add computations, keyed by element type.
  absl::flat_hash_map<PrimitiveType, HloComputation*> scalar_add_computations_;
  // Owning pass; used for layout updates. Not owned here.
  AlgebraicSimplifier* simplifier_ = nullptr;
};
}
#endif
#include "xla/service/algebraic_simplifier.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_comparison.h"
#include "xla/literal_util.h"
#include "xla/overflow_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
using primitive_util::NativeTypeOf;
// Returns true if `op` is a constant — possibly wrapped in broadcasts — whose
// every element equals `value`.
bool IsAll(const HloInstruction* op, int8_t value) {
  if (op->opcode() == HloOpcode::kBroadcast) {
    // Look through the broadcast at the value being broadcast.
    return IsAll(op->operand(0), value);
  }
  if (op->opcode() == HloOpcode::kConstant) {
    return op->literal().IsAll(value);
  }
  return false;
}
// Returns true if `op` is a constant — possibly wrapped in broadcasts — whose
// every element equals the floating-point `value`.
bool IsAllFloat(const HloInstruction* op, float value) {
  if (op->opcode() == HloOpcode::kBroadcast) {
    // Look through the broadcast at the value being broadcast.
    return IsAllFloat(op->operand(0), value);
  }
  if (op->opcode() == HloOpcode::kConstant) {
    return op->literal().IsAllFloat(value);
  }
  return false;
}
// Returns true if `op` is a constant — possibly wrapped in broadcasts — whose
// every element equals the given scalar literal.
bool IsAll(const HloInstruction* op, const Literal& scalar) {
  CHECK(ShapeUtil::IsScalar(scalar.shape()));
  if (op->opcode() == HloOpcode::kBroadcast) {
    // Look through the broadcast at the value being broadcast.
    return IsAll(op->operand(0), scalar);
  }
  if (op->opcode() == HloOpcode::kConstant) {
    return op->literal().IsAll(scalar);
  }
  return false;
}
bool IsAnyOperandComplex(const HloInstruction* hlo) {
for (auto operand : hlo->operands()) {
if (ShapeUtil::ElementIsComplex(operand->shape())) {
return true;
}
}
return false;
}
// Conservatively returns true if `hlo` can be proven to produce only
// non-negative values. Complex-typed inputs are never considered positive.
bool IsPositive(const HloInstruction* hlo,
                const AlgebraicSimplifierOptions& options) {
  if (IsAnyOperandComplex(hlo)) {
    return false;
  }
  switch (hlo->opcode()) {
    case HloOpcode::kGetTupleElement: {
      const HloInstruction* gte_operand = hlo->operand(0);
      switch (gte_operand->opcode()) {
        case HloOpcode::kCustomCall: {
          // Tuple element 2 of the cudnn batchnorm-forward-training custom
          // call (as configured in the options) is treated as positive.
          // NOTE(review): presumably this element is the inverse stddev —
          // confirm against the cudnn batchnorm metadata contract.
          const auto& target = gte_operand->custom_call_target();
          return target ==
                     options.get_cudnn_batchnorm_forward_training_metadata() &&
                 hlo->tuple_index() == 2;
        }
        default:
          return false;
      }
    }
    // These ops are positive whenever their (first) operand is.
    // NOTE(review): for kPower this assumes a positive base yields a positive
    // result regardless of the exponent operand — confirm intended semantics.
    case HloOpcode::kPower:
    case HloOpcode::kAbs:
    case HloOpcode::kRsqrt:
    case HloOpcode::kSqrt:
      return IsPositive(hlo->operand(0), options);
    case HloOpcode::kMultiply: {
      // x * x is non-negative; require positivity of x to avoid NaN concerns.
      return hlo->operand(0) == hlo->operand(1) &&
             IsPositive(hlo->operand(0), options);
    }
    default:
      return false;
  }
}
// If `inst` is an effective scalar of a floating-point type, returns its
// first element widened to double; otherwise returns std::nullopt.
// NOTE(review): `inst->literal()` is read without checking the opcode, so
// callers presumably only pass kConstant instructions — confirm.
std::optional<double> GetConstantValue(const HloInstruction* inst) {
  if (!ShapeUtil::IsEffectiveScalar(inst->shape())) {
    return std::nullopt;
  }
  // Dispatch on the element type at compile time; only floating-point
  // branches produce a value.
  return primitive_util::PrimitiveTypeSwitch<std::optional<double>>(
      [&](auto primitive_type_constant) -> std::optional<double> {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = NativeTypeOf<primitive_type_constant>;
          return static_cast<double>(
              inst->literal().GetFirstElement<NativeT>());
        }
        return std::nullopt;
      },
      inst->shape().element_type());
}
// Returns true if `hlo` is an effective-scalar constant equal to `literal`.
static bool IsScalarConstant(const HloInstruction* hlo,
                             const LiteralSlice& literal) {
  if (hlo->opcode() != HloOpcode::kConstant ||
      !ShapeUtil::IsEffectiveScalar(hlo->shape())) {
    return false;
  }
  return literal_comparison::Equal(hlo->literal(), literal).ok();
}
static bool IsScalarConstantZero(const HloInstruction* hlo) {
return IsScalarConstant(hlo, LiteralUtil::Zero(hlo->shape().element_type()));
}
static bool IsScalarConstantNegInf(const HloInstruction* hlo) {
return !primitive_util::IsComplexType(hlo->shape().element_type()) &&
IsScalarConstant(hlo,
LiteralUtil::MinValue(hlo->shape().element_type()));
}
static bool IsScalarConstantInf(const HloInstruction* hlo) {
return !primitive_util::IsComplexType(hlo->s | #include "xla/service/algebraic_simplifier.h"
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
namespace m = match;
// Test fixture for AlgebraicSimplifier; provides default simplifier options.
class AlgebraicSimplifierTest : public HloTestBase {
 public:
  AlgebraicSimplifierTest()
      // NOTE(review): the two bools are positional HloTestBase constructor
      // arguments (presumably verifier layout sensitivity and mixed-precision
      // allowance) — confirm against the HloTestBase declaration.
      : HloTestBase(true,
                    true,
                    LayoutAssignment::InstructionCanChangeLayout) {}
 protected:
  // Options passed to the AlgebraicSimplifier under test.
  AlgebraicSimplifierOptions default_options_;
};
// add(param, 0) should be simplified away, leaving the parameter as root.
TEST_F(AlgebraicSimplifierTest, AddZero) {
  auto m = CreateNewVerifiedModule();
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "param0"));
  HloInstruction* zero = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
  builder.AddInstruction(
      HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param0, zero));
  auto computation = m->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kAdd);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  root = computation->root_instruction();
  EXPECT_EQ(root, param0);
}
// p0*p2 + p1*p2 with integer operands should be factored to (p0+p1)*p2.
TEST_F(AlgebraicSimplifierTest, FactorIntegerAddition) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = s32[8] parameter(0)
      p1 = s32[8] parameter(1)
      p2 = s32[8] parameter(2)
      x = s32[8] multiply(p0, p2)
      y = s32[8] multiply(p1, p2)
      ROOT sum = s32[8] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr))
;
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::MultiplyAnyOrder(
          m::AddAnyOrder(m::Parameter(0), m::Parameter(1)), m::Parameter(2))));
}
// Floating-point factoring is applied when the shared factor is an exact
// power of two (0.125), so the transform cannot change the result.
TEST_F(AlgebraicSimplifierTest, FactorFpAddition) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      c = f32[] constant(0.125)
      x = f32[] multiply(p0, c)
      y = f32[] multiply(p1, c)
      ROOT sum = f32[] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::MultiplyAnyOrder(
                  m::AddAnyOrder(m::Parameter(0), m::Parameter(1)),
                  m::ConstantScalar(0.125))));
}
// abs(p) * abs(p) should be simplified to p * p.
TEST_F(AlgebraicSimplifierTest, SquareOfAbs) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p = f32[] parameter(0)
      a = f32[] abs(p)
      ROOT z = f32[] multiply(a, a)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(0))));
}
// (p0*c) * (p1*d) should be reassociated into (p0*p1) * (c*d) so the
// constants can be folded together.
TEST_F(AlgebraicSimplifierTest, MultiplyChain) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      c = f32[] constant(2)
      d = f32[] constant(4)
      x = f32[] multiply(p0, c)
      y = f32[] multiply(p1, d)
      ROOT z = f32[] multiply(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::MultiplyAnyOrder(
          m::MultiplyAnyOrder(m::Parameter(0), m::Parameter(1)),
          m::MultiplyAnyOrder(m::ConstantScalar(2), m::ConstantScalar(4)))));
}
// (p0*a)*b should be reassociated into p0 * (a*b) to group the constants.
TEST_F(AlgebraicSimplifierTest, MultiplyChain2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      a = f32[] constant(2)
      b = f32[] constant(4)
      c = f32[] multiply(p0, a)
      ROOT y = f32[] multiply(c, b)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::MultiplyAnyOrder(
                  m::Parameter(0), m::MultiplyAnyOrder(m::ConstantScalar(2),
                                                       m::ConstantScalar(4)))));
}
// Multiplies of two scalar broadcasts should be reassociated so the scalar
// multiply happens before a single broadcast.
TEST_F(AlgebraicSimplifierTest, MultiplyBroadcastReassoc) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[2,2] parameter(0)
      p1 = f32[] parameter(1)
      b = f32[] constant(2)
      c = f32[2, 2] broadcast(b), dimensions={}
      x = f32[2,2] multiply(p0, c)
      y = f32[2,2] broadcast(p1), dimensions={}
      ROOT z = f32[2,2] multiply(y, x)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::MultiplyAnyOrder(
                  m::Parameter(0), m::Broadcast(m::MultiplyAnyOrder(
                                       m::Parameter(1), m::Constant())))));
}
// Factoring also applies when the shared power-of-two constant factor is
// broadcast: p0*b + p1*b -> (p0+p1)*b.
TEST_F(AlgebraicSimplifierTest, FactorFpAdditionWithBroadcast) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[4] parameter(0)
      p1 = f32[4] parameter(1)
      c = f32[] constant(0.125)
      b = f32[4] broadcast(c), dimensions={}
      x = f32[4] multiply(p0, b)
      y = f32[4] multiply(p1, b)
      ROOT sum = f32[4] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::MultiplyAnyOrder(
                  m::AddAnyOrder(m::Parameter(0), m::Parameter(1)),
                  m::Broadcast(m::ConstantScalar(0.125)))));
}
// Floating-point factoring must NOT fire when the shared constant (0.3) is
// not a power of two, since the transform could change rounding.
TEST_F(AlgebraicSimplifierTest, FactorFpAdditionNotPowerOf2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      c = f32[] constant(0.3)
      x = f32[] multiply(p0, c)
      y = f32[] multiply(p1, c)
      ROOT sum = f32[] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  EXPECT_FALSE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
}
// Factoring must NOT fire for complex element types.
TEST_F(AlgebraicSimplifierTest, FactorFpAdditionComplex) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = c64[8] parameter(0)
      p1 = c64[8] parameter(1)
      p2 = c64[8] parameter(2)
      x = c64[8] multiply(p0, p2)
      y = c64[8] multiply(p1, p2)
      ROOT sum = c64[8] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  EXPECT_FALSE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
}
// Power-of-two factoring also applies to bf16 operands.
TEST_F(AlgebraicSimplifierTest, FactorFpAdditionBfloat16) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = bf16[4] parameter(0)
      p1 = bf16[4] parameter(1)
      c = bf16[] constant(0.125)
      b = bf16[4] broadcast(c), dimensions={}
      x = bf16[4] multiply(p0, b)
      y = bf16[4] multiply(p1, b)
      ROOT sum = bf16[4] add(x, y)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::MultiplyAnyOrder(
                  m::AddAnyOrder(m::Parameter(0), m::Parameter(1)),
                  m::Broadcast(m::ConstantScalar(0.125)))));
}
// Unsigned divide by 8 should become a logical right shift by 3.
TEST_F(AlgebraicSimplifierTest, UnsignedDivideByPowerOf2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p = u32[4] parameter(0)
      c = u32[] constant(8)
      b = u32[4] broadcast(c), dimensions={}
      ROOT d = u32[4] divide(p, b)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::ShiftRightLogical(
                  m::Parameter(0), m::Broadcast(m::ConstantScalar(3)))));
}
// Signed divide by 8 should become abs -> logical shift right by 3 -> negate
// if the dividend was negative (select-based absolute value and sign fixup).
TEST_F(AlgebraicSimplifierTest, SignedDivideByPowerOf2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p = s32[4] parameter(0)
      c = s32[] constant(8)
      b = s32[4] broadcast(c), dimensions={}
      ROOT d = s32[4] divide(p, b)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  auto match_dividend_is_negative =
      m::Lt(m::Parameter(0), m::Broadcast(m::ConstantScalar(0)));
  auto match_abs = m::Select(match_dividend_is_negative,
                             m::Negate(m::Parameter(0)), m::Parameter(0));
  auto match_shift =
      m::ShiftRightLogical(match_abs, m::Broadcast(m::ConstantScalar(3)));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Select(match_dividend_is_negative,
                                   m::Negate(match_shift), match_shift)));
}
// Unsigned remainder by 8 should become a bitwise AND with 7.
TEST_F(AlgebraicSimplifierTest, UnsignedRemainderByPowerOf2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p = u32[4] parameter(0)
      c = u32[] constant(8)
      b = u32[4] broadcast(c), dimensions={}
      ROOT r = u32[4] remainder(p, b)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::AndAnyOrder(m::Parameter(0),
                                        m::Broadcast(m::ConstantScalar(7)))));
}
// Signed remainder by 8 should become abs -> AND with 7 -> negate if the
// dividend was negative.
TEST_F(AlgebraicSimplifierTest, SignedRemainderByPowerOf2) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p = s32[4] parameter(0)
      c = s32[] constant(8)
      b = s32[4] broadcast(c), dimensions={}
      ROOT r = s32[4] remainder(p, b)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  auto match_dividend_is_negative =
      m::Lt(m::Parameter(0), m::Broadcast(m::ConstantScalar(0)));
  auto match_abs = m::Select(match_dividend_is_negative,
                             m::Negate(m::Parameter(0)), m::Parameter(0));
  auto match_and =
      m::AndAnyOrder(match_abs, m::Broadcast(m::ConstantScalar(7)));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Select(match_dividend_is_negative,
                                   m::Negate(match_and), match_and)));
}
// Integer multiply(param, 0) should be replaced by the zero constant.
TEST_F(AlgebraicSimplifierTest, MulZero) {
  auto m = CreateNewVerifiedModule();
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0s32, "param0"));
  HloInstruction* zero = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  builder.AddInstruction(
      HloInstruction::CreateBinary(r0s32, HloOpcode::kMultiply, param0, zero));
  auto computation = m->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kMultiply);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(m.get()).value());
  EXPECT_EQ(computation->root_instruction(), zero);
}
// (p0*c0)*c1 should be reassociated to p0*(c0*c1) so constants merge.
TEST_F(AlgebraicSimplifierTest, MultiplyReassociateMergeConstants) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      c0 = f32[] constant(2.0)
      c1 = f32[] constant(3.0)
      multiply0 = f32[] multiply(p0, c0)
      ROOT multiply1 = f32[] multiply(multiply0, c1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Multiply(m::Parameter(0),
                                     m::Multiply(m::ConstantScalar(2.0),
                                                 m::ConstantScalar(3.0)))));
}
// Same as above, but the constants arrive broadcast: the merged product
// should end up inside a single broadcast.
TEST_F(AlgebraicSimplifierTest, MultiplyReassociateMergeBroadcastedConstants) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[4] parameter(0)
      c0 = f32[] constant(2.0)
      c1 = f32[] constant(3.0)
      b0 = f32[4] broadcast(c0), dimensions={}
      b1 = f32[4] broadcast(c1), dimensions={}
      multiply0 = f32[4] multiply(p0, b0)
      ROOT multiply1 = f32[4] multiply(multiply0, b1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::Multiply(
          m::Parameter(0), m::Broadcast(m::Multiply(m::ConstantScalar(2.0),
                                                    m::ConstantScalar(3.0))))));
}
// An elementwise op on two scalar broadcasts should be sunk below a single
// outer broadcast.
TEST_F(AlgebraicSimplifierTest, ElementwiseSinkMultipleBroadcastsScalar) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      b0 = f32[4] broadcast(p0), dimensions={}
      b1 = f32[4] broadcast(p1), dimensions={}
      ROOT multiply = f32[4] multiply(b1, b0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::Broadcast(m::Multiply(m::Broadcast(m::Parameter(1)),
                                          m::Broadcast(m::Parameter(0))))));
}
// Sinking also applies when one broadcast is a constant and the other a
// non-scalar parameter broadcast sharing dimension {0}.
TEST_F(AlgebraicSimplifierTest, ElementwiseSinkMultipleBroadcastsConstantMix) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[4] parameter(0)
      c0 = f32[] constant(2.0)
      b0 = f32[4,2] broadcast(c0), dimensions={}
      b1 = f32[4,2] broadcast(p0), dimensions={0}
      ROOT multiply = f32[4,2] multiply(b1, b0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Broadcast(m::Multiply(
                  m::Parameter(0), m::Broadcast(m::ConstantScalar(2.0))))));
}
// Two non-scalar broadcasts over the same dimensions sink below a single
// broadcast of the elementwise product.
TEST_F(AlgebraicSimplifierTest, ElementwiseSinkMultipleBroadcastsNonScalar) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[4] parameter(0)
      p1 = f32[4] parameter(1)
      b0 = f32[4,2] broadcast(p0), dimensions={0}
      b1 = f32[4,2] broadcast(p1), dimensions={0}
      ROOT multiply = f32[4,2] multiply(b1, b0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::Broadcast(m::Multiply(m::Parameter(1), m::Parameter(0)))));
}
// Sinking must NOT fire when the broadcasts expand different dimensions.
TEST_F(AlgebraicSimplifierTest, ElementwiseNoSinkBroadcastsDifferentDims) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = f32[4] parameter(0)
      p1 = f32[8] parameter(1)
      b0 = f32[4,8] broadcast(p0), dimensions={0}
      b1 = f32[4,8] broadcast(p1), dimensions={1}
      ROOT multiply = f32[4,8] multiply(b1, b0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_FALSE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Multiply(m::Broadcast(m::Parameter(1)),
                                     m::Broadcast(m::Parameter(0)))));
}
// (c0*b0)*b1 with broadcast scalar constants should be reassociated so the
// two broadcast scalars merge into one broadcast multiply.
TEST_F(AlgebraicSimplifierTest,
       MultiplyReassociateMultiplyOfConstantAndBroadcast) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      c0 = f32[4] constant({2.0, 3.0, 4.0, 5.0})
      c1 = f32[] constant(3.0)
      c2 = f32[] constant(4.0)
      b0 = f32[4] broadcast(c1), dimensions={}
      b1 = f32[4] broadcast(c2), dimensions={}
      multiply0 = f32[4] multiply(c0, b0)
      ROOT multiply1 = f32[4] multiply(multiply0, b1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(m::Multiply(
          m::Constant(), m::Broadcast(m::Multiply(m::ConstantScalar(3.0),
                                                  m::ConstantScalar(4.0))))));
}
// select(true, a, b) should be simplified to a.
TEST_F(AlgebraicSimplifierTest, SelectTrue) {
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0s32, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0s32, "param1"));
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0s32, HloOpcode::kSelect, one, param0, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  EXPECT_EQ(computation->root_instruction(), param0);
}
// select(true, a, b) must NOT be simplified when the chosen operand's type
// (bf16) differs from the select's result type (f32).
TEST_F(AlgebraicSimplifierTest, SelectTrueMixedPrecision) {
  Shape r0bf16 = ShapeUtil::MakeShape(BF16, {});
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0bf16, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "param1"));
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0f32, HloOpcode::kSelect, one, param0, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_FALSE(simplifier.Run(module.get()).value());
}
// select(false, a, b) should be simplified to b.
TEST_F(AlgebraicSimplifierTest, SelectFalse) {
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0s32, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0s32, "param1"));
  HloInstruction* zero = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0s32, HloOpcode::kSelect, zero, param0, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  EXPECT_EQ(computation->root_instruction(), param1);
}
// select(false, a, b) must NOT be simplified when the chosen operand's type
// (bf16) differs from the select's result type (f32).
TEST_F(AlgebraicSimplifierTest, SelectFalseMixedPrecision) {
  Shape r0bf16 = ShapeUtil::MakeShape(BF16, {});
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0bf16, "param1"));
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0f32, HloOpcode::kSelect, one, param0, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_FALSE(simplifier.Run(module.get()).value());
}
// select(p, a, a) should be simplified to a.
TEST_F(AlgebraicSimplifierTest, SelectIdentical) {
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0s32, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0s32, "param1"));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0s32, HloOpcode::kSelect, param0, param1, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  EXPECT_EQ(computation->root_instruction(), param1);
}
// select(p, a, a) must NOT be simplified when the operand's type (bf16)
// differs from the select's result type (f32).
TEST_F(AlgebraicSimplifierTest, SelectIdenticalMixedPrecision) {
  Shape r0bf16 = ShapeUtil::MakeShape(BF16, {});
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  Shape r0pred = ShapeUtil::MakeShape(PRED, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0pred, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0bf16, "param1"));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0f32, HloOpcode::kSelect, param0, param1, param1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_FALSE(simplifier.Run(module.get()).value());
}
// select(not(p), a, b) should be rewritten to select(p, b, a).
TEST_F(AlgebraicSimplifierTest, SelectWithNotPred) {
  Shape pred_ty = ShapeUtil::MakeShape(PRED, {});
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, pred_ty, "param0"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0s32, "param1"));
  HloInstruction* param2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, r0s32, "param2"));
  HloInstruction* pred_instr = builder.AddInstruction(
      HloInstruction::CreateUnary(pred_ty, HloOpcode::kNot, param0));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0s32, HloOpcode::kSelect, pred_instr, param1, param2));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  // The `not` is gone and the true/false branches are swapped.
  const auto& operands = computation->root_instruction()->operands();
  EXPECT_EQ(operands[0], param0);
  EXPECT_EQ(operands[1], param2);
  EXPECT_EQ(operands[2], param1);
}
// select(p, true, false) should be simplified to p itself.
TEST_F(AlgebraicSimplifierTest, SelectPredPred) {
  Shape r0pred = ShapeUtil::MakeShape(PRED, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0pred, "param0"));
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloInstruction* zero = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0pred, HloOpcode::kSelect, param0, one, zero));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  EXPECT_EQ(computation->root_instruction(), param0);
}
// select(p, false, true) should be simplified to not(p).
TEST_F(AlgebraicSimplifierTest, SelectPredPred2) {
  // NOTE: an unused `auto m = CreateNewVerifiedModule();` (copy-paste residue
  // from neighboring tests) was removed; only `module` below is used.
  Shape r0pred = ShapeUtil::MakeShape(PRED, {});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0pred, "param0"));
  HloInstruction* zero = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  builder.AddInstruction(HloInstruction::CreateTernary(
      r0pred, HloOpcode::kSelect, param0, zero, one));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputationWithLayouts(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
  AlgebraicSimplifier simplifier(default_options_);
  ASSERT_TRUE(simplifier.Run(module.get()).value());
  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Not(m::Parameter(0))));
}
// For pred operands, select(compare(p0, p1, GT/GE), p0, p1) should be
// simplified to or(p0, p1).
TEST_F(AlgebraicSimplifierTest, SelectGtCompare) {
  for (const auto cmp_dir : {"GT", "GE"}) {
    const auto kModuleStr = absl::StrFormat(R"(
      HloModule m
      test {
        p0 = pred[8]{0} parameter(0)
        p1 = pred[8]{0} parameter(1)
        compare = pred[8]{0} compare(p0, p1), direction=%s
        ROOT select = pred[8]{0} select(compare, p0, p1)
      }
    )",
                                            cmp_dir);
    TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
    ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
    EXPECT_THAT(m->entry_computation()->root_instruction(),
                GmockMatch(m::Or(m::Parameter(0), m::Parameter(1))));
  }
}
// For pred operands, select(compare(p0, p1, LT/LE), p0, p1) should be
// simplified to and(p0, p1).
TEST_F(AlgebraicSimplifierTest, SelectLtCompare) {
  for (const auto cmp_dir : {"LT", "LE"}) {
    const auto kModuleStr = absl::StrFormat(R"(
      HloModule m
      test {
        p0 = pred[8]{0} parameter(0)
        p1 = pred[8]{0} parameter(1)
        compare = pred[8]{0} compare(p0, p1), direction=%s
        ROOT select = pred[8]{0} select(compare, p0, p1)
      }
    )",
                                            cmp_dir);
    TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
    ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
    EXPECT_THAT(m->entry_computation()->root_instruction(),
                GmockMatch(m::And(m::Parameter(0), m::Parameter(1))));
  }
}
// For pred operands, select(eq(p0, p1), p0, p1) is always p1.
TEST_F(AlgebraicSimplifierTest, SelectEqCompare) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = pred[8]{0} parameter(0)
      p1 = pred[8]{0} parameter(1)
      compare = pred[8]{0} compare(p0, p1), direction=EQ
      ROOT select = pred[8]{0} select(compare, p0, p1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(1)));
}
// For predicate operands, select(p0 != p1, p0, p1) always yields p0: when the
// operands differ the true arm (p0) is taken, and when they are equal either
// arm works.
TEST_F(AlgebraicSimplifierTest, SelectNeCompare) {
  const char* kHloString = R"(
    HloModule m
    test {
      p0 = pred[8]{0} parameter(0)
      p1 = pred[8]{0} parameter(1)
      compare = pred[8]{0} compare(p0, p1), direction=NE
      ROOT select = pred[8]{0} select(compare, p0, p1)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloString));
  ASSERT_TRUE(AlgebraicSimplifier(default_options_).Run(module.get()).value());
  // The whole select/compare pattern folds away to the first parameter.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Parameter(0)));
}
// Negative case: the operand order of the select is swapped relative to the
// compare (select(p0 != p1, p1, p0)), so the NE rewrite must NOT fire and the
// simplifier must report no change.
TEST_F(AlgebraicSimplifierTest, SelectNeCompare_NegativeTestCase) {
  const char* kModuleStr = R"(
    HloModule m
    test {
      p0 = pred[8]{0} parameter(0)
      p1 = pred[8]{0} parameter(1)
      compare = pred[8]{0} compare(p0, p1), direction=NE
      ROOT select = pred[8]{0} select(compare, p1, p0)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  ASSERT_FALSE(AlgebraicSimplifier(default_options_).Run(m.get()).value());
}
1,946 | cpp | tensorflow/tensorflow | hlo_module_config | third_party/xla/xla/service/hlo_module_config.cc | third_party/xla/xla/service/hlo_module_config_test.cc | #ifndef XLA_SERVICE_HLO_MODULE_CONFIG_H_
#define XLA_SERVICE_HLO_MODULE_CONFIG_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/service/computation_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
namespace xla {
// Granularity at which fusion-decision data is collected during compilation
// (stored in HloModuleConfig::fusion_config()).
enum class FusionConfigCollection {
  kOff,      // Do not collect fusion configuration.
  kPerEdge,  // Collect one decision per fusion edge.
  kPerNode,  // Collect one decision per fusion node.
};
// Describes the compilation environment of an HloModule: entry computation
// layout, replica/partition counts, SPMD options, debug options, autotuning
// configs, etc. Together with the HLO itself, this determines how a module
// is compiled.
class HloModuleConfig {
 public:
  // Maps a sub-value of an input parameter (by parameter number and shape
  // index) to a sub-value of the output, for values that are updated in
  // place across executions and should share sharding.
  struct ShardableValueUpdatePair {
    int64_t input_parameter_number;
    ShapeIndex parameter_shape_index;
    ShapeIndex output_shape_index;
  };

  // Default config; debug options come from the built-in defaults, not from
  // command-line flags.
  HloModuleConfig() { debug_options_ = DefaultDebugOptionsIgnoringFlags(); }

  // Derives the entry computation layout from `program_shape`;
  // `ignore_layouts` is forwarded to ComputationLayout (presumably discards
  // the layouts carried in `program_shape` -- see ComputationLayout).
  explicit HloModuleConfig(const ProgramShape& program_shape,
                           bool ignore_layouts = true);
  explicit HloModuleConfig(ComputationLayout entry_computation_layout);

  // Proto (de)serialization of the whole config.
  HloModuleConfigProto ToProto() const;
  static absl::StatusOr<std::unique_ptr<HloModuleConfig>> CreateFromProto(
      const HloModuleConfigProto& proto);

  // Copies `update_pairs` into the repeated proto field.
  static void AssignProtoShardableValueUpdatePairs(
      tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>*
          proto_update_pairs,
      const std::vector<HloModuleConfig::ShardableValueUpdatePair>&
          update_pairs);
  // Inverse of the above: fills `config` from the repeated proto field.
  static void AssignStructShardableValueUpdatePairs(
      HloModuleConfig& config,
      const tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>&
          pairs);

  // Entry computation layout. The layout is optional; the CHECKing accessors
  // below require has_entry_computation_layout() to be true.
  bool has_entry_computation_layout() const {
    return entry_computation_layout_.has_value();
  }
  void SetDefaultComputationLayout(const ProgramShape& program_shape);
  void SetComputationLayoutIfExists(const ProgramShape& program_shape);
  const ComputationLayout& entry_computation_layout() const {
    CHECK(entry_computation_layout_.has_value());
    return *entry_computation_layout_;
  }
  ComputationLayout* mutable_entry_computation_layout() {
    CHECK(entry_computation_layout_.has_value());
    return &(*entry_computation_layout_);
  }
  void clear_entry_computation_layout() {
    entry_computation_layout_ = std::nullopt;
  }

  // Convenience views over debug options.
  bool hlo_profiling_enabled() const {
    return debug_options_.xla_hlo_profile();
  }
  bool cpu_traceme_enabled() const {
    return debug_options_.xla_cpu_enable_xprof_traceme();
  }

  // RNG seed; a nonzero seed also defeats compilation caching (see
  // compilation_cache_key() in the .cc file).
  void set_seed(uint64_t seed) { seed_ = seed; }
  uint64_t seed() const { return seed_; }

  // NOTE(review): setter takes uint64_t but the field and getter are
  // int32_t -- narrowing conversion; confirm this is intended.
  void set_launch_id(uint64_t launch_id) { launch_id_ = launch_id; }
  int32_t launch_id() const { return launch_id_; }

  void set_replica_count(int64_t replica_count) {
    replica_count_ = replica_count;
  }
  int64_t replica_count() const { return replica_count_; }

  void set_num_partitions(int64_t num_partitions) {
    num_partitions_ = num_partitions;
  }
  int64_t num_partitions() const { return num_partitions_; }

  // Per-parameter flags: whether the parameter must be broadcast to
  // participants via collectives.
  const std::vector<bool>& param_requires_broadcast_via_collectives() const {
    return param_requires_broadcast_via_collectives_;
  }
  void set_param_requires_broadcast_via_collectives(
      std::vector<bool> require_broadcast) {
    param_requires_broadcast_via_collectives_ = std::move(require_broadcast);
  }

  void set_use_spmd_partitioning(bool use_spmd_partitioning) {
    use_spmd_partitioning_ = use_spmd_partitioning;
  }
  bool use_spmd_partitioning() const { return use_spmd_partitioning_; }

  // Enabling auto-SPMD partitioning also force-enables plain SPMD
  // partitioning (and logs about it).
  void set_use_auto_spmd_partitioning(bool use_auto_spmd_partitioning) {
    use_auto_spmd_partitioning_ = use_auto_spmd_partitioning;
    if (use_auto_spmd_partitioning) {
      LOG(WARNING) << "Warning: Using auto_spmd_partitioning. It is "
                      "experimental and may contain bugs!";
      LOG(INFO) << "Overwriting use_spmd_partitioning to true, because "
                   "use_auto_spmd_partitioning is true.";
      set_use_spmd_partitioning(true);
    }
  }
  bool use_auto_spmd_partitioning() const {
    return use_auto_spmd_partitioning_;
  }

  void set_auto_spmd_partitioning_mesh_shape(std::vector<int64_t> mesh_shape) {
    auto_spmd_partitioning_mesh_shape_ = std::move(mesh_shape);
  }
  const std::vector<int64_t>& auto_spmd_partitioning_mesh_shape() const {
    return auto_spmd_partitioning_mesh_shape_;
  }
  void set_auto_spmd_partitioning_mesh_ids(std::vector<int64_t> mesh_ids) {
    auto_spmd_partitioning_mesh_ids_ = std::move(mesh_ids);
  }
  const std::vector<int64_t>& auto_spmd_partitioning_mesh_ids() const {
    return auto_spmd_partitioning_mesh_ids_;
  }

  void set_deduplicate_hlo(bool deduplicate_hlo) {
    deduplicate_hlo_ = deduplicate_hlo;
  }
  bool deduplicate_hlo() const { return deduplicate_hlo_; }

  void set_device_type(const std::string& device_type) {
    device_type_ = device_type;
  }
  absl::string_view device_type() const { return device_type_; }

  // Key summarizing every compilation-relevant field, for use in a
  // compilation cache; defined in the .cc file.
  std::string compilation_cache_key() const;

  const DebugOptions& debug_options() const { return debug_options_; }
  void set_debug_options(const DebugOptions& debug_options) {
    debug_options_ = debug_options;
  }

  void set_intra_op_parallelism_threads(
      const int intra_op_parallelism_threads) {
    intra_op_parallelism_threads_ = intra_op_parallelism_threads;
  }
  int64_t intra_op_parallelism_threads() const {
    return intra_op_parallelism_threads_;
  }

  // Static device assignment; optional, CHECK-guarded like the entry layout.
  bool has_static_device_assignment() const {
    return static_device_assignment_.has_value();
  }
  const DeviceAssignment& static_device_assignment() const {
    CHECK(static_device_assignment_.has_value());
    return *static_device_assignment_;
  }
  void set_static_device_assignment(const DeviceAssignment& device_assignment) {
    static_device_assignment_ = device_assignment;
  }

  bool allow_separate_sharding_programs() const {
    return allow_separate_sharding_programs_;
  }
  void set_allow_separate_sharding_programs(
      bool allow_separate_sharding_programs) {
    allow_separate_sharding_programs_ = allow_separate_sharding_programs;
  }

  const std::vector<ShardableValueUpdatePair>& shardable_value_update_pairs()
      const {
    return shardable_value_update_pairs_;
  }
  void set_shardable_value_update_pairs(
      std::vector<ShardableValueUpdatePair> pairs) {
    shardable_value_update_pairs_ = std::move(pairs);
  }

  bool alias_passthrough_params() const { return alias_passthrough_params_; }
  void set_alias_passthrough_params(bool alias_passthrough_params) {
    alias_passthrough_params_ = alias_passthrough_params;
  }

  bool content_aware_computation_sorting() const {
    return content_aware_computation_sorting_;
  }
  void set_content_aware_computation_sorting(
      bool content_aware_computation_sorting) {
    content_aware_computation_sorting_ = content_aware_computation_sorting;
  }

  FusionConfigCollection fusion_config_collection() const {
    return fusion_config_collection_;
  }
  void set_fusion_config_collection(
      FusionConfigCollection fusion_config_collection) {
    fusion_config_collection_ = fusion_config_collection;
  }

  // Autotuning configs (fusion decisions, per-dot settings, nested layout
  // settings, pass phase ordering); mirrored in HloModuleConfigProto.
  const std::vector<std::vector<bool>>& fusion_config() const {
    return fusion_config_;
  }
  std::vector<std::vector<bool>>* mutable_fusion_config() {
    return &fusion_config_;
  }
  const absl::flat_hash_map<std::string, std::vector<int64_t>>& dot_config()
      const {
    return dot_config_;
  }
  absl::flat_hash_map<std::string, std::vector<int64_t>>* mutable_dot_config() {
    return &dot_config_;
  }
  const std::vector<std::vector<std::vector<int64_t>>>& layout_config() const {
    return layout_config_;
  }
  std::vector<std::vector<std::vector<int64_t>>>* mutable_layout_config() {
    return &layout_config_;
  }
  const std::vector<std::vector<bool>>& phase_ordering_config() const {
    return phase_ordering_config_;
  }
  std::vector<std::vector<bool>>* mutable_phase_ordering_config() {
    return &phase_ordering_config_;
  }
  int phase_index() const { return phase_index_; }
  void set_phase_index(const int phase_index) { phase_index_ = phase_index; }

  absl::Span<const bool> allow_spmd_sharding_propagation_to_parameters() const {
    return allow_spmd_sharding_propagation_to_parameters_;
  }
  absl::Span<const bool> allow_spmd_sharding_propagation_to_output() const {
    return allow_spmd_sharding_propagation_to_output_;
  }
  void set_allow_spmd_sharding_propagation_to_parameters(
      absl::Span<const bool> data) {
    return allow_spmd_sharding_propagation_to_parameters_.assign(data.begin(),
                                                                 data.end());
  }
  void set_allow_spmd_sharding_propagation_to_output(
      absl::Span<const bool> data) {
    return allow_spmd_sharding_propagation_to_output_.assign(data.begin(),
                                                             data.end());
  }

  const std::vector<uint64_t>& memory_space_assignment_config() const {
    return memory_space_assignment_config_;
  }
  std::vector<uint64_t>* mutable_memory_space_assignment_config() {
    return &memory_space_assignment_config_;
  }

  // Per-pass analysis budget; returns -1 when no allowance is configured for
  // `pass_name`.
  int64_t GetAnalysisAllowance(absl::string_view pass_name) const {
    auto it = analysis_allowance_map_.find(pass_name);
    if (it == analysis_allowance_map_.end()) {
      return -1;
    }
    return (*it).second;
  }
  void SetAnalysisAllowance(absl::string_view pass_name, int64_t allowance) {
    analysis_allowance_map_[pass_name] = allowance;
  }

  PrecisionConfig::Precision matrix_unit_operand_precision() const {
    return matrix_unit_operand_precision_;
  }
  void set_matrix_unit_operand_precision(
      PrecisionConfig::Precision matrix_unit_operand_precision) {
    matrix_unit_operand_precision_ = matrix_unit_operand_precision;
  }

  // Serialized FDO (feedback-directed optimization) profile; opaque bytes.
  absl::string_view fdo_profile() const { return fdo_profile_; }
  std::string* mutable_fdo_profile() { return &fdo_profile_; }

  int64_t device_memory_size() const { return device_memory_size_; }
  void set_device_memory_size(int64_t device_memory_size) {
    device_memory_size_ = device_memory_size;
  }

 private:
  std::optional<ComputationLayout> entry_computation_layout_;
  uint64_t seed_ = 0;
  int32_t launch_id_ = 0;
  int64_t replica_count_ = 1;
  int64_t num_partitions_ = 1;
  std::vector<bool> param_requires_broadcast_via_collectives_;
  bool use_spmd_partitioning_ = false;
  bool use_auto_spmd_partitioning_ = false;
  std::vector<int64_t> auto_spmd_partitioning_mesh_shape_;
  std::vector<int64_t> auto_spmd_partitioning_mesh_ids_;
  bool deduplicate_hlo_ = false;
  int64_t intra_op_parallelism_threads_ = -1;
  std::string device_type_;
  DebugOptions debug_options_;
  std::optional<DeviceAssignment> static_device_assignment_;
  bool allow_separate_sharding_programs_ = false;
  std::vector<ShardableValueUpdatePair> shardable_value_update_pairs_;
  bool alias_passthrough_params_ = false;
  bool content_aware_computation_sorting_ = false;
  FusionConfigCollection fusion_config_collection_ =
      FusionConfigCollection::kOff;
  std::vector<std::vector<bool>> fusion_config_;
  absl::flat_hash_map<std::string, std::vector<int64_t>> dot_config_;
  std::vector<std::vector<std::vector<int64_t>>> layout_config_;
  std::vector<uint64_t> memory_space_assignment_config_;
  std::vector<std::vector<bool>> phase_ordering_config_;
  int phase_index_ = 0;
  absl::InlinedVector<bool, 1> allow_spmd_sharding_propagation_to_parameters_ =
      {false};
  absl::InlinedVector<bool, 1> allow_spmd_sharding_propagation_to_output_ = {
      false};
  absl::flat_hash_map<std::string, int64_t> analysis_allowance_map_;
  PrecisionConfig::Precision matrix_unit_operand_precision_ =
      PrecisionConfig::DEFAULT;
  std::string fdo_profile_;
  int64_t device_memory_size_ = 0;
};
}
#endif
#include "xla/service/hlo_module_config.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_layout.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrAppend;
// Derives the entry computation layout from `program_shape`;
// `ignore_layouts` is forwarded to ComputationLayout (presumably discards
// the layouts carried in `program_shape` -- see ComputationLayout).
HloModuleConfig::HloModuleConfig(const ProgramShape& program_shape,
                                 bool ignore_layouts)
    : entry_computation_layout_(
          ComputationLayout(program_shape, ignore_layouts)) {}
// Adopts a fully-built entry computation layout.
HloModuleConfig::HloModuleConfig(ComputationLayout entry_computation_layout)
    : entry_computation_layout_(std::move(entry_computation_layout)) {}
// Overwrites any existing entry layout with the default layout for
// `program_shape`.
void HloModuleConfig::SetDefaultComputationLayout(
    const ProgramShape& program_shape) {
  entry_computation_layout_ = ComputationLayout(program_shape);
}
// Like SetDefaultComputationLayout, but keeps the layouts present in
// `program_shape` (ignore_layouts=false).
void HloModuleConfig::SetComputationLayoutIfExists(
    const ProgramShape& program_shape) {
  entry_computation_layout_ = ComputationLayout(program_shape,
                                                /*ignore_layouts=*/false);
}
// Builds a string key capturing every config field that can affect
// compilation, for use as a compilation-cache key.
std::string HloModuleConfig::compilation_cache_key() const {
  std::string key = absl::StrCat("profiling=", hlo_profiling_enabled());
  StrAppend(&key, "::(");
  std::vector<std::string> params;
  if (entry_computation_layout_.has_value()) {
    for (const ShapeLayout& param_layout :
         entry_computation_layout_->parameter_layouts()) {
      params.push_back(param_layout.shape().DebugString());
    }
    // NOTE(review): parameter shapes use DebugString() but the result shape
    // uses SerializeAsString() -- confirm the asymmetry is intentional.
    StrAppend(&key, absl::StrJoin(params, ", "), ") => ",
              entry_computation_layout_->result_shape().SerializeAsString());
  }
  if (seed() != 0) {
    // A nonzero seed deliberately defeats caching: the monotonically
    // increasing counter makes every key distinct.
    static std::atomic<int> counter{0};
    StrAppend(&key, "forcing recompile ", counter++);
  }
  if (replica_count() != 1) {
    StrAppend(&key, "::replica_count=", replica_count());
  }
  StrAppend(&key, debug_options_.DebugString());
  if (intra_op_parallelism_threads() > 0) {
    StrAppend(&key, "::intra_op_parallelism_threads=",
              intra_op_parallelism_threads());
  }
  if (!device_type().empty()) {
    StrAppend(&key, device_type());
  }
  StrAppend(&key, "::alias_passthrough_params=", alias_passthrough_params_);
  StrAppend(&key, "::allow_spmd_sharding_propagation_to_parameters={",
            absl::StrJoin(allow_spmd_sharding_propagation_to_parameters_, ","),
            "}");
  StrAppend(&key, "::allow_spmd_sharding_propagation_to_output={",
            absl::StrJoin(allow_spmd_sharding_propagation_to_output_, ","),
            "}");
  if (!fdo_profile().empty()) {
    // fdo_profile is opaque bytes; hex-encode so the key stays printable.
    StrAppend(&key, "::fdo_profile=", absl::BytesToHexString(fdo_profile()));
  }
  if (device_memory_size() != 0) {
    StrAppend(&key, "::device_memory_size=", device_memory_size());
  }
  return key;
}
// Copies `update_pairs` into the repeated proto field `proto_update_pairs`,
// one proto message per struct entry.
void HloModuleConfig::AssignProtoShardableValueUpdatePairs(
    tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>*
        proto_update_pairs,
    const std::vector<HloModuleConfig::ShardableValueUpdatePair>&
        update_pairs) {
  proto_update_pairs->Reserve(update_pairs.size());
  for (const auto& update : update_pairs) {
    // Build each proto element in place instead of constructing a local and
    // moving it in.
    auto* proto_pair = proto_update_pairs->Add();
    proto_pair->set_input_parameter_number(update.input_parameter_number);
    for (int64_t index : update.parameter_shape_index) {
      proto_pair->add_parameter_shape_index(index);
    }
    for (int64_t index : update.output_shape_index) {
      proto_pair->add_output_shape_index(index);
    }
  }
}
// Converts a std::vector<bool> into the proto BoolList representation.
static HloModuleConfigProto::BoolList BoolVectorToProto(
    const std::vector<bool>& vals) {
  HloModuleConfigProto::BoolList list;
  // Range-for instead of a signed index against the unsigned size() --
  // avoids the signed/unsigned comparison and the index noise.
  for (bool val : vals) {
    list.add_vals(val);
  }
  return list;
}
// Copies the per-row fusion decisions into the proto's repeated BoolList.
static void AssignProtoFusionConfig(
    HloModuleConfigProto& proto,
    const std::vector<std::vector<bool>>& fusion_config) {
  auto* fusion_list = proto.mutable_fusion_config();
  fusion_list->Reserve(fusion_config.size());
  for (const std::vector<bool>& row : fusion_config) {
    *fusion_list->Add() = BoolVectorToProto(row);
  }
}
// Copies `dot_config` into the proto map. The entries are first re-inserted
// into a std::map so the proto is populated in deterministic (sorted-key)
// order rather than flat_hash_map iteration order.
static void AssignProtoDotConfig(
    HloModuleConfigProto& proto,
    const absl::flat_hash_map<std::string, std::vector<int64_t>>& dot_config) {
  std::map<std::string, std::vector<int64_t>> sorted_dot_config;
  sorted_dot_config.insert(dot_config.begin(), dot_config.end());
  for (const auto& [key, list_vector] : sorted_dot_config) {
    HloModuleConfigProto::Int64List list;
    for (int64_t val : list_vector) {
      list.add_vals(val);
    }
    proto.mutable_dot_config()->try_emplace(key, std::move(list));
  }
}
// Copies the three-level nested layout config into the proto as a repeated
// list-of-lists (Int64ListList per outer row).
static void AssignProtoLayoutConfig(
    HloModuleConfigProto& proto,
    const std::vector<std::vector<std::vector<int64_t>>>& layout_config) {
  auto* proto_layout_config = proto.mutable_layout_config();
  proto_layout_config->Reserve(layout_config.size());
  for (const auto& config_row : layout_config) {
    HloModuleConfigProto::Int64ListList proto_list_list;
    proto_list_list.mutable_lists()->Reserve(config_row.size());
    for (const auto& cell : config_row) {
      HloModuleConfigProto::Int64List list;
      for (int64_t val : cell) {
        list.add_vals(val);
      }
      *proto_list_list.add_lists() = std::move(list);
    }
    proto_layout_config->Add(std::move(proto_list_list));
  }
}
// Copies the per-phase bool rows into the proto's repeated BoolList.
static void AssignProtoPhaseOrderingConfig(
    HloModuleConfigProto& proto,
    const std::vector<std::vector<bool>>& phase_config) {
  auto* phase_list = proto.mutable_phase_ordering_config();
  phase_list->Reserve(phase_config.size());
  for (const std::vector<bool>& row : phase_config) {
    *phase_list->Add() = BoolVectorToProto(row);
  }
}
// Fills `config`'s shardable-value-update pairs from the repeated proto
// field (inverse of AssignProtoShardableValueUpdatePairs).
void HloModuleConfig::AssignStructShardableValueUpdatePairs(
    HloModuleConfig& config,
    const tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>&
        pairs) {
  std::vector<HloModuleConfig::ShardableValueUpdatePair> cfg_pairs;
  cfg_pairs.reserve(pairs.size());
  for (const auto& proto_pair : pairs) {
    HloModuleConfig::ShardableValueUpdatePair pair;
    pair.input_parameter_number = proto_pair.input_parameter_number();
    const auto param_idx = proto_pair.parameter_shape_index();
    pair.parameter_shape_index.assign(param_idx.begin(), param_idx.end());
    const auto output_idx = proto_pair.output_shape_index();
    pair.output_shape_index.assign(output_idx.begin(), output_idx.end());
    cfg_pairs.push_back(pair);
  }
  config.set_shardable_value_update_pairs(std::move(cfg_pairs));
}
// Fills `config`'s fusion config from the proto's repeated BoolList
// (inverse of AssignProtoFusionConfig).
static void AssignStructFusionConfig(HloModuleConfig& config,
                                     const HloModuleConfigProto& proto) {
  std::vector<std::vector<bool>> fusion_rows;
  fusion_rows.reserve(proto.fusion_config().size());
  for (const auto& row : proto.fusion_config()) {
    // Construct each row directly from the repeated field's iterator range.
    fusion_rows.emplace_back(row.vals().begin(), row.vals().end());
  }
  *config.mutable_fusion_config() = std::move(fusion_rows);
}
// Fills `config`'s dot config from the proto map (inverse of
// AssignProtoDotConfig).
static void AssignStructDotConfig(HloModuleConfig& config,
                                  const HloModuleConfigProto& proto) {
  const auto& proto_config = proto.dot_config();
  for (const auto& [key, int_list] : proto_config) {
    std::vector<int64_t> value{int_list.vals().begin(), int_list.vals().end()};
    // try_emplace + move: same keep-existing semantics as insert, but avoids
    // copying the freshly built vector into the map.
    config.mutable_dot_config()->try_emplace(key, std::move(value));
  }
}
// Fills `config`'s nested layout config from the proto's repeated
// Int64ListList (inverse of AssignProtoLayoutConfig).
static void AssignStructLayoutConfig(HloModuleConfig& config,
                                     const HloModuleConfigProto& proto) {
  std::vector<std::vector<std::vector<int64_t>>> module_config;
  // const reference: the original `auto proto_config = ...` deep-copied the
  // entire repeated field before iterating.
  const auto& proto_config = proto.layout_config();
  module_config.reserve(proto_config.size());
  for (const auto& proto_row_wrapper : proto_config) {
    const auto& proto_row = proto_row_wrapper.lists();
    std::vector<std::vector<int64_t>> module_row;
    module_row.reserve(proto_row.size());
    for (const auto& proto_cell : proto_row) {
      const auto& cell = proto_cell.vals();
      module_row.push_back(std::vector<int64_t>(cell.begin(), cell.end()));
    }
    module_config.push_back(std::move(module_row));
  }
  *config.mutable_layout_config() = std::move(module_config);
}
// Fills `config`'s phase-ordering config from the proto's repeated BoolList
// (inverse of AssignProtoPhaseOrderingConfig).
static void AssignStructPhaseOrderingConfig(HloModuleConfig& config,
                                            const HloModuleConfigProto& proto) {
  std::vector<std::vector<bool>> phase_rows;
  phase_rows.reserve(proto.phase_ordering_config().size());
  for (const auto& row : proto.phase_ordering_config()) {
    // Construct each row directly from the repeated field's iterator range.
    phase_rows.emplace_back(row.vals().begin(), row.vals().end());
  }
  *config.mutable_phase_ordering_config() = std::move(phase_rows);
}
// Serializes every config field into HloModuleConfigProto; the inverse is
// CreateFromProto.
HloModuleConfigProto HloModuleConfig::ToProto() const {
  HloModuleConfigProto proto;
  if (has_entry_computation_layout()) {
    // The layout is stored as its ProgramShape proto, not field by field.
    *proto.mutable_entry_computation_layout() =
        entry_computation_layout().ComputeProgramShape().ToProto();
  }
  proto.set_seed(seed_);
  proto.set_launch_id(launch_id_);
  proto.set_replica_count(replica_count_);
  proto.set_num_partitions(num_partitions_);
  for (bool requirement : param_requires_broadcast_via_collectives_) {
    proto.add_param_requires_broadcast_via_collectives(requirement);
  }
  proto.set_use_spmd_partitioning(use_spmd_partitioning_);
  proto.set_use_auto_spmd_partitioning(use_auto_spmd_partitioning_);
  for (int64_t partitioning_shape : auto_spmd_partitioning_mesh_shape_) {
    proto.add_auto_spmd_partitioning_mesh_shape(partitioning_shape);
  }
  for (int64_t partitioning_id : auto_spmd_partitioning_mesh_ids_) {
    proto.add_auto_spmd_partitioning_mesh_ids(partitioning_id);
  }
  proto.set_deduplicate_hlo(deduplicate_hlo_);
  proto.set_intra_op_parallelism_threads(intra_op_parallelism_threads_);
  proto.set_device_type(device_type_);
  *proto.mutable_debug_options() = debug_options_;
  if (has_static_device_assignment()) {
    auto proto_assignment = proto.mutable_static_device_assignment();
    static_device_assignment_->Serialize(proto_assignment);
  }
  AssignProtoShardableValueUpdatePairs(
      proto.mutable_shardable_value_update_pairs(),
      shardable_value_update_pairs_);
  proto.set_alias_passthrough_params(alias_passthrough_params_);
  proto.set_content_aware_computation_sorting(
      content_aware_computation_sorting_);
  proto.set_fusion_config_collection(
      static_cast<HloModuleConfigProto::FusionConfigCollection>(
          fusion_config_collection_));
  // Autotuning configs are serialized by dedicated helpers above this
  // function.
  AssignProtoFusionConfig(proto, fusion_config_);
  AssignProtoDotConfig(proto, dot_config_);
  AssignProtoLayoutConfig(proto, layout_config_);
  for (uint64_t cfg : memory_space_assignment_config_) {
    proto.add_memory_space_assignment_config(cfg);
  }
  AssignProtoPhaseOrderingConfig(proto, phase_ordering_config_);
  proto.set_phase_index(phase_index_);
  for (bool value : allow_spmd_sharding_propagation_to_parameters_) {
    proto.add_allow_spmd_sharding_propagation_to_parameters(value);
  }
  for (bool value : allow_spmd_sharding_propagation_to_output_) {
    proto.add_allow_spmd_sharding_propagation_to_output(value);
  }
  auto proto_analysis_map = proto.mutable_analysis_allowance_map();
  for (const auto& [key, value] : analysis_allowance_map_) {
    proto_analysis_map->insert({std::string(key), value});
  }
  proto.set_matrix_unit_operand_precision(matrix_unit_operand_precision_);
  proto.set_allow_separate_sharding_programs(allow_separate_sharding_programs_);
  proto.set_fdo_profile(fdo_profile_);
  proto.set_device_memory_size(device_memory_size_);
  return proto;
}
absl::StatusOr<std::unique_ptr<HloModuleConfig>>
HloModuleConfig::CreateFromProto(const HloModuleConfigProto& proto) {
auto config = s | #include "xla/service/hlo_module_config.h"
#include <string>
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Round-trips shardable-value-update pairs proto -> struct -> proto and
// verifies the serialized bytes are unchanged.
TEST(HloModuleConfigTest, ShardableValueUpdatePairProtoRoundTrip) {
  const std::string text_proto = R"(
  shardable_value_update_pairs {
    input_parameter_number: 2
    parameter_shape_index: 0
    parameter_shape_index: 1
    output_shape_index: 1
    output_shape_index: 0
  }
  shardable_value_update_pairs {
    input_parameter_number: 1
    parameter_shape_index: 2
    output_shape_index: 3
  }
)";
  TF_ASSERT_OK_AND_ASSIGN(auto input_proto,
                          ParseTextProto<HloModuleConfigProto>(text_proto));
  HloModuleConfig config;
  // Proto -> struct.
  HloModuleConfig::AssignStructShardableValueUpdatePairs(
      config, input_proto.shardable_value_update_pairs());
  EXPECT_EQ(config.shardable_value_update_pairs().size(), 2);
  // Struct -> proto; must reproduce the original field exactly.
  HloModuleConfigProto output_proto;
  HloModuleConfig::AssignProtoShardableValueUpdatePairs(
      output_proto.mutable_shardable_value_update_pairs(),
      config.shardable_value_update_pairs());
  EXPECT_EQ(input_proto.SerializeAsString(), output_proto.SerializeAsString());
}
}
} |
1,947 | cpp | tensorflow/tensorflow | profile_guided_latency_estimator | third_party/xla/xla/service/profile_guided_latency_estimator.cc | third_party/xla/xla/service/profile_guided_latency_estimator_test.cc | #ifndef XLA_SERVICE_PROFILE_GUIDED_LATENCY_ESTIMATOR_H_
#define XLA_SERVICE_PROFILE_GUIDED_LATENCY_ESTIMATOR_H_
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
// A LatencyEstimator that answers node-cost and edge-latency queries from a
// profile (ProfiledInstructionsProto), falling back to a wrapped estimator
// for instructions absent from the profile.
class ProfileGuidedLatencyEstimator : public LatencyEstimator {
 public:
  ProfileGuidedLatencyEstimator(
      const SchedulerConfig& config,
      std::unique_ptr<LatencyEstimator> latency_estimator,
      const tensorflow::profiler::ProfiledInstructionsProto& proto);

  // Profiled latency between two scheduling-graph nodes; delegates to the
  // wrapped estimator when the profile has no matching entry.
  TimeCost GetLatencyBetween(const HloGraphNode& from,
                             const HloGraphNode& target) const override;
  TimeCost NodeCost(const HloInstruction* instr) const override;
  int CyclesPerMicrosecond() const override {
    return latency_estimator_->CyclesPerMicrosecond();
  }

 private:
  const SchedulerConfig config_;
  // Fallback estimator; also supplies the cycles-per-microsecond rate used
  // to scale profiled times.
  std::unique_ptr<LatencyEstimator> latency_estimator_;
  // Profile data for one instruction: its own cost, plus latencies to
  // specific target instructions keyed by target name.
  struct ProfileInfo {
    std::optional<TimeCost> cost;
    absl::flat_hash_map<std::string, TimeCost> latencies;
  };
  // Keyed by (source) instruction name.
  absl::flat_hash_map<std::string, ProfileInfo> instr_map_;
};
}
#endif
#include "xla/service/profile_guided_latency_estimator.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
// Returns the profiled latency between `from` and `target`, trying several
// lookups before delegating to the wrapped estimator:
//   1. `from` by name, falling back to its async-wrapped instruction's name
//      for async-start/done ops;
//   2. `target` by name within `from`'s latency table, again with the
//      async-wrapped fallback;
//   3. `from`'s own profiled cost, when (from, target) form an async or
//      point-to-point pair.
LatencyEstimator::TimeCost ProfileGuidedLatencyEstimator::GetLatencyBetween(
    const HloGraphNode& from, const HloGraphNode& target) const {
  static constexpr HloGraphNode::TimeCost kLowLatency = 1.0;
  const HloOpcode from_op = from.GetInstr().opcode();
  // When the scheduler does not schedule send/recv, treat them as cheap.
  if (!config_.schedule_send_recvs &&
      (from_op == HloOpcode::kSend || from_op == HloOpcode::kRecv)) {
    return kLowLatency;
  }
  auto it = instr_map_.find(from.GetInstr().name());
  if (it == instr_map_.end() &&
      (from.GetInstr().opcode() == HloOpcode::kAsyncStart ||
       from.GetInstr().opcode() == HloOpcode::kAsyncDone)) {
    // The profile may record the wrapped op's name rather than the async
    // wrapper's name.
    absl::string_view wrapped_inst_name =
        from.GetInstr().async_wrapped_instruction()->name();
    VLOG(2) << "PGLE found async wrapped instruction: " << wrapped_inst_name
            << " in " << from.GetInstr().name();
    it = instr_map_.find(wrapped_inst_name);
  }
  if (it == instr_map_.end()) {
    VLOG(1)
        << "PGLE did NOT find wrapped instruction name or async start. From: "
        << from.GetInstr().name();
    return latency_estimator_->GetLatencyBetween(from, target);
  }
  auto it2 = it->second.latencies.find(target.GetInstr().name());
  if (it2 == it->second.latencies.end() &&
      (target.GetInstr().opcode() == HloOpcode::kAsyncStart ||
       target.GetInstr().opcode() == HloOpcode::kAsyncDone)) {
    // Same async-wrapped fallback for the target side.
    it2 = it->second.latencies.find(
        target.GetInstr().async_wrapped_instruction()->name());
  }
  if (it2 != it->second.latencies.end()) {
    VLOG(2) << "PGLE found latency between " << from.GetInstr().name()
            << " and " << target.GetInstr().name() << " in latency info";
    return it2->second * CyclesPerMicrosecond();
  }
  // No explicit edge latency; for async / point-to-point pairs use the
  // source's own profiled cost as the latency.
  if (it->second.cost.has_value() &&
      (IsAsyncPair(from, target) || IsP2pPair(from, target))) {
    VLOG(2) << "PGLE found latency for async op " << from.GetInstr().name()
            << " and (assumed)" << target.GetInstr().name()
            << " in instruction costs";
    return *it->second.cost * CyclesPerMicrosecond();
  }
  VLOG(1) << "PGLE did not find relevant profiling info for '"
          << from.GetInstr().name() << "', and '" << target.GetInstr().name()
          << "'.";
  return latency_estimator_->GetLatencyBetween(from, target);
}
// Returns the profiled cost of `instr`, falling back to the wrapped
// estimator when the profile has no entry. Async collective start/done ops
// are always treated as cheap here, so their expense is modeled on the
// edge (GetLatencyBetween) rather than on the node itself.
LatencyEstimator::TimeCost ProfileGuidedLatencyEstimator::NodeCost(
    const HloInstruction* instr) const {
  if (hlo_query::IsAsyncCollectiveStartOp(instr, /*include_send_recv=*/true) ||
      hlo_query::IsAsyncCollectiveDoneOp(instr, /*include_send_recv=*/true)) {
    static constexpr TimeCost kLowCost = 1.0;
    return kLowCost;
  }
  if (auto it = instr_map_.find(instr->name());
      it != instr_map_.end() && it->second.cost.has_value()) {
    VLOG(2) << "PGLE found cost for: " << instr->name();
    return *it->second.cost;
  }
  VLOG(1) << "PGLE missed cost for: " << instr->name();
  return latency_estimator_->NodeCost(instr);
}
// Builds the profile lookup table. Profiled costs and latencies (in
// microseconds) are converted to cycles using the wrapped estimator's rate.
ProfileGuidedLatencyEstimator::ProfileGuidedLatencyEstimator(
    const SchedulerConfig& config,
    std::unique_ptr<LatencyEstimator> latency_estimator,
    const tensorflow::profiler::ProfiledInstructionsProto& proto)
    : config_(config), latency_estimator_(std::move(latency_estimator)) {
  const int cycles_per_microsecond = latency_estimator_->CyclesPerMicrosecond();
  for (const auto& instr_cost : proto.costs()) {
    instr_map_[instr_cost.name()] =
        ProfileInfo{instr_cost.cost_us() * cycles_per_microsecond};
  }
  for (const auto& latency : proto.latencies()) {
    // try_emplace instead of insert(make_pair(...)): reuses an entry created
    // by the cost loop so a source's cost and latencies share one
    // ProfileInfo, and avoids building a throwaway pair/ProfileInfo when the
    // key already exists.
    auto it = instr_map_.try_emplace(latency.source()).first;
    it->second.latencies[latency.target()] =
        latency.latency_us() * cycles_per_microsecond;
  }
}
} | #include "xla/service/profile_guided_latency_estimator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
// Returns the position of the instruction named `hlo_name` in
// `instruction_sequence`, or the sequence length if no such instruction
// exists (mirrors find-style "end" semantics).
int GetIndex(absl::Span<HloInstruction* const> instruction_sequence,
             absl::string_view hlo_name) {
  int index = 0;
  for (const HloInstruction* instruction : instruction_sequence) {
    if (instruction->name() == hlo_name) {
      break;
    }
    ++index;
  }
  return index;
}
// A default-constructed scheduler config: every knob at its default value.
SchedulerConfig GetDefaultSchedConfig() {
  return SchedulerConfig();
}
// Runs the latency-hiding scheduler over `module` with the given config and
// latency estimator; returns the scheduler pass's changed/unchanged result.
absl::StatusOr<bool> RunScheduler(
    HloModule* module, const SchedulerConfig& sched_config,
    std::unique_ptr<LatencyEstimator> latency_estimator =
        std::make_unique<ApproximateLatencyEstimator>()) {
  // Recursively sums element byte sizes across tuple shapes.
  HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
      [&shape_size_bytes](const Shape& shape) -> int64_t {
    int64_t shape_size = 0;
    if (shape.IsTuple()) {
      for (auto& sub_shape : shape.tuple_shapes()) {
        shape_size += shape_size_bytes(sub_shape);
      }
      return shape_size;
    }
    return ShapeUtil::ByteSizeOfElements(shape);
  };
  auto async_tracker = std::make_unique<AsyncTracker>(sched_config);
  // The scheduler core holds raw pointers into the tracker and estimator;
  // both are subsequently moved into LatencyHidingScheduler together with
  // the core, keeping the pointees alive for the run.
  auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
      shape_size_bytes, async_tracker.get(), latency_estimator.get(),
      sched_config);
  TF_ASSIGN_OR_RETURN(
      bool value, LatencyHidingScheduler(
                      std::move(latency_estimator), std::move(async_tracker),
                      std::move(scheduler_core), shape_size_bytes)
                      .Run(module));
  return value;
}
}
// Test fixture; the bool test parameter selects between profile variants in
// the parameterized tests below (see TEST_P usages).
class LatencyHidingSchedulerTest : public HloTestBase,
                                   public ::testing::WithParamInterface<bool> {
 public:
  // Parses `hlo_string` into a verified module using the default test-module
  // config.
  absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
      absl::string_view hlo_string) {
    return ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest());
  }
};
// Runs the full latency-hiding scheduler with a ProfileGuidedLatencyEstimator.
// cp2 is profiled as slower (80us) than cp1 (40us), so the scheduler should
// start cp2s before cp1s.  GetParam() selects the profile encoding:
// latencies{} source/target pairs (true) vs. costs{} on the start ops (false).
TEST_P(LatencyHidingSchedulerTest, TestProfileGuidedLatencyEstimator) {
  absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
  p0 = f32[16,64,256]{2,1,0} parameter(0)
  p1 = f32[16,64,256]{2,1,0} parameter(1)
  p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
  p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
  cp1s = (f32[1024,2048,2048]{2,1,0}, f32[1024,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
  cp2s = (f32[2048,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p3), source_target_pairs={{1,0},{0,3},{3,2}}
  c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
    window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
  cp1d = f32[1024,2048,2048]{2,1,0} collective-permute-done(cp1s)
  cp2d = f32[2048,2048,2048]{2,1,0} collective-permute-done(cp2s)
  ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(c0, cp1d, cp2d)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
  HloSchedule& module_schedule = hlo_module->schedule();
  EXPECT_TRUE(hlo_module->has_entry_computation());
  std::string profiled_instructions_text_proto;
  if (GetParam()) {
    profiled_instructions_text_proto = R"pb(
      costs { name: "c0" cost_us: 10.0 }
      latencies { source: "cp1s" target: "cp1d" latency_us: 40.0 }
      latencies { source: "cp2s" target: "cp2d" latency_us: 80.0 }
    )pb";
  } else {
    profiled_instructions_text_proto = R"pb(
      costs { name: "c0" cost_us: 10.0 }
      costs { name: "cp1s" cost_us: 40.0 }
      costs { name: "cp2s" cost_us: 80.0 }
    )pb";
  }
  tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      profiled_instructions_text_proto, &profiled_instructions_proto));
  auto sched_config = GetDefaultSchedConfig();
  // Allow both collective permutes in flight so ordering is the scheduler's
  // free choice, driven only by the profiled latencies.
  sched_config.collective_permute_overlap_limit = 2;
  auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
      sched_config, std::make_unique<ApproximateLatencyEstimator>(),
      profiled_instructions_proto);
  EXPECT_TRUE(
      RunScheduler(hlo_module.get(), sched_config, std::move(latency_estimator))
          .ok());
  EXPECT_TRUE(hlo_module->has_entry_computation());
  std::vector<HloInstruction*> new_instruction_sequence =
      module_schedule.sequence(hlo_module->entry_computation()).instructions();
  if (VLOG_IS_ON(1)) {
    for (auto* new_i : new_instruction_sequence) {
      VLOG(1) << new_i->ToString();
    }
  }
  // The higher-latency collective permute must be launched first.
  EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
            GetIndex(new_instruction_sequence, "cp1s"));
}
INSTANTIATE_TEST_SUITE_P(LatencyHidingSchedulerTest, LatencyHidingSchedulerTest,
                         ::testing::Bool());
using ProfileGuidedLatencyEstimatorTest = HloTestBase;
// Async reduce-scatter: the profile carries a single cost keyed by the async
// op name ("reduce-scatter"); the estimator must report it as the latency
// between the start and done wrapper instructions.
// Fix: removed a stray empty statement (`;`) left after the text-proto
// assignment.
TEST_F(ProfileGuidedLatencyEstimatorTest,
       TestProfileGuidedLatencyEstimatorWithAsyncInstruction) {
  absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
add.1 {
  x = f32[] parameter(0)
  y = f32[] parameter(1)
  ROOT add = f32[] add(x, y)
}
ENTRY entry {
  p0 = f32[16,64,256]{2,1,0} parameter(0)
  p1 = f32[16,64,256]{2,1,0} parameter(1)
  reduce-scatter-start = ((f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}), (f32[4,64,256]{2,1,0}, f32[4,64,256]{2,1,0})) reduce-scatter-start(p0, p1), channel_id=1, replica_groups={}, dimensions={0}, to_apply=add.1
  reduce-scatter-done = (f32[4,64,256]{2,1,0}, f32[4,64,256]{2,1,0}) reduce-scatter-done(reduce-scatter-start)
  ROOT gte = f32[4,64,256]{2,1,0} get-tuple-element(reduce-scatter-done), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  EXPECT_TRUE(hlo_module->has_entry_computation());
  std::string profiled_instructions_text_proto = R"pb(
    costs { name: "reduce-scatter" cost_us: 120.0 }
  )pb";
  tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      profiled_instructions_text_proto, &profiled_instructions_proto));
  auto sched_config = GetDefaultSchedConfig();
  auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
      sched_config, std::make_unique<ApproximateLatencyEstimator>(),
      profiled_instructions_proto);
  HloInstruction* rs_start =
      FindInstruction(hlo_module.get(), "reduce-scatter-start");
  HloInstruction* rs_done =
      FindInstruction(hlo_module.get(), "reduce-scatter-done");
  HloGraphNode rs_start_node = HloGraphNode(rs_start, 0);
  HloGraphNode rs_done_node = HloGraphNode(rs_done, 1);
  // The profiled 120us cost must surface as the start->done latency.
  double latency =
      latency_estimator->GetLatencyBetween(rs_start_node, rs_done_node);
  EXPECT_EQ(latency, 120.0);
}
// Point-to-point send/recv: per-op costs from the profile must be reported as
// the start->done latencies of both the send and the recv chains.
// Fix: removed a stray empty statement (`;`) left after the text-proto
// assignment.
TEST_F(ProfileGuidedLatencyEstimatorTest,
       TestProfileGuidedLatencyEstimatorWithP2pInstruction) {
  absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
  p0 = f32[16,64,256]{2,1,0} parameter(0)
  after-all.1 = token[] after-all()
  send.7.0 = (f32[16,64,256]{2,1,0}, u32[], token[]) send(p0, after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_source_target_pairs="{{0,1}}"}
  send-done.7.0 = token[] send-done(send.7.0), channel_id=1
  recv.7.0 = (f32[16,64,256]{2,1,0}, u32[], token[]) recv(after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_source_target_pairs="{{0,1}}"}
  recv-done.7.0 = (f32[16,64,256]{2,1,0}, token[]) recv-done(recv.7.0), channel_id=1
  ROOT recv-data = f32[16,64,256]{2,1,0} get-tuple-element(recv-done.7.0), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  EXPECT_TRUE(hlo_module->has_entry_computation());
  std::string profiled_instructions_text_proto = R"pb(
    costs { name: "send.7.0" cost_us: 110.0 }
    costs { name: "recv.7.0" cost_us: 100.0 }
  )pb";
  tensorflow::profiler::ProfiledInstructionsProto profiled_instructions_proto;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      profiled_instructions_text_proto, &profiled_instructions_proto));
  auto sched_config = GetDefaultSchedConfig();
  // Enable send/recv handling in the scheduler configuration.
  sched_config.schedule_send_recvs = true;
  auto latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
      sched_config, std::make_unique<ApproximateLatencyEstimator>(),
      profiled_instructions_proto);
  HloInstruction* send_start = FindInstruction(hlo_module.get(), "send.7.0");
  HloInstruction* send_done =
      FindInstruction(hlo_module.get(), "send-done.7.0");
  HloInstruction* recv_start = FindInstruction(hlo_module.get(), "recv.7.0");
  HloInstruction* recv_done =
      FindInstruction(hlo_module.get(), "recv-done.7.0");
  HloGraphNode send_start_node = HloGraphNode(send_start, 0);
  HloGraphNode send_done_node = HloGraphNode(send_done, 1);
  HloGraphNode recv_start_node = HloGraphNode(recv_start, 2);
  HloGraphNode recv_done_node = HloGraphNode(recv_done, 3);
  double send_latency =
      latency_estimator->GetLatencyBetween(send_start_node, send_done_node);
  double recv_latency =
      latency_estimator->GetLatencyBetween(recv_start_node, recv_done_node);
  EXPECT_EQ(send_latency, 110.0);
  EXPECT_EQ(recv_latency, 100.0);
}
} |
1,948 | cpp | tensorflow/tensorflow | reduce_scatter_combiner | third_party/xla/xla/service/reduce_scatter_combiner.cc | third_party/xla/xla/service/reduce_scatter_combiner_test.cc | #ifndef XLA_SERVICE_REDUCE_SCATTER_COMBINER_H_
#define XLA_SERVICE_REDUCE_SCATTER_COMBINER_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that combines compatible reduce-scatter ops into a single, larger
// (tuple-shaped) reduce-scatter, bounded by the thresholds below.
class ReduceScatterCombiner : public HloModulePass {
 public:
  // `combine_threshold_in_bytes` / `combine_threshold_count` bound the total
  // byte size and the number of ops merged into one combined collective;
  // `combine_by_dim` restricts grouping to ops with equal scatter dimensions.
  ReduceScatterCombiner(int64_t combine_threshold_in_bytes,
                        int64_t combine_threshold_count, bool combine_by_dim);
  absl::string_view name() const override { return "reduce-scatter-combiner"; }
  using HloPassInterface::Run;
  // Returns true iff any reduce-scatters were combined.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  // Combine up to this total size (bytes) per group.
  int64_t combine_threshold_in_bytes_;
  // Combine at most this many ops per group.
  int64_t combine_threshold_count_;
  // If true, only ops sharing a scatter dimension are grouped together.
  bool combine_by_dim_;
};
}
#endif
#include "xla/service/reduce_scatter_combiner.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Returns the scatter dimension that occurs most often among `to_combine`.
// If that dimension is not valid for every op in the set (i.e. it is >= the
// smallest rank present), falls back to dimension 0, which is always valid.
int64_t FindMostFrequentScatterDim(
    absl::Span<HloInstruction* const> to_combine) {
  assert(!to_combine.empty());
  int64_t min_rank = std::numeric_limits<int64_t>::max();
  // frequency[d] counts how many ops scatter along dimension d.
  std::vector<int64_t> frequency;
  for (const HloInstruction* it : to_combine) {
    int64_t dim = Cast<HloReduceScatterInstruction>(it)->scatter_dimension();
    frequency.resize(std::max(dim + 1, static_cast<int64_t>(frequency.size())),
                     0);
    frequency[dim]++;
    min_rank = std::min(min_rank, it->shape().rank());
  }
  int64_t most_frequent_dim = std::distance(
      frequency.begin(), std::max_element(frequency.begin(), frequency.end()));
  return most_frequent_dim < min_rank ? most_frequent_dim : 0;
}
// Grouping key: the generic all-reduce key plus the scatter dimension
// (-1 when combining across dimensions; see ReduceScatterCombiner::Run).
using ReduceScatterKey =
    std::tuple<AllReduceKey, int64_t>;
// Combines the ops in `to_combine` into one tuple-shaped reduce-scatter over
// the most frequent scatter dimension.  Ops scattering along a different
// dimension get their operand bitcast to swap that dimension into place, and
// their result bitcast back afterwards.
absl::Status CombineReduceScatters(
    absl::Span<HloInstruction* const> to_combine) {
  if (to_combine.size() < 2) {
    return absl::OkStatus();
  }
  VLOG(1) << "Combined " << to_combine.size() << " reduce-scatter ops";
  HloComputation& computation = *to_combine.back()->parent();
  HloComputation* reduction = to_combine[0]->to_apply();
  // All ops must share the same recognized reduction kind (checked below).
  std::optional<ReductionKind> first_reduction_kind =
      MatchReductionComputation(reduction);
  TF_RET_CHECK(first_reduction_kind);
  std::vector<HloInstruction*> operands;
  // Per-op permutation used to move the scatter dim; nullopt when the op
  // already scatters along `most_frequent_dim`.
  std::vector<std::optional<std::vector<int64_t>>> operand_permutations;
  std::vector<Shape> output_shapes;
  int64_t most_frequent_dim = FindMostFrequentScatterDim(to_combine);
  VLOG(1) << "Combining set";
  for (HloInstruction* hlo : to_combine) {
    VLOG(1) << "Set element: " << hlo->ToString();
    TF_RET_CHECK(hlo->opcode() == HloOpcode::kReduceScatter);
    const auto* rs = Cast<HloReduceScatterInstruction>(hlo);
    TF_RET_CHECK(hlo->operands().size() == 1);
    std::optional<ReductionKind> reduction_kind =
        MatchReductionComputation(hlo->to_apply());
    TF_RET_CHECK(reduction_kind);
    TF_RET_CHECK(*reduction_kind == *first_reduction_kind);
    TF_RET_CHECK(hlo->shape().IsArray());
    HloInstruction* operand = hlo->operands().front();
    operands.push_back(operand);
    operand_permutations.emplace_back();
    output_shapes.push_back(hlo->shape());
    if (rs->scatter_dimension() != most_frequent_dim) {
      const Shape& operand_shape = operand->shape();
      // Build an identity permutation with the op's scatter dim swapped
      // against the target dim, and bitcast the operand accordingly.
      auto& perm = operand_permutations.back();
      perm = std::vector<int64_t>(operand_shape.rank());
      std::iota(perm->begin(), perm->end(), 0);
      std::swap((*perm)[most_frequent_dim], (*perm)[rs->scatter_dimension()]);
      operands.back() =
          computation.AddInstruction(HloInstruction::CreateBitcast(
              ShapeUtil::PermuteDimensions(*perm, operand_shape), operand));
      output_shapes.back() = ShapeUtil::PermuteDimensions(*perm, hlo->shape());
    }
  }
  HloInstruction* combined;
  TF_RET_CHECK(operands.size() >= 2);
  // Collective metadata (device list, channel id, global ids, sharding) is
  // inherited from the first op in the group.
  combined = computation.AddInstruction(HloInstruction::CreateReduceScatter(
      ShapeUtil::MakeTupleShape(output_shapes), operands, reduction,
      to_combine.front()->device_list(),
      false, to_combine.front()->channel_id(),
      Cast<HloReduceScatterInstruction>(to_combine.front())
          ->use_global_device_ids(),
      most_frequent_dim));
  if (to_combine.front()->has_sharding()) {
    combined->set_sharding(to_combine.front()->sharding());
  }
  VLOG(1) << "Replacing with : " << combined->ToString();
  // Replace each original op with a get-tuple-element of the combined result,
  // undoing the dimension swap with a bitcast where one was applied.
  for (int64_t i = 0; i < to_combine.size(); ++i) {
    HloInstruction* replacement = computation.AddInstruction(
        HloInstruction::CreateGetTupleElement(combined, i));
    if (operand_permutations[i]) {
      replacement = computation.AddInstruction(HloInstruction::CreateBitcast(
          ShapeUtil::PermuteDimensions(*operand_permutations[i],
                                       replacement->shape()),
          replacement));
    }
    TF_RETURN_IF_ERROR(
        computation.ReplaceInstruction(to_combine[i], replacement));
  }
  return absl::OkStatus();
}
}
// Thresholds bound the combined collective's total bytes and op count;
// combine_by_dim restricts grouping to equal scatter dimensions (see header).
ReduceScatterCombiner::ReduceScatterCombiner(int64_t combine_threshold_in_bytes,
                                             int64_t combine_threshold_count,
                                             bool combine_by_dim)
    : combine_threshold_in_bytes_(combine_threshold_in_bytes),
      combine_threshold_count_(combine_threshold_count),
      combine_by_dim_(combine_by_dim) {}
// Runs the pass: groups compatible reduce-scatters per non-fusion computation
// and combines each group, subject to the byte/count thresholds.
absl::StatusOr<bool> ReduceScatterCombiner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(1) << "Running ReduceScatterCombiner with threshold of "
          << combine_threshold_in_bytes_ << " bytes";
  // Non-positive thresholds disable the pass entirely.
  if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
    VLOG(1) << "Skip ReduceScatterCombiner because the threshold is zero";
    return false;
  }
  // Layout-constrained collectives are not regrouped.
  if (hlo_query::ContainsLayoutConstrainedCollective(
          *module, HloOpcode::kReduceScatter)) {
    VLOG(1) << "Skip ReduceScatterCombiner because the module contains "
               "reduce-scatter with constrained layouts";
    return false;
  }
  bool changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
    // Key function: the standard all-reduce grouping key plus the scatter
    // dimension (or -1 when combine_by_dim_ is false, letting dims mix).
    // Returns nullopt for ops that must not be combined.
    auto key_fn = [&domain_map, this](const HloInstruction* instruction)
        -> std::optional<ReduceScatterKey> {
      auto* rs = DynCast<HloReduceScatterInstruction>(instruction);
      std::optional<AllReduceKey> key =
          GetAllReduceKey(instruction, domain_map.get());
      if (!rs || !key) {
        return std::nullopt;
      }
      // Only combine ops whose reduction computation maps to a known
      // ReductionKind.
      if (!MatchReductionComputation(rs->to_apply())) {
        return std::nullopt;
      }
      int64_t rs_dim_key = this->combine_by_dim_ ? rs->scatter_dimension() : -1;
      return ReduceScatterKey{std::move(*key), rs_dim_key};
    };
    TF_ASSIGN_OR_RETURN(
        bool computation_changed,
        CombineInstructionsByKey<ReduceScatterKey>(
            computation, key_fn, &CombineReduceScatters,
            combine_threshold_in_bytes_, combine_threshold_count_));
    changed |= computation_changed;
  }
  return changed;
}
} | #include "xla/service/reduce_scatter_combiner.h"
#include <cstddef>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
// Defaults large enough that thresholds never block combining in these tests.
constexpr int64_t kMaxCombineCount = 256;
constexpr int64_t kMaxByteCount = 10 * 1024 * 1024;
// Fixture with helpers to run the combiner with given thresholds and to count
// reduce-scatter ops in a module.
class ReduceScatterCombinerTest : public HloTestBase {
 public:
  // Parses `hlo_module`, runs ReduceScatterCombiner, and EXPECTs the pass to
  // report `expect_change`.  Returns the (possibly transformed) module.
  absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
      absl::string_view hlo_module, bool expect_change,
      int64_t byte_threshold = kMaxByteCount,
      int64_t count_threshold = kMaxCombineCount, bool combine_by_dim = true) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
    VLOG(1) << "Before running ReduceScatterCombiner: "
            << ReduceScatterCount(module.get()) << " reduce-scatter ops";
    auto changed =
        ReduceScatterCombiner(byte_threshold, count_threshold, combine_by_dim)
            .Run(module.get());
    if (!changed.ok()) {
      return changed.status();
    }
    VLOG(1) << "After running ReduceScatterCombiner: "
            << ReduceScatterCount(module.get()) << " reduce-scatter ops";
    EXPECT_EQ(changed.value(), expect_change);
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }
  // Counts kReduceScatter instructions across all computations.
  size_t ReduceScatterCount(HloModule *module) {
    int64_t sum = 0;
    for (auto comp : module->computations()) {
      sum += absl::c_count_if(comp->instructions(),
                              HloPredicateIsOp<HloOpcode::kReduceScatter>);
    }
    return sum;
  }
};
// Two independent, same-dimension reduce-scatters combine into one.
TEST_F(ReduceScatterCombinerTest, Simple) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
// With combine_by_dim=true (the default), dim-0 and dim-1 ops land in
// separate groups: four ops collapse to two.
TEST_F(ReduceScatterCombinerTest, SimpleMultipleGroups) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8, 8] parameter(0)
  p1 = f32[8, 8] parameter(1)
  rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
  tuple(rs0, rs1, rs2, rs3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_EQ(ReduceScatterCount(module.get()), 2);
}
// With combine_by_dim=false, ops over different scatter dims combine into a
// single collective (via bitcast dimension swaps).
TEST_F(ReduceScatterCombinerTest, DifferentDimensions) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8, 8] parameter(0)
  p1 = f32[8, 8] parameter(1)
  rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs1 = f32[4, 8] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs2 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  rs3 = f32[8, 4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  ROOT t = (f32[4, 8], f32[4, 8], f32[8, 4], f32[8, 4])
  tuple(rs0, rs1, rs2, rs3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, true, kMaxByteCount,
                           kMaxCombineCount, false));
  EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
// Mixed ranks also combine when not grouping by dimension (the fallback
// scatter dimension 0 is valid for every rank).
TEST_F(ReduceScatterCombinerTest, DifferentDimensionsAndRanks) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8, 8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  rs1 = f32[8, 4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={1},
  to_apply=sum
  rs2 = f32[4] reduce-scatter(p1), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  ROOT t = (f32[8, 4], f32[8, 4], f32[4])
  tuple(rs0, rs1, rs2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, RunPass(hlo_string, true, kMaxByteCount,
                           kMaxCombineCount, false));
  EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
// One op feeds the other, so they cannot be merged: expect no change.
TEST_F(ReduceScatterCombinerTest, DependentReduceScatter) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8, 8] parameter(0)
  rs0 = f32[4, 8] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs1 = f32[2, 8] reduce-scatter(rs0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  ROOT t = (f32[4, 8], f32[2, 8]) tuple(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Different replica group orderings yield different keys: no combining.
TEST_F(ReduceScatterCombinerTest, DoNotCombineMismatched) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  rs0 = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0},
  to_apply=sum
  rs1 = f32[4] reduce-scatter(p1), replica_groups={{1,0}}, dimensions={0},
  to_apply=sum
  ROOT t = (f32[4], f32[4]) tuple(rs0, rs1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// The convert-based bf16 reductions don't match a known ReductionKind, so
// the key function rejects them and nothing combines.
TEST_F(ReduceScatterCombinerTest, DoNotCombineWithoutReductionKind) {
  absl::string_view hlo_string = R"(
HloModule TestModule
region_0 {
  Arg_1 = bf16[] parameter(1)
  Arg_0 = bf16[] parameter(0)
  convert_1 = f32[] convert(Arg_1)
  convert_0 = f32[] convert(Arg_0)
  add0 = f32[] add(convert_1, convert_0)
  ROOT convert_2 = bf16[] convert(add0)
}
region_1 {
  Arg_1 = bf16[] parameter(1)
  Arg_0 = bf16[] parameter(0)
  convert_1 = f32[] convert(Arg_1)
  convert_0 = f32[] convert(Arg_0)
  add0 = f32[] add(convert_1, convert_0)
  ROOT convert_2 = bf16[] convert(add0)
}
ENTRY entry{
  param0 = bf16[512,256]{1,0} parameter(0)
  param1 = bf16[512,256]{1,0} parameter(1)
  reduce-scatter.0 = bf16[512,256]{1,0} reduce-scatter(param0),
  replica_groups={{0}}, dimensions={0}, to_apply=region_0
  reduce-scatter.1 = bf16[512,256]{1,0} reduce-scatter(param1),
  replica_groups={{0}}, dimensions={0}, to_apply=region_1
  ROOT add.0 = tuple(reduce-scatter.0, reduce-scatter.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// A byte threshold exactly equal to the sum of all operand sizes still lets
// everything combine into one op (threshold is inclusive).
TEST_F(ReduceScatterCombinerTest, HighThreshold) {
  absl::string_view hlo_string = R"(
HloModule m
sum_reduce {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
ENTRY main {
  param.0 = bf16[1024,32768]{1,0} parameter(0)
  param.1 = bf16[4096,8192]{1,0} parameter(1)
  param.2 = bf16[3,128,64,1024]{2,1,0,3}parameter(2)
  param.3 = bf16[1024,128,64]{2,1,0} parameter(3)
  reduce-scatter.19 = bf16[1024,32768]{1,0} reduce-scatter(param.0),
  channel_id=132, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
  reduce-scatter.21 = bf16[4096,8192]{1,0} reduce-scatter(param.1),
  channel_id=134, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
  reduce-scatter.23 = bf16[3,128,64,1024]{2,1,0,3} reduce-scatter(param.2),
  channel_id=136, replica_groups={{0}}, dimensions={3}, to_apply=sum_reduce
  reduce-scatter.25 = bf16[1024,128,64]{2,1,0} reduce-scatter(param.3),
  channel_id=138, replica_groups={{0}}, dimensions={0}, to_apply=sum_reduce
  ROOT tuple = tuple(reduce-scatter.19, reduce-scatter.21, reduce-scatter.23,
  reduce-scatter.25)
})";
  int64_t combined_bytes = 67108864 + 67108864 + 50331648 + 16777216;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      RunPass(hlo_string, true,
              combined_bytes,
              kMaxCombineCount, false));
  EXPECT_EQ(ReduceScatterCount(module.get()), 1);
}
}
} |
1,949 | cpp | tensorflow/tensorflow | hlo_rematerialization | third_party/xla/xla/service/hlo_rematerialization.cc | third_party/xla/xla/service/hlo_rematerialization_test.cc | #ifndef XLA_SERVICE_HLO_REMATERIALIZATION_H_
#define XLA_SERVICE_HLO_REMATERIALIZATION_H_
#include <optional>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
namespace xla {
// HLO pass that rematerializes instruction values (by recomputation,
// compression, or host offload) to bring peak memory under a limit.
// Fix: `next_channel_id_` was previously left uninitialized, so
// NextChannelId() read an indeterminate value if called before Run(); it is
// now brace-initialized to 0, consistent with the sibling counters.
class HloRematerialization : public HloModulePass {
 public:
  using ShapeSizeFunction = std::function<int64_t(const Shape&)>;
  // Maps a shape to a (possibly more memory-compact) shape used when
  // compressing a live value.
  using CompactShapeFunction =
      std::function<absl::StatusOr<Shape>(const Shape&)>;
  // Peak module memory (bytes) before/after the pass; -1 until filled in.
  struct RematerializationSizes {
    int64_t before_bytes = -1;
    int64_t after_bytes = -1;
  };
  // Selects which rematerialization strategies the pass may use.
  struct RematerializationModeConfig {
    RematerializationModeConfig(bool recompute, bool compress,
                                bool host_offload)
        : recompute(recompute),
          compress(compress),
          host_offload(host_offload) {}
    bool recompute;
    bool compress;
    bool host_offload;
  };
  // Parameters for offloading live values to host memory.
  struct HostMemoryOffloadConfig {
    explicit HostMemoryOffloadConfig(int64_t host_memory_space,
                                     float bandwidth_to_host_bytes_per_second,
                                     float bandwidth_from_host_bytes_per_second)
        : host_memory_space(host_memory_space),
          bandwidth_to_host_bytes_per_second(
              bandwidth_to_host_bytes_per_second),
          bandwidth_from_host_bytes_per_second(
              bandwidth_from_host_bytes_per_second) {}
    int64_t host_memory_space;
    float bandwidth_to_host_bytes_per_second;
    float bandwidth_from_host_bytes_per_second;
  };
  // Identity compact-shape function, used when the caller supplies none.
  static Shape DefaultCompactShapeFunction(const Shape& shape) { return shape; }
  struct Options {
    explicit Options(HloCostAnalysis& hlo_cost_analysis,
                     const RematerializationModeConfig& remat_mode_config,
                     int64_t memory_limit_bytes, int block_size_limit,
                     int block_rematerialization_factor, int64_t min_remat_size,
                     CompactShapeFunction compact_shape_function,
                     std::optional<HostMemoryOffloadConfig>
                         host_memory_offload_config = std::nullopt,
                     absl::flat_hash_map<HloComputation*, int64_t>
                         async_computation_parallelism = {})
        : hlo_cost_analysis(hlo_cost_analysis),
          remat_mode_config(remat_mode_config),
          memory_limit_bytes(memory_limit_bytes),
          block_size_limit(block_size_limit),
          block_rematerialization_factor(block_rematerialization_factor),
          min_remat_size(min_remat_size),
          compact_shape_function(compact_shape_function == nullptr
                                     ? DefaultCompactShapeFunction
                                     : std::move(compact_shape_function)),
          host_memory_offload_config(host_memory_offload_config),
          async_computation_parallelism(async_computation_parallelism) {}
    HloCostAnalysis& hlo_cost_analysis;
    RematerializationModeConfig remat_mode_config;
    // NOTE(review): never initialized by the constructor; a default-constructed
    // const std::function throws std::bad_function_call when invoked. Confirm
    // it is unused, or wire it to hlo_cost_analysis's size function.
    const ShapeSizeFunction size_function;
    int64_t memory_limit_bytes;
    int block_size_limit;
    int block_rematerialization_factor;
    int64_t min_remat_size;
    CompactShapeFunction compact_shape_function;
    std::optional<HostMemoryOffloadConfig> host_memory_offload_config;
    absl::flat_hash_map<HloComputation*, int64_t> async_computation_parallelism;
  };
  explicit HloRematerialization(Options options, RematerializationSizes& sizes)
      : options_(std::move(options)), sizes_(sizes) {}
  ~HloRematerialization() override = default;
  absl::string_view name() const override { return "rematerialization"; }
  // Returns a fresh channel id for rematerialized channel instructions.
  int64_t NextChannelId() { return next_channel_id_++; }
  // Peak memory computed for `computation`; CHECK-fails (via at()) if the
  // computation was never analyzed.
  int64_t ComputationPeakMemory(const HloComputation* computation) const {
    return computation_peak_memory_.at(computation);
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 protected:
  // Convenience overload with an empty execution-thread filter.
  absl::StatusOr<bool> RematerializeComputation(HloComputation* computation,
                                                HloSchedule* schedule,
                                                int64_t memory_limit_bytes,
                                                int64_t min_remat_size) {
    return RematerializeComputation(computation, schedule, memory_limit_bytes,
                                    min_remat_size, {});
  }
  virtual absl::StatusOr<bool> RematerializeComputation(
      HloComputation* computation, HloSchedule* schedule,
      int64_t memory_limit_bytes, int64_t min_remat_size,
      const absl::flat_hash_set<absl::string_view>& execution_threads);
  absl::StatusOr<int64_t> ComputePeakMemory(
      const HloComputation* computation, const HloInstructionSequence& order,
      const absl::flat_hash_set<absl::string_view>& execution_threads) const;
  absl::StatusOr<int64_t> CalledComputationsMemoryUsage(
      const HloInstruction* instruction,
      const absl::flat_hash_set<absl::string_view>& execution_threads) const;
  const Options options_;
  RematerializationSizes& sizes_;
  std::unique_ptr<CallGraph> call_graph_;
  // Peak memory per computation, filled in during Run.
  absl::flat_hash_map<const HloComputation*, int64_t> computation_peak_memory_;
  std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
  // Computations already processed (avoids rematerializing twice).
  absl::flat_hash_set<const HloComputation*> rematerialized_computations_;
  // Statistics counters.
  int64_t instructions_rematerialized_ = 0;
  int64_t net_instructions_added_ = 0;
  int max_rematerialized_block_size_ = 0;
  // Initialized defensively; expected to be reassigned during Run.
  int64_t next_channel_id_ = 0;
};
}
#endif
#include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/map_util.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
// Returns whether `instruction` is a legal rematerialization candidate:
// not a same-layout copy, not a layout-constrained collective, not one of the
// structurally excluded opcodes, and free of side effects.
bool IsRematerializable(const HloInstruction* instruction) {
  // A copy that does not change layout is excluded.
  if (instruction->opcode() == HloOpcode::kCopy &&
      LayoutUtil::Equal(instruction->shape().layout(),
                        instruction->operand(0)->shape().layout())) {
    return false;
  }
  // Collectives qualify only when their layout is unconstrained.
  if (auto collective = DynCast<HloCollectiveInstruction>(instruction)) {
    return !collective->constrain_layout();
  }
  switch (instruction->opcode()) {
    case HloOpcode::kCall:
    case HloOpcode::kConstant:
    case HloOpcode::kConditional:
    case HloOpcode::kCustomCall:
    case HloOpcode::kParameter:
    case HloOpcode::kWhile:
      return false;
    default:
      // Everything else qualifies unless it has side effects.
      return !instruction->HasSideEffect();
  }
}
bool CanBeRematerialized(
const HloInstruction* instruction,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
auto it = rematerializable_map->find(instruction);
if (it != rematerializable_map->end()) {
return it->second;
}
bool rematerializable = IsRematerializable(instruction);
(*rematerializable_map)[instruction] = rematerializable;
return rematerializable;
}
// Returns whether `instruction` is one of the indirect-user opcodes the pass
// knows how to handle (bitcast or get-tuple-element).
bool IsSupportedIndirectUser(const HloInstruction* instruction) {
  switch (instruction->opcode()) {
    case HloOpcode::kBitcast:
    case HloOpcode::kGetTupleElement:
      return true;
    default:
      return false;
  }
}
// Identifier for a logical buffer tracked by the pass.
using BufferId = int64_t;
using BufferIdList = absl::InlinedVector<BufferId, 3>;
// How a candidate value would be rematerialized.
struct RematStrategy {
  enum {
    kRecompute,
    kCompress,
    kHostOffload,
  } kind;
  // Target shape when kind == kCompress.
  Shape compact_shape;
};
// One node of the InstructionList below: a scheduled instruction plus
// rematerialization bookkeeping.
struct Item {
  HloInstruction* instruction;
  // Set as the pass walks the sequence; semantics live outside this view.
  bool placed = false;
  // If true, the instruction must not be rematerialized.
  bool denylisted = false;
  // Buffers defined by / output by / used by this instruction.
  BufferIdList buffers_defined;
  BufferIdList buffers_output;
  BufferIdList buffers_used;
  // Whether the item participates in the "express lane" skip list
  // (see InstructionList::PromoteNodesToSkip).
  bool is_skip_node = false;
 private:
  friend class InstructionList;
  // Intrusive doubly linked list, plus the skip-list threading.
  Item* next = nullptr;
  Item* prev = nullptr;
  Item* prev_skip_node = nullptr;
  Item* next_skip_node = nullptr;
  // Position in the sequence; presumably items inserted later share a
  // neighbor's position (InsertBeforeInstructions breaks such ties).
  int64_t position;
};
// One use of an Item: the user, the operand slot, and (for tuple-typed
// values) the tuple index, if any.
struct ItemUse {
  Item* user;
  int64_t operand_number;
  std::optional<int64_t> index;
  ItemUse(Item* user, int64_t op_num, std::optional<int64_t> index)
      : user(user), operand_number(op_num), index(index) {}
  bool operator==(const ItemUse& other) const {
    return user == other.user && operand_number == other.operand_number &&
           index == other.index;
  }
};
using ItemList = absl::InlinedVector<Item*, 3>;
using UsesList = absl::InlinedVector<ItemUse, 3>;
class InstructionList {
public:
  // Builds a doubly linked list of Items mirroring `order`, assigning each
  // instruction a monotonically increasing position number and registering
  // it in item_map_.
  explicit InstructionList(const HloInstructionSequence& order) {
    int64_t position = 0;
    Item* last = nullptr;
    last_skip_node_ = nullptr;
    first_skip_node_ = nullptr;
    for (HloInstruction* inst : order.instructions()) {
      // Append a freshly allocated item at the tail of the list.
      Item* item = new Item;
      item->next = nullptr;
      item->prev = last;
      if (last == nullptr) {
        first_ = item;
      } else {
        last->next = item;
      }
      last = item;
      item->instruction = inst;
      item->position = position;
      position++;
      item_map_[inst] = item;
    }
  }
  ~InstructionList() {
    // The list owns every Item reachable from first_.
    for (Item* item = first_; item != nullptr;) {
      Item* next = item->next;
      delete item;
      item = next;
    }
  }
  size_t size() const { return item_map_.size(); }
  // Linked-list traversal; next/prev return nullptr at the ends.
  Item* first() const { return first_; }
  Item* next(Item* item) const { return item->next; }
  const Item* next(const Item* item) const { return item->next; }
  Item* prev(Item* item) const { return item->prev; }
  const Item* prev(const Item* item) const { return item->prev; }
  // Express-lane (skip list) traversal; see PromoteNodesToSkip.
  Item* first_skip_node() const { return first_skip_node_; }
  Item* next_skip_node(Item* item) const { return item->next_skip_node; }
  // Allocates an Item for `inst` and registers it in item_map_, but does NOT
  // link it into the list.  NOTE(review): the destructor frees only linked
  // items, so an item created here but never inserted would leak — confirm
  // all callers insert it.
  Item* CreateItem(HloInstruction* inst) {
    Item* item = new Item;
    item->instruction = inst;
    CHECK(item_map_.insert({inst, item}).second)
        << "inserting inst twice " << inst->name();
    return item;
  }
  // Returns the Item for `inst`; CHECK-fails if it was never registered.
  Item* GetItem(const HloInstruction* inst) const {
    auto iter = item_map_.find(inst);
    CHECK(iter != item_map_.end()) << "Did not find " << inst->name();
    return iter->second;
  }
void InsertBeforeInstructions(Item* to_insert,
absl::Span<Item* const> before_instructions) {
VLOG(3) << "InsertBeforeInstructions: " << to_insert->instruction->name()
<< " before {"
<< absl::StrJoin(before_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!before_instructions.empty());
Item* min_position_item = nullptr;
for (Item* item : before_instructions) {
if (min_position_item == nullptr ||
item->position < min_position_item->position) {
min_position_item = item;
}
}
while (min_position_item->prev != nullptr &&
min_position_item->position == min_position_item->prev->position) {
min_position_item = min_position_item->prev;
}
while (!absl::c_linear_search(before_instructions, min_position_item)) {
min_position_item = min_position_item->next;
}
return InsertBefore(to_insert, min_position_item);
}
void PromoteNodesToSkip(absl::FunctionRef<bool(Item*)> should_promote) {
int64_t count = 0;
for (auto* item = first(); item != nullptr; item = next(item)) {
if (should_promote(item)) {
count += 1;
if (first_skip_node_ == nullptr) {
first_skip_node_ = item;
}
item->is_skip_node = true;
item->prev_skip_node = last_skip_node_;
if (last_skip_node_ != nullptr) {
last_skip_node_->next_skip_node = item;
}
last_skip_node_ = item;
}
}
VLOG(1) << " Rematerialization has " << count << " items in express lane";
}
void InsertAfterInstructions(Item* to_insert,
absl::Span<Item* const> after_instructions) {
VLOG(3) << "InsertAfterInstructions: " << to_insert->instruction->name()
<< " after {"
<< absl::StrJoin(after_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!after_instructions.empty());
Item* max_position_item = nullptr;
for (Item* item : after_instructions) {
if (max_position_item == nullptr ||
item->position > max_position_item->position) {
max_position_item = item;
}
}
CHECK(max_position_item->next != nullptr);
InsertBeforeInstructions(to_insert, {max_position_item->next});
}
void Denylist(const HloInstruction* inst) {
GetItem(inst)->denylisted = true;
}
private:
void InsertBefore(Item* item, Item* before) {
VLOG(3) << "InsertBefore: " << item->instruction->name() << " before "
<< before->instruction->name();
item->is_skip_node = true;
Item* cursor = before;
while (cursor != nullptr && !cursor->is_skip_node) {
cursor = cursor->next;
}
CHECK(cursor == nullptr || cursor->is_skip_node);
if (cursor == nullptr) {
item->prev_skip_node = last_skip_node_;
item->next_skip_node = nullptr;
last_skip_node_ = item;
} else {
CHECK(cursor->is_skip_node);
item->prev_skip_node = cursor->prev_skip_node;
if (item->prev_skip_node != nullptr) {
item->prev_skip_node->next_skip_node = item;
}
item->next_skip_node = cursor;
cursor->prev_skip_node = item;
}
if (first_skip_node_ == cursor) {
first_skip_node_ = item;
}
item->prev = before->prev;
item->next = before;
before->prev = item;
if (item->prev != nullptr) {
item->prev->next = item;
} else {
first_ = item;
}
item->position = before->position;
}
Item* first_;
Item* first_skip_node_;
Item* last_skip_node_;
absl::flat_hash_map<const HloInstruction*, Item*> item_map_;
};
// Collects the distinct uses of `logical_buffer`, walking every alias of the
// buffer reported by points-to analysis. Sets `*has_indirect_users` to true
// if any alias other than the defining instruction is not one of the
// supported indirect-user kinds (bitcast / get-tuple-element).
UsesList GetUsers(const InstructionList& instruction_list,
                  const LogicalBuffer* logical_buffer,
                  const TuplePointsToAnalysis& points_to_analysis,
                  bool* has_indirect_users) {
  UsesList users;
  *has_indirect_users = false;
  for (const BufferAlias& buffer_alias :
       points_to_analysis.GetBufferAliases(*logical_buffer)) {
    for (const HloInstruction* user : buffer_alias.instruction()->users()) {
      if (points_to_analysis.DoesNotUseOperandBuffer(
              buffer_alias.instruction(), buffer_alias.index(), user)) {
        // The alias is an operand of `user`, but this particular buffer is
        // not actually read through it, so it is not a use.
        continue;
      }
      if (buffer_alias.instruction() != logical_buffer->instruction() &&
          !IsSupportedIndirectUser(buffer_alias.instruction())) {
        *has_indirect_users = true;
      }
      Item* user_item = instruction_list.GetItem(user);
      // Only record a tuple index when the buffer's index is exactly one
      // element deep.
      std::optional<int64_t> user_index =
          logical_buffer->index().size() != 1
              ? std::nullopt
              : std::make_optional(logical_buffer->index().back());
      for (int64_t op_idx : user->OperandIndices(buffer_alias.instruction())) {
        // Fix: keep the operand index as int64_t; the previous
        // static_cast<int> narrowed it only to widen it back in ItemUse.
        ItemUse use{user_item, op_idx, user_index};
        // Deduplicate: the same use can be reached through several aliases.
        if (!absl::c_linear_search(users, use)) {
          users.push_back(use);
        }
      }
    }
  }
  return users;
}
class MemoryUsageTracker {
public:
MemoryUsageTracker(const HloRematerialization::Options& options,
const HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const InstructionList& instruction_list);
absl::Status BeginInstruction(Item* item);
int64_t RematerializationCost(const std::vector<Item*>& items,
int64_t memory_reduced,
int64_t memory_limit_bytes) const {
bool zero_cost_move = true;
for (auto* item : items) {
auto* instruction = item->instruction;
if (absl::c_any_of(
instruction->users(),
[this](const HloInstruction* inst) { return IsPlaced(inst); })) {
zero_cost_move = false;
break;
}
}
if (zero_cost_move) {
return 0;
}
CHECK_GT(memory_reduced, 0);
return memory_limit_bytes / memory_reduced;
}
absl::Status EndInstruction();
int64_t MemoryReducedIfCompressed(const Item* item,
const Shape& compact_shape) const;
int64_t MemoryReducedIfRematerialized(
absl::Span<const Item* const> items) const;
absl::Status AddCompressInstructions(Item* original_item,
Item* compressed_item,
Item* uncompressed_item);
absl::Status AddRematerializedInstruction(Item* original_item, | #include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_rematerialization_test_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
// Fixture for rematerialization tests involving computations executed on
// asynchronous execution threads with a configurable degree of parallelism.
class AsyncRematerializationTest : public RematerializationTestBase {
 protected:
  // Schedules `module` if it has no schedule yet, then runs
  // HloRematerialization with the given memory limit and per-computation
  // parallelism map. Returns whether the pass changed the module.
  absl::StatusOr<bool> RunHloRematerialization(
      int64_t memory_limit_bytes, HloModule* module,
      const absl::flat_hash_map<HloComputation*, int64_t>&
          async_computation_parallelism,
      int64_t min_remat_size = 0) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (!module->has_schedule()) {
      HloMemoryScheduler scheduler(
          [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
          ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
      TF_EXPECT_OK(scheduler.Run(module).status());
    }
    // NOTE(review): the three bools select rematerialization modes — confirm
    // their order against RematerializationModeConfig's constructor.
    HloRematerialization::RematerializationModeConfig config(
        true, true, false);
    auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
    HloCostAnalysis cost_analysis(shape_size_func);
    HloRematerialization::Options options(
        cost_analysis, config, memory_limit_bytes,
        1, 1,
        min_remat_size, nullptr,
        std::nullopt,
        async_computation_parallelism);
    HloRematerialization::RematerializationSizes sizes;
    HloRematerialization remat(options, sizes);
    // Only rematerialize computations on the main execution thread.
    return remat.Run(module, {HloInstruction::kMainExecutionThread});
  }

  // Parallelism assumed for the offloaded async computation in these tests.
  static constexpr int64_t kNumParallelThreads = 16;
};
// Verifies that rematerialization fires on a module whose entry calls an
// async-wrapped computation: the memory limit accounts for
// kNumParallelThreads concurrent instances of the offloaded computation.
TEST_F(AsyncRematerializationTest, AsyncComputation) {
  constexpr std::string_view hlo = R"(
HloModule async, is_scheduled=true
%offload_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
%main_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
ENTRY %main {
%param = f32[1]{0} parameter(0)
%call-start = ((f32[1]{0}), f32[1]{0}, s32[]) call-start(f32[1]{0} %param), to_apply=%offload_computation, async_execution_thread="offload"
%call-done = f32[1]{0} call-done(((f32[1]{0}), f32[1]{0}, s32[]) %call-start)
ROOT %call = f32[1]{0} call(f32[1]{0} %call-done), to_apply=%main_computation
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
  HloInstruction* call_start = FindInstruction(module.get(), "call-start");
  // Budget: kNumParallelThreads copies of the 16KiB offload working set plus
  // 14KiB for the main computation — tight enough to force rematerialization.
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      RunHloRematerialization(
          kNumParallelThreads * 16 * 1024 + 14 * 1024,
          module.get(),
          {{call_start->async_wrapped_computation(), kNumParallelThreads}}));
  EXPECT_TRUE(changed);
}
// Fixture for recompute/compress rematerialization tests. Records instruction
// names before and after the pass so tests can assert that every newly
// created instruction is tagged with ".remat" in its name.
class RecomputeAndCompressHloRematerializationTest
    : public RematerializationTestBase {
 protected:
  // Schedules `module` if needed, snapshots pre-pass computation and
  // instruction names, runs HloRematerialization, then snapshots post-pass
  // instruction names of the surviving original computations.
  absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
                                               HloModule* module,
                                               int64_t min_remat_size = 0) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (!module->has_schedule()) {
      HloMemoryScheduler scheduler(
          [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
          ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
      TF_EXPECT_OK(scheduler.Run(module).status());
    }
    // Snapshot names before the pass runs.
    for (const HloComputation* computation : module->computations()) {
      before_computation_names_.insert(computation->name());
      for (const HloInstruction* instruction : computation->instructions()) {
        before_instruction_names_.insert(instruction->name());
      }
    }
    // NOTE(review): the three bools select rematerialization modes — confirm
    // their order against RematerializationModeConfig's constructor.
    HloRematerialization::RematerializationModeConfig config(
        true, true, false);
    auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
    HloCostAnalysis cost_analysis(shape_size_func);
    HloRematerialization::Options options(
        cost_analysis, config, memory_limit_bytes,
        1, 1,
        min_remat_size, nullptr,
        std::nullopt,
        {});
    HloRematerialization::RematerializationSizes sizes;
    HloRematerialization remat(options, sizes);
    absl::StatusOr<bool> result = remat.Run(module);
    // Snapshot names after the pass, skipping computations the pass created.
    for (const HloComputation* computation : module->computations()) {
      if (!before_computation_names_.contains(computation->name())) {
        continue;
      }
      for (const HloInstruction* instruction : computation->instructions()) {
        after_instruction_names_.insert(instruction->name());
      }
    }
    return result;
  }

  // Asserts that every instruction created by the pass carries ".remat" in
  // its name, so rematerialized ops are identifiable in dumps.
  void CheckForRematInInstructionNames(absl::string_view test_case_name) {
    constexpr const absl::string_view kRematInstructionNameMustContain =
        ".remat";
    for (const auto& instruction_name : after_instruction_names_) {
      if (!before_instruction_names_.contains(instruction_name)) {
        EXPECT_TRUE(absl::StrContains(instruction_name,
                                      kRematInstructionNameMustContain))
            << "[" << test_case_name << "] Instruction \"" << instruction_name
            << "\" must contain \"" << kRematInstructionNameMustContain << "\"";
      }
    }
  }

 private:
  absl::flat_hash_set<absl::string_view> before_computation_names_;
  absl::flat_hash_set<absl::string_view> before_instruction_names_;
  absl::flat_hash_set<absl::string_view> after_instruction_names_;
};
// A broadcast in a single computation is rematerialized (duplicated) next to
// its later use when the memory limit is too tight for the original schedule.
TEST_F(RecomputeAndCompressHloRematerializationTest, SingleComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());
  // Save pointers to the root and the broadcast expected to be remat'ed.
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
  const HloInstruction* concat = slice->operand(0);
  const HloInstruction* bcast = concat->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              14 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // Root is unchanged, but the concat's operand is now a fresh broadcast.
  EXPECT_EQ(computation->root_instruction(), slice);
  const HloInstruction* remat_bcast = concat->operand(0);
  EXPECT_THAT(remat_bcast, op::Broadcast(::testing::Ne(bcast)));
  // The rematerialized broadcast is scheduled immediately before the concat.
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 2],
            concat);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 3],
            remat_bcast);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// With min_remat_size equal to the memory limit, no candidate saves enough
// memory to be worth rematerializing, so the pass leaves the module alone.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       SingleComputationNoWorthRemat) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              14 * 1024, module.get(),
                              14 * 1024));
  EXPECT_FALSE(changed);
}
// With a generous memory limit the pass has nothing to do: no change and no
// new instructions.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       SingleComputationNoRematerialization) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());
  EXPECT_EQ(computation->instruction_count(), 8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              20 * 1024, module.get()));
  EXPECT_FALSE(changed);
  EXPECT_EQ(computation->instruction_count(), 8);
}
// A memory limit that only the entry computation exceeds causes
// rematerialization in the entry (one extra instruction) while the while-body
// is left untouched.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematerializeAroundWhile) {
  auto module = CreateNewVerifiedModule();
  // Trivial while condition that always returns true.
  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());
  HloComputation* body_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".body"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond, body_computation));
  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(body_computation->instruction_count(), 8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              17 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // Only the entry grew; the body is unchanged.
  EXPECT_EQ(entry_computation->instruction_count(), 8);
  EXPECT_EQ(body_computation->instruction_count(), 8);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// A tighter memory limit forces rematerialization in both the entry
// computation and the while body.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematerializeEntryAndWhileBody) {
  auto module = CreateNewVerifiedModule();
  // Trivial while condition that always returns true.
  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());
  HloComputation* body_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".body"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond, body_computation));
  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(body_computation->instruction_count(), 8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              15 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // Both computations gained rematerialized instructions.
  EXPECT_EQ(entry_computation->instruction_count(), 9);
  EXPECT_EQ(body_computation->instruction_count(), 9);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// Rematerialization reaches through two levels of nested while loops: the
// entry, the middle while body, and the innermost computation all grow.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematerializeNestedComputations) {
  auto module = CreateNewVerifiedModule();
  // Trivial while condition (cloned because each while needs its own).
  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());
  HloComputation* while_cond_copy =
      module->AddEmbeddedComputation(while_cond->Clone());
  HloComputation* inner_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".inner"));
  HloComputation* middle_computation =
      module->AddEmbeddedComputation(MakeRematerializableWhileComputation(
          while_cond, inner_computation,
          ".middle"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond_copy, middle_computation));
  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(middle_computation->instruction_count(), 7);
  EXPECT_EQ(inner_computation->instruction_count(), 8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              13 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // All three levels gained rematerialized instructions.
  EXPECT_EQ(entry_computation->instruction_count(), 9);
  EXPECT_EQ(middle_computation->instruction_count(), 9);
  EXPECT_EQ(inner_computation->instruction_count(), 9);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// Rng instructions are side-effecting (each evaluation yields different
// values), so the pass must never duplicate them even under memory pressure.
TEST_F(RecomputeAndCompressHloRematerializationTest, RngNotRematerialized) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  auto rng = builder.AddInstruction(HloInstruction::CreateRng(
      vec1024_shape_, RandomDistribution::RNG_UNIFORM, {param, param}));
  // Several consumers of `rng` spread across the computation so its buffer
  // stays live for a long range.
  auto tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kTanh, rng));
  auto exp = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kExp, rng));
  auto add_0 = builder.AddInstruction(
      HloInstruction::CreateBinary(vec1024_shape_, HloOpcode::kAdd, rng, tanh));
  auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, rng,
      builder.AddInstruction(HloInstruction::CreateBinary(
          vec1024_shape_, HloOpcode::kAdd, exp, add_0))));
  builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, rng,
      builder.AddInstruction(HloInstruction::CreateBinary(
          vec1024_shape_, HloOpcode::kAdd, tanh, add_1))));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  // Helper: count kRng instructions in a computation.
  auto count_rngs = [](const HloComputation* computation) {
    int64_t rng_count = 0;
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kRng) {
        ++rng_count;
      }
    }
    return rng_count;
  };
  ASSERT_EQ(count_rngs(entry_computation), 1);
  const int64_t original_instruction_count =
      entry_computation->instruction_count();
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      RunHloRematerialization(
          4 * ByteSizeOf(vec1024_shape_), module.get()));
  EXPECT_TRUE(changed);
  // Something was rematerialized, but never the rng itself.
  EXPECT_EQ(count_rngs(entry_computation), 1);
  EXPECT_GT(entry_computation->instruction_count(), original_instruction_count);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// A single broadcast feeding three later add/call pairs is rematerialized
// once per consumer, yielding four broadcasts in total, and each add is
// rewired to its own fresh copy.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       InstructionRematerializedMultipleTimes) {
  auto module = CreateNewVerifiedModule();
  // Subcomputation: concat the 1024-element input with itself, then slice
  // back down to 1024 elements — keeps live memory high inside the call.
  HloComputation* subcomputation = nullptr;
  {
    auto builder = HloComputation::Builder(TestName() + ".subcomputation");
    auto param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
    auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
        ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},
        0));
    builder.AddInstruction(HloInstruction::CreateSlice(
        vec1024_shape_, concat, {0},
        {1024}, {1}));
    subcomputation = module->AddEmbeddedComputation(builder.Build());
  }
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  auto bcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
  auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
  auto call_1 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
  auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
  auto call_2 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_2}, subcomputation));
  auto add_3 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_2));
  auto call_3 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_3}, subcomputation));
  auto add_4 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_3));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  // Helper: count kBroadcast instructions in a computation.
  auto count_broadcasts = [](const HloComputation* computation) {
    int64_t bcast_count = 0;
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kBroadcast) {
        bcast_count++;
      }
    }
    return bcast_count;
  };
  EXPECT_EQ(count_broadcasts(entry_computation), 1);
  EXPECT_EQ(entry_computation->instruction_count(), 9);
  EXPECT_EQ(add_2->operand(0), bcast);
  EXPECT_EQ(add_3->operand(0), bcast);
  EXPECT_EQ(add_4->operand(0), bcast);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              22 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // The broadcast was rematerialized three times (once per later add).
  EXPECT_EQ(count_broadcasts(entry_computation), 4);
  EXPECT_EQ(entry_computation->instruction_count(), 12);
  // Each add now consumes a distinct, fresh broadcast of the same parameter.
  EXPECT_NE(add_2->operand(0), bcast);
  EXPECT_THAT(add_2->operand(0), op::Broadcast(param));
  EXPECT_NE(add_3->operand(0), bcast);
  EXPECT_THAT(add_3->operand(0), op::Broadcast(param));
  EXPECT_NE(add_4->operand(0), bcast);
  EXPECT_THAT(add_4->operand(0), op::Broadcast(param));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// Copy instructions must not be duplicated by rematerialization even when
// the copy's buffer is live across two independent negate chains.
TEST_F(RecomputeAndCompressHloRematerializationTest, CopyNotRematerialized) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
  auto copy = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kCopy, param));
  // Two independent consumer chains of the copy.
  auto negate_a_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
  auto negate_a_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      vec1024_shape_, HloOpcode::kNegate, negate_a_1));
  auto negate_b_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
  auto negate_b_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      vec1024_shape_, HloOpcode::kNegate, negate_b_1));
  builder.AddInstruction(HloInstruction::CreateTuple({negate_a_2, negate_b_2}));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              1 * 1024, module.get()));
  // Helper: count kCopy instructions in a computation.
  auto count_copies = [](const HloComputation* computation) {
    int64_t copy_count = 0;
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kCopy) {
        copy_count++;
      }
    }
    return copy_count;
  };
  EXPECT_TRUE(changed);
  // The module changed, but the copy was not duplicated.
  EXPECT_EQ(count_copies(entry_computation), 1);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// Rematerialization works "through" a bitcast: both the broadcast and the
// bitcast of it are recomputed right before the concatenate that uses them.
TEST_F(RecomputeAndCompressHloRematerializationTest, ThroughBitcastRemat) {
  const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %broadcast)
%concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0}
%slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]}
%bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto* computation = module->entry_computation();
  // Save pointers to the root and the broadcast reached through the bitcast.
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice,
              op::Slice(op::Concatenate(op::Bitcast(op::Broadcast(_)), _)));
  const HloInstruction* concat = slice->operand(0);
  const HloInstruction* bcast = concat->operand(0)->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              14 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // Root unchanged; the concat now reads a rematerialized bitcast/broadcast.
  EXPECT_EQ(computation->root_instruction(), slice);
  const HloInstruction* remat_bitcast = concat->operand(0);
  const HloInstruction* remat_broadcast = remat_bitcast->operand(0);
  EXPECT_THAT(remat_broadcast, op::Broadcast(::testing::Ne(bcast)));
  // Schedule order: ... remat_broadcast, remat_bitcast, concat at the end.
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 2],
            concat);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 3],
            remat_bitcast);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 4],
            remat_broadcast);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
// Regression test: two bitcast-of-broadcast chains feeding one add must not
// send the pass into an infinite rematerialization loop under an extremely
// tight (1KiB) memory limit; the structure must survive intact.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       ThroughBitcastRematInfiniteLoop) {
  const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1024] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%broadcast2 = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast2 = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast2)
ROOT %add = f32[1024]{0} add(f32[1024]{0} %bitcast, f32[1024]{0} %bitcast2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  // The limit is far below what the program needs; the pass should still
  // terminate.
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(
                              1024, module.get()));
  ASSERT_THAT(add, op::Add(op::Bitcast(op::Broadcast(_)),
                           op::Bitcast(op::Broadcast(_))));
  EXPECT_TRUE(changed);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShape) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte. |
1,950 | cpp | tensorflow/tensorflow | elemental_ir_emitter | third_party/xla/xla/service/gpu/elemental_ir_emitter.cc | third_party/xla/xla/service/elemental_ir_emitter_test.cc | #ifndef XLA_SERVICE_GPU_ELEMENTAL_IR_EMITTER_H_
#define XLA_SERVICE_GPU_ELEMENTAL_IR_EMITTER_H_
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
// GPU specialization of ElementalIrEmitter: lowers elementwise HLO math ops
// to device math-library calls (via TargetDeviceFunctionID) instead of the
// generic CPU lowerings.
class GpuElementalIrEmitter : public ElementalIrEmitter {
 public:
  GpuElementalIrEmitter(IrEmitterContext& ir_emitter_context,
                        llvm::IRBuilder<>* b);

 protected:
  llvm_ir::IrArray::Index GetSourceIndexOfBitcast(
      const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) override;

  absl::StatusOr<llvm::Value*> EmitFloatBinaryOp(
      const HloInstruction* op, llvm::Value* lhs_value,
      llvm::Value* rhs_value) override;

  // Unary/binary transcendental lowerings, each dispatching to the device
  // math library for the given primitive type.
  absl::StatusOr<llvm::Value*> EmitLog(PrimitiveType prim_type,
                                       llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitLog1p(PrimitiveType prim_type,
                                         llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitSin(PrimitiveType prim_type,
                                       llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitCos(PrimitiveType prim_type,
                                       llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitTan(PrimitiveType prim_type,
                                       llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitExp(PrimitiveType prim_type,
                                       llvm::Value* value,
                                       absl::string_view name) override;
  absl::StatusOr<llvm::Value*> EmitExpm1(PrimitiveType prim_type,
                                         llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitSqrt(PrimitiveType prim_type,
                                        llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitRsqrt(PrimitiveType prim_type,
                                         llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitPow(PrimitiveType prim_type,
                                       llvm::Value* lhs, llvm::Value* rhs,
                                       absl::string_view name) override;
  absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type,
                                         llvm::Value* lhs, llvm::Value* rhs,
                                         absl::string_view name) override;
  absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type,
                                        llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type,
                                       llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitComplexAbs(PrimitiveType prim_type,
                                              llvm::Value* value) override;
  absl::StatusOr<llvm::Value*> EmitCbrt(PrimitiveType prim_type,
                                        llvm::Value* value) override;

  absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall(
      const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
      absl::string_view, bool ) override;

  // Whether fast (non-NaN-propagating) min/max is enabled via debug options.
  bool fast_min_max() override {
    return ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max();
  }

 private:
  absl::StatusOr<llvm::Value*> EmitPowerOp(const HloInstruction* op,
                                           llvm::Value* lhs_value,
                                           llvm::Value* rhs_value);

  // Emits a call to a device-library math function identified by `funcid`,
  // handling F16 operand/result widening to F32 (see the .cc).
  absl::StatusOr<llvm::Value*> EmitDeviceMathCall(
      TargetDeviceFunctionID funcid, absl::Span<llvm::Value* const> operands,
      absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
      absl::string_view name = "");

  // Emits a call to `callee_name` with the given operand/result types.
  absl::StatusOr<llvm::Value*> EmitMathCall(
      const std::string& callee_name, absl::Span<llvm::Value* const> operands,
      absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
      absl::string_view name = "");

  IrEmitterContext& ir_emitter_context_;
};
}
}
#endif
#include "xla/service/gpu/elemental_ir_emitter.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/ModRef.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_nested.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/math_ops.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
// Binds the elemental emitter to the LLVM module owned by
// `ir_emitter_context` and retains a reference to the context, which is later
// consulted for debug options and device capabilities.
GpuElementalIrEmitter::GpuElementalIrEmitter(
    IrEmitterContext& ir_emitter_context, llvm::IRBuilder<>* b)
    : ElementalIrEmitter(ir_emitter_context.llvm_module(), b),
      ir_emitter_context_(ir_emitter_context) {}
// Emits a call to the device math library function identified by `funcid`.
//
// The device math library only provides f32/f64 entry points, so for an F16
// result every F16 operand is widened to F32, the F32 variant is called, and
// the result is narrowed back to F16. Returns Unimplemented for any other
// element type.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitDeviceMathCall(
    TargetDeviceFunctionID funcid, absl::Span<llvm::Value* const> operands,
    absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
    absl::string_view name) {
  bool cast_result_to_fp16 = false;
  std::vector<llvm::Value*> converted_operands(operands.begin(),
                                               operands.end());
  std::vector<PrimitiveType> converted_input_types(input_types.begin(),
                                                   input_types.end());
  switch (output_type) {
    case F16:
      cast_result_to_fp16 = true;
      for (size_t i = 0; i < operands.size(); ++i) {
        if (input_types[i] == F16) {
          converted_operands[i] =
              FPCast(converted_operands[i], b()->getFloatTy());
          converted_input_types[i] = F32;
        }
      }
      output_type = F32;
      [[fallthrough]];
    case F32:
      break;
    case F64:
      break;
    default:
      return Unimplemented("Bad type for device math call: %s",
                           PrimitiveType_Name(output_type));
  }
  // Resolve the target-specific mangled function name for the current triple.
  const std::string munged_callee = ObtainDeviceFunctionName(
      funcid, output_type,
      llvm::Triple(b()->GetInsertBlock()->getModule()->getTargetTriple()));
  // BUG FIX: the previous code unwrapped the StatusOr with .value(), which
  // aborts the process on failure instead of propagating the error to the
  // caller despite this function returning absl::StatusOr.
  absl::StatusOr<llvm::Value*> call_result = EmitMathCall(
      munged_callee, converted_operands, converted_input_types, output_type,
      name);
  if (!call_result.ok()) {
    return call_result.status();
  }
  llvm::Value* result = *call_result;
  if (cast_result_to_fp16) {
    result = FPCast(result, b()->getHalfTy());
  }
  return result;
}
// Emits a call to the math function `callee_name`.
//
// Device math functions are homogeneous: every operand type must match the
// result type, otherwise Unimplemented is returned. The emitted call is
// annotated as memory-effect-free and non-unwinding.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
    const std::string& callee_name, absl::Span<llvm::Value* const> operands,
    absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
    absl::string_view name) {
  for (PrimitiveType input_type : input_types) {
    if (input_type == output_type) {
      continue;
    }
    return Unimplemented("Input type != output type: %s != %s",
                         PrimitiveType_Name(input_type),
                         PrimitiveType_Name(output_type));
  }
  llvm::AttrBuilder attributes(b()->getContext());
  attributes.addMemoryAttr(llvm::MemoryEffects::none())
      .addAttribute(llvm::Attribute::NoUnwind);
  return EmitDeviceFunctionCall(callee_name, operands, input_types, output_type,
                                attributes, b(), name);
}
// Translates `index` (an index into the bitcast's result) into an index into
// the bitcast's operand.
//
// A GPU bitcast may carry explicit source/result layouts in its backend
// config; when present, those layouts override the ones on the HLO shapes
// before the index translation is performed.
llvm_ir::IrArray::Index GpuElementalIrEmitter::GetSourceIndexOfBitcast(
    const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) {
  Shape result_shape = hlo->shape();
  Shape source_shape = hlo->operand(0)->shape();
  auto gpu_config = hlo->backend_config<GpuBackendConfig>();
  CHECK_OK(gpu_config);
  const BitcastBackendConfig& bitcast_config =
      gpu_config.value().bitcast_backend_config();
  if (!bitcast_config.result_layout().minor_to_major().empty()) {
    *result_shape.mutable_layout() =
        xla::Layout::CreateFromProto(bitcast_config.result_layout());
  }
  if (!bitcast_config.source_layout().minor_to_major().empty()) {
    *source_shape.mutable_layout() =
        xla::Layout::CreateFromProto(bitcast_config.source_layout());
  }
  return index.SourceIndexOfBitcast(result_shape, source_shape, b());
}
// Lowers a floating-point binary HLO op.
//
// Min/max get two GPU-specific fast paths (checked in this order):
//   1. fast-min-max debug option -> llvm.maxnum/llvm.minnum;
//   2. F32 on Ampere or newer    -> llvm.maximum/llvm.minimum.
// Remainder and power are routed to the device math library; everything else
// falls back to the generic ElementalIrEmitter lowering.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitFloatBinaryOp(
    const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
  const PrimitiveType lhs_type = op->operand(0)->shape().element_type();
  const PrimitiveType rhs_type = op->operand(1)->shape().element_type();
  const PrimitiveType result_type = op->shape().element_type();
  const HloOpcode opcode = op->opcode();
  const bool is_min_or_max =
      opcode == HloOpcode::kMaximum || opcode == HloOpcode::kMinimum;
  if (is_min_or_max &&
      ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max()) {
    const llvm::Intrinsic::ID intrinsic = opcode == HloOpcode::kMaximum
                                              ? llvm::Intrinsic::maxnum
                                              : llvm::Intrinsic::minnum;
    return llvm_ir::EmitCallToIntrinsic(intrinsic, {lhs_value, rhs_value},
                                        {lhs_value->getType()}, b());
  }
  if (is_min_or_max && result_type == F32 &&
      ir_emitter_context_.cuda_compute_capability().IsAtLeast(
          se::CudaComputeCapability::AMPERE)) {
    const llvm::Intrinsic::ID intrinsic = opcode == HloOpcode::kMaximum
                                              ? llvm::Intrinsic::maximum
                                              : llvm::Intrinsic::minimum;
    return llvm_ir::EmitCallToIntrinsic(intrinsic, {lhs_value, rhs_value},
                                        {lhs_value->getType()}, b());
  }
  if (opcode == HloOpcode::kRemainder) {
    return EmitDeviceMathCall(TargetDeviceFunctionID::kFmod,
                              {lhs_value, rhs_value}, {lhs_type, rhs_type},
                              result_type);
  }
  if (opcode == HloOpcode::kPower) {
    return EmitPowerOp(op, lhs_value, rhs_value);
  }
  return ElementalIrEmitter::EmitFloatBinaryOp(op, lhs_value, rhs_value);
}
// Lowers an HLO kPower op by delegating to the device math library's pow.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPowerOp(
    const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
  CHECK_EQ(op->opcode(), HloOpcode::kPower);
  const PrimitiveType lhs_type = op->operand(0)->shape().element_type();
  const PrimitiveType rhs_type = op->operand(1)->shape().element_type();
  return EmitDeviceMathCall(TargetDeviceFunctionID::kPow,
                            {lhs_value, rhs_value}, {lhs_type, rhs_type},
                            op->shape().element_type());
}
// log(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kLog, {value},
                            {prim_type}, prim_type);
}
// log(1 + x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog1p(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kLog1p, {value},
                            {prim_type}, prim_type);
}
// sin(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSin(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kSin, {value},
                            {prim_type}, prim_type);
}
// cos(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCos(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kCos, {value},
                            {prim_type}, prim_type);
}
// tan(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTan(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kTan, {value},
                            {prim_type}, prim_type);
}
// exp(x) via the device math library; the name hint is unused here.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExp(
    PrimitiveType prim_type, llvm::Value* value, absl::string_view ) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kExp, {value},
                            {prim_type}, prim_type);
}
// exp(x) - 1 via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExpm1(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kExpm1, {value},
                            {prim_type}, prim_type);
}
// pow(lhs, rhs) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPow(
    PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
    absl::string_view name) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs, rhs},
                            {prim_type, prim_type}, prim_type, name);
}
// sqrt(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSqrt(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kSqrt, {value},
                            {prim_type}, prim_type);
}
// 1 / sqrt(x) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitRsqrt(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kRsqrt, {value},
                            {prim_type}, prim_type);
}
// atan2(lhs, rhs) via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitAtan2(
    PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
    absl::string_view name) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kAtan2, {lhs, rhs},
                            {prim_type, prim_type}, prim_type, name);
}
// Emits tanh(x).
//
// F64 is delegated to the device math library. F16/F32 use the fast
// polynomial approximation from llvm_ir::EmitFastTanh, clamped to +/-1 for
// large |x|; F16 is computed in F32 and narrowed at the end.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTanh(
    PrimitiveType prim_type, llvm::Value* value) {
  if (prim_type == F64) {
    return EmitDeviceMathCall(TargetDeviceFunctionID::kTanh, {value},
                              {prim_type}, prim_type);
  }
  // Widen F16 to F32; other types keep their own width.
  llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();
  llvm::Value* input = FPCast(value, type);
  // Cutoff beyond which the approximation is replaced by copysign(1, x).
  // NOTE(review): presumably tanh(20) already rounds to 1 in these types —
  // confirm against llvm_ir::EmitFastTanh's valid range.
  constexpr double kMaxValue = 20.0;
  auto max_value = llvm::ConstantFP::get(type, kMaxValue);
  llvm::Value* abs_value =
      llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b());
  llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b(), input);
  auto one = llvm::ConstantFP::get(type, 1.0)
  ;
  auto one_with_sign = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign,
                                                    {one, input}, {type}, b());
  // FCmpULT is an *unordered* compare, so NaN inputs select the fast_tanh
  // branch and propagate NaN rather than returning +/-1.
  return FPCast(Select(FCmpULT(abs_value, max_value), fast_tanh, one_with_sign),
                value->getType(), "tanh");
}
// Emits erf(x).
//
// F64 goes to the device math library; F16/F32 use the F32 polynomial
// approximation (F16 is widened first). Any other type is unimplemented.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitErf(
    PrimitiveType prim_type, llvm::Value* value) {
  if (prim_type == F64) {
    return EmitDeviceMathCall(TargetDeviceFunctionID::kErf, {value},
                              {prim_type}, prim_type);
  }
  llvm::Type* compute_type =
      prim_type == F16 ? b()->getFloatTy() : value->getType();
  if (compute_type != b()->getFloatTy()) {
    return Unimplemented("erf");
  }
  llvm::Value* widened = FPCast(value, compute_type);
  llvm::Value* erf = llvm_ir::EmitErfF32(b(), widened);
  return FPCast(erf, value->getType());
}
// |a + bi| == hypot(a, b), computed via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitComplexAbs(
    PrimitiveType prim_type, llvm::Value* value) {
  llvm::Value* real = EmitExtractReal(value);
  llvm::Value* imag = EmitExtractImag(value);
  return EmitDeviceMathCall(TargetDeviceFunctionID::kHypot, {real, imag},
                            {prim_type, prim_type}, prim_type);
}
// Cube root via the device math library.
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCbrt(
    PrimitiveType prim_type, llvm::Value* value) {
  return EmitDeviceMathCall(TargetDeviceFunctionID::kCbrt, {value},
                            {prim_type}, prim_type);
}
// Invokes a nested HLO computation with scalar arguments by delegating to
// CallNestedComputationWithScalars; the unnamed name/bool parameters are
// ignored on this path.
absl::StatusOr<std::vector<llvm::Value*>>
GpuElementalIrEmitter::EmitThreadLocalCall(
    const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
    absl::string_view, bool ) {
  return CallNestedComputationWithScalars(b(), ir_emitter_context_, callee,
                                          parameters);
}
}
} | #include "xla/service/elemental_ir_emitter.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::nullopt;
class ElementalIrEmitterExecutionTest : public HloTestBase {
protected:
void RunTest(const std::string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), args, nullopt));
}
void RunTypeConversionTest(absl::string_view hlo_text) {
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)}));
}
};
// Variant of the execution fixture with fast-min-max disabled on both CPU and
// GPU, so min/max must implement full NaN-propagating semantics.
class ElementalIrEmitterExecutionTestWithoutFastMinMax
    : public ElementalIrEmitterExecutionTest {
 protected:
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options =
        ElementalIrEmitterExecutionTest::GetDebugOptionsForTest();
    debug_options.set_xla_cpu_enable_fast_min_max(false);
    debug_options.set_xla_gpu_enable_fast_min_max(false);
    return debug_options;
  }
};
// Dot nested in a loop fusion with reshaped s32 operands.
XLA_TEST_F(ElementalIrEmitterExecutionTest, DotFusion) {
  const std::string hlo_text = R"(
HloModule FusedDot
fused_computation {
arg0 = s32[1,2,1]{2,1,0} parameter(0)
reshape.lhs = s32[2,1]{1,0} reshape(arg0)
arg1 = s32[1,2,1]{2,1,0} parameter(1)
reshape.rhs = s32[2,1]{1,0} reshape(arg1)
ROOT dot = s32[1,1]{1,0} dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[1,2,1]{2,1,0} parameter(0)
entry_arg1 = s32[1,2,1]{2,1,0} parameter(1)
ROOT fusion = s32[1,1]{1,0} fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
  Literal lhs = LiteralUtil::CreateR3<int32_t>({{{1}, {2}}});
  Literal rhs = LiteralUtil::CreateR3<int32_t>({{{3}, {4}}});
  RunTest(hlo_text, {&lhs, &rhs});
}
// Fused dot producing a scalar (rank-0) result.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ScalarDotFusion) {
  const char* hlo_text = R"(
HloModule ScalarDotFusion
fused_computation {
arg0 = s32[2,2]{1,0} parameter(0)
reshape.lhs = s32[4]{0} reshape(arg0)
arg1 = s32[2,2]{1,0} parameter(1)
reshape.rhs = s32[4]{0} reshape(arg1)
ROOT dot = s32[] dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[2,2]{1,0} parameter(0)
entry_arg1 = s32[2,2]{1,0} parameter(1)
ROOT fusion = s32[] fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
  Literal lhs = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
  Literal rhs = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
  RunTest(hlo_text, {&lhs, &rhs});
}
// Batched dot inside a fusion; layout assignment is disabled so the literal
// layouts in the HLO text are preserved.
XLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDot) {
  const char* hlo_text = R"(
HloModule BatchDot
fused_computation.1 {
param_0 = f64[1,1,8]{2,1,0} parameter(0)
r.1 = f64[2,4]{1,0} reshape(param_0)
param_1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
r.2 = f64[2,4,1]{2,1,0} reshape(param_1)
ROOT dot = f64[2,1]{1,0} dot(r.1, r.2), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
ENTRY resampler_Resampler.49 {
p0 = f64[1,1,8]{2,1,0} parameter(0)
p1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
ROOT f = f64[2,1]{1,0} fusion(p0, p1), kind=kLoop, calls=fused_computation.1
}
)";
  HloModuleConfig config;
  auto debug_options = GetDebugOptionsForTest();
  debug_options.add_xla_disable_hlo_passes("layout-assignment");
  config.set_debug_options(debug_options);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{4e-3, 4e-3}));
}
// Complex division where the divisor's norm is infinite; must propagate
// NaN/Inf bit-exactly.
XLA_TEST_F(ElementalIrEmitterExecutionTest,
           DivideComplexNumbersWithInfiniteNormRhs) {
  constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[8]{0} constant({
(1, 1), (1, inf), (1, inf), (nan, 1),
(inf, inf), (inf, nan), (nan, nan), (1, 2)})
real = f32[8]{0} constant({nan, nan, inf, inf, inf, 1, inf, 3})
imag = f32[8]{0} constant({inf, inf, inf, inf, 1, inf, inf, 4})
complex.2 = c64[8]{0} complex(real, imag)
ROOT divide.1 = c64[8]{0} divide(constant.1, complex.2)
}
)";
  // The inline module setup duplicated RunTypeConversionTest's body verbatim
  // (honor-NaN/Inf options, zero-tolerance compare); call the fixture helper
  // instead of repeating it.
  RunTypeConversionTest(hlo_text);
}
// Complex division with a finite-norm divisor but non-finite dividends.
XLA_TEST_F(ElementalIrEmitterExecutionTest,
           DivideComplexNumbersWithFiniteNormRhs) {
  constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[5]{0} constant({
(1, inf), (inf, 1), (inf, nan), (inf, inf), (nan, inf)})
real = f32[5]{0} constant({1, 1, 1, 1, 1})
imag = f32[5]{0} constant({1, 1, 1, 1, 1})
complex.2 = c64[5]{0} complex(real, imag)
ROOT divide.1 = c64[5]{0} divide(constant.1, complex.2)
}
)";
  // The inline module setup duplicated RunTypeConversionTest's body verbatim;
  // call the fixture helper instead of repeating it.
  RunTypeConversionTest(hlo_text);
}
// Complex division by zero; every quotient involves Inf/NaN arithmetic.
XLA_TEST_F(ElementalIrEmitterExecutionTest,
           DivideComplexNumbersWithZeroNormRhs) {
  constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[9]{0} constant({
(1, 1), (1, nan), (1, inf), (inf, inf), (inf, 1),
(inf, nan), (nan, 1), (nan, inf), (nan, nan)})
real = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
imag = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
complex.2 = c64[9]{0} complex(real, imag)
ROOT divide.1 = c64[9]{0} divide(constant.1, complex.2)
}
)";
  // The inline module setup duplicated RunTypeConversionTest's body verbatim;
  // call the fixture helper instead of repeating it.
  RunTypeConversionTest(hlo_text);
}
// {f16, f32, f64} -> bf16 conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertFloatsToBF16) {
  RunTypeConversionTest(R"(
HloModule convertToBF16
ENTRY ConvertToBF16
(f16_ f16[], f32_ f32[], f64_ f64[]) -> (bf16[], bf16[], bf16[]) {
f16_ = f16[] parameter(0)
f32_ = f32[] parameter(1)
f64_ = f64[] parameter(2)
converted_f16 = bf16[] convert(f16[] f16_)
converted_f32 = bf16[] convert(f32[] f32_)
converted_f64 = bf16[] convert(f64[] f64_)
ROOT tuple = (bf16[], bf16[], bf16[]) tuple(converted_f16, converted_f32,
converted_f64)
}
)");
}
// Signed integer -> bf16 conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertSignedToBF16) {
  RunTypeConversionTest(R"(
HloModule convertToBF16
ENTRY ConvertToBF16 (s8_ s8[], s16_ s16[], s32_ s32[], s64_ s64[]) ->
(bf16[], bf16[], bf16[], bf16[]) {
s8_ = s8[] parameter(0)
s16_ = s16[] parameter(1)
s32_ = s32[] parameter(2)
s64_ = s64[] parameter(3)
converted_s8 = bf16[] convert(s8[] s8_)
converted_s16 = bf16[] convert(s16[] s16_)
converted_s32 = bf16[] convert(s32[] s32_)
converted_s64 = bf16[] convert(s64[] s64_)
ROOT tuple = (bf16[], bf16[], bf16[], bf16[]) tuple(
converted_s8, converted_s16, converted_s32, converted_s64)
}
)");
}
// Unsigned integer -> bf16 conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertUnsignedToBF16) {
  RunTypeConversionTest(R"(
HloModule convertToBF16
ENTRY ConvertToBF16 (u8_ u8[], u16_ u16[], u32_ u32[], u64_ u64[]) ->
(bf16[], bf16[], bf16[], bf16[]) {
u8_ = u8[] parameter(0)
u16_ = u16[] parameter(1)
u32_ = u32[] parameter(2)
u64_ = u64[] parameter(3)
converted_u8 = bf16[] convert(u8[] u8_)
converted_u16 = bf16[] convert(u16[] u16_)
converted_u32 = bf16[] convert(u32[] u32_)
converted_u64 = bf16[] convert(u64[] u64_)
ROOT tuple = (bf16[], bf16[], bf16[], bf16[]) tuple(
converted_u8, converted_u16, converted_u32, converted_u64)
}
)");
}
// bf16 -> {f16, f32, f64} conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertBF16ToFloat) {
  RunTypeConversionTest(R"(
HloModule convertFromBF16
ENTRY ConvertFromBF16
(to_f16 bf16[], to_f32 bf16[], to_f64 bf16[]) -> (f16[], f32[], f64[]) {
to_f16 = bf16[] parameter(0)
to_f32 = bf16[] parameter(1)
to_f64 = bf16[] parameter(2)
f16_ = f16[] convert(bf16[] to_f16)
f32_ = f32[] convert(bf16[] to_f32)
f64_ = f64[] convert(bf16[] to_f64)
ROOT tuple = (f16[], f32[], f64[]) tuple(f16_, f32_, f64_)
}
)");
}
// bf16 -> signed integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertBF16ToSigned) {
  RunTypeConversionTest(R"(
HloModule convertFromBF16
ENTRY ConvertFromBF16(to_s8 bf16[], to_s16 bf16[], to_s32 bf16[],
to_s64 bf16[]) -> (s8[], s16[], s32[], s64[]) {
to_s8 = bf16[] parameter(0)
to_s16 = bf16[] parameter(1)
to_s32 = bf16[] parameter(2)
to_s64 = bf16[] parameter(3)
s8_ = s8[] convert(bf16[] to_s8)
s16_ = s16[] convert(bf16[] to_s16)
s32_ = s32[] convert(bf16[] to_s32)
s64_ = s64[] convert(bf16[] to_s64)
ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)
}
)");
}
// bf16 -> unsigned integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertBF16ToUnsigned) {
  RunTypeConversionTest(R"(
HloModule convertFromBF16
ENTRY ConvertFromBF16(to_u8 bf16[], to_u16 bf16[], to_u32 bf16[],
to_u64 bf16[]) -> (u8[], u16[], u32[], u64[]) {
to_u8 = bf16[] parameter(0)
to_u16 = bf16[] parameter(1)
to_u32 = bf16[] parameter(2)
to_u64 = bf16[] parameter(3)
u8_ = u8[] convert(bf16[] to_u8)
u16_ = u16[] convert(bf16[] to_u16)
u32_ = u32[] convert(bf16[] to_u32)
u64_ = u64[] convert(bf16[] to_u64)
ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)
}
)");
}
// bf16 -> complex conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertBF16ToComplex) {
  RunTypeConversionTest(R"(
HloModule convertFromBF16
ENTRY ConvertFromBF16
(to_c64 bf16[], to_c128 bf16[]) -> (c64[], c128[]) {
to_c64 = bf16[] parameter(0)
to_c128 = bf16[] parameter(1)
c64_ = c64[] convert(bf16[] to_c64)
c128_ = c128[] convert(bf16[] to_c128)
ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)
}
)");
}
// Elementwise comparison of bf16 vectors.
XLA_TEST_F(ElementalIrEmitterExecutionTest, CompareBF16) {
  constexpr char hlo_text[] = R"(
HloModule compareBF16
ENTRY main {
p0 = bf16[4] parameter(0)
p1 = bf16[4] parameter(1)
ROOT cmp = pred[4] compare(p0, p1), direction=LT
})";
  Literal lhs = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
  Literal rhs = LiteralUtil::CreateR1<float>({4, 3, 2, 1});
  lhs = LiteralUtil::ConvertF32ToBF16(lhs);
  rhs = LiteralUtil::ConvertF32ToBF16(rhs);
  RunTest(hlo_text, {&lhs, &rhs});
}
// Iota emission for bf16.
XLA_TEST_F(ElementalIrEmitterExecutionTest, IotaBF16) {
  constexpr char hlo_text[] = R"(
HloModule IotaBF16
ENTRY main {
ROOT iota_ = bf16[4] iota(), iota_dimension=0
}
)";
  RunTest(hlo_text, {});
}
// Batched bf16 matmul, compared with a small tolerance.
XLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDotBF16) {
  const char* const hlo_text = R"(
HloModule matmul
ENTRY main {
x = bf16[8,16] parameter(0)
y = bf16[8,16,32] parameter(1)
ROOT dot = bf16[8,32] dot(x, y), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
  HloModuleConfig config;
  DebugOptions debug_options = GetDebugOptionsForTest();
  config.set_debug_options(debug_options)
  ;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
// Floating-point -> f8e4m3fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertFloatsToF8E4FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E4FNUZ
ENTRY ConvertToF8E4FNUZ
(f16_ f16[], f32_ f32[], f64_ f64[], bf16_ bf16[]) -> (f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) {
f16_ = f16[] parameter(0)
f32_ = f32[] parameter(1)
f64_ = f64[] parameter(2)
bf16_ = bf16[] parameter(3)
converted_f16 = f8e4m3fnuz[] convert(f16[] f16_)
converted_f32 = f8e4m3fnuz[] convert(f32[] f32_)
converted_f64 = f8e4m3fnuz[] convert(f64[] f64_)
converted_bf16 = f8e4m3fnuz[] convert(bf16[] bf16_)
ROOT tuple = (f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) tuple(
converted_f16, converted_f32, converted_f64, converted_bf16)
}
)");
}
// Signed integer -> f8e4m3fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertSignedToF8E4FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E4FNUZ
ENTRY ConvertToF8E4FNUZ (s8_ s8[], s16_ s16[], s32_ s32[], s64_ s64[]) ->
(f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) {
s8_ = s8[] parameter(0)
s16_ = s16[] parameter(1)
s32_ = s32[] parameter(2)
s64_ = s64[] parameter(3)
converted_s8 = f8e4m3fnuz[] convert(s8[] s8_)
converted_s16 = f8e4m3fnuz[] convert(s16[] s16_)
converted_s32 = f8e4m3fnuz[] convert(s32[] s32_)
converted_s64 = f8e4m3fnuz[] convert(s64[] s64_)
ROOT tuple = (f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) tuple(
converted_s8, converted_s16, converted_s32, converted_s64)
}
)");
}
// Unsigned integer -> f8e4m3fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertUnsignedToF8E4FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E4FNUZ
ENTRY ConvertToF8E4FNUZ (u8_ u8[], u16_ u16[], u32_ u32[], u64_ u64[]) ->
(f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) {
u8_ = u8[] parameter(0)
u16_ = u16[] parameter(1)
u32_ = u32[] parameter(2)
u64_ = u64[] parameter(3)
converted_u8 = f8e4m3fnuz[] convert(u8[] u8_)
converted_u16 = f8e4m3fnuz[] convert(u16[] u16_)
converted_u32 = f8e4m3fnuz[] convert(u32[] u32_)
converted_u64 = f8e4m3fnuz[] convert(u64[] u64_)
ROOT tuple = (f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[], f8e4m3fnuz[]) tuple(
converted_u8, converted_u16, converted_u32, converted_u64)
}
)");
}
// f8e4m3fnuz -> {f16, f32, f64, bf16} conversions.
//
// BUG FIX: the bf16_ instruction previously converted `to_f64` (a copy-paste
// error), so the f8e4m3fnuz -> bf16 conversion of parameter 3 was never
// actually exercised. It now converts `to_bf16`.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E4FNUZToFloat) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E4FNUZ
ENTRY ConvertFromF8E4FNUZ
(to_f16 f8e4m3fnuz[], to_f32 f8e4m3fnuz[], to_f64 f8e4m3fnuz[], to_bf16 f8e4m3fnuz[]) -> (f16[], f32[], f64[], bf16[]) {
to_f16 = f8e4m3fnuz[] parameter(0)
to_f32 = f8e4m3fnuz[] parameter(1)
to_f64 = f8e4m3fnuz[] parameter(2)
to_bf16 = f8e4m3fnuz[] parameter(3)
f16_ = f16[] convert(f8e4m3fnuz[] to_f16)
f32_ = f32[] convert(f8e4m3fnuz[] to_f32)
f64_ = f64[] convert(f8e4m3fnuz[] to_f64)
bf16_ = bf16[] convert(f8e4m3fnuz[] to_bf16)
ROOT tuple = (f16[], f32[], f64[], bf16[]) tuple(f16_, f32_, f64_, bf16_)
}
)");
}
// f8e4m3fnuz -> signed integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E4FNUZToSigned) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E4FNUZ
ENTRY ConvertFromF8E4FNUZ(to_s8 f8e4m3fnuz[], to_s16 f8e4m3fnuz[], to_s32 f8e4m3fnuz[],
to_s64 f8e4m3fnuz[]) -> (s8[], s16[], s32[], s64[]) {
to_s8 = f8e4m3fnuz[] parameter(0)
to_s16 = f8e4m3fnuz[] parameter(1)
to_s32 = f8e4m3fnuz[] parameter(2)
to_s64 = f8e4m3fnuz[] parameter(3)
s8_ = s8[] convert(f8e4m3fnuz[] to_s8)
s16_ = s16[] convert(f8e4m3fnuz[] to_s16)
s32_ = s32[] convert(f8e4m3fnuz[] to_s32)
s64_ = s64[] convert(f8e4m3fnuz[] to_s64)
ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)
}
)");
}
// f8e4m3fnuz -> unsigned integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E4FNUZToUnsigned) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E4FNUZ
ENTRY ConvertFromF8E4FNUZ(to_u8 f8e4m3fnuz[], to_u16 f8e4m3fnuz[], to_u32 f8e4m3fnuz[],
to_u64 f8e4m3fnuz[]) -> (u8[], u16[], u32[], u64[]) {
to_u8 = f8e4m3fnuz[] parameter(0)
to_u16 = f8e4m3fnuz[] parameter(1)
to_u32 = f8e4m3fnuz[] parameter(2)
to_u64 = f8e4m3fnuz[] parameter(3)
u8_ = u8[] convert(f8e4m3fnuz[] to_u8)
u16_ = u16[] convert(f8e4m3fnuz[] to_u16)
u32_ = u32[] convert(f8e4m3fnuz[] to_u32)
u64_ = u64[] convert(f8e4m3fnuz[] to_u64)
ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)
}
)");
}
// f8e4m3fnuz -> complex conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E4FNUZToComplex) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E4FNUZ
ENTRY ConvertFromF8E4FNUZ
(to_c64 f8e4m3fnuz[], to_c128 f8e4m3fnuz[]) -> (c64[], c128[]) {
to_c64 = f8e4m3fnuz[] parameter(0)
to_c128 = f8e4m3fnuz[] parameter(1)
c64_ = c64[] convert(f8e4m3fnuz[] to_c64)
c128_ = c128[] convert(f8e4m3fnuz[] to_c128)
ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)
}
)");
}
// Elementwise comparison of f8e4m3fnuz vectors.
XLA_TEST_F(ElementalIrEmitterExecutionTest, CompareF8E4FNUZ) {
  constexpr char hlo_text[] = R"(
HloModule compareF8E4FNUZ
ENTRY main {
p0 = f8e4m3fnuz[4] parameter(0)
p1 = f8e4m3fnuz[4] parameter(1)
ROOT cmp = pred[4] compare(p0, p1), direction=LT
})";
  Literal lhs = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
  Literal rhs = LiteralUtil::CreateR1<float>({4, 3, 2, 1});
  lhs = LiteralUtil::ConvertF32ToF8E4M3FNUZ(lhs);
  rhs = LiteralUtil::ConvertF32ToF8E4M3FNUZ(rhs);
  RunTest(hlo_text, {&lhs, &rhs});
}
// Iota emission for f8e4m3fnuz.
XLA_TEST_F(ElementalIrEmitterExecutionTest, IotaF8E4FNUZ) {
  constexpr char hlo_text[] = R"(
HloModule IotaF8E4FNUZ
ENTRY main {
ROOT iota_ = f8e4m3fnuz[4] iota(), iota_dimension=0
}
)";
  RunTest(hlo_text, {});
}
// Floating-point -> f8e5m2fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertFloatsToF8E5FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E5FNUZ
ENTRY ConvertToF8E5FNUZ
(f16_ f16[], f32_ f32[], f64_ f64[], bf16_ bf16[]) -> (f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) {
f16_ = f16[] parameter(0)
f32_ = f32[] parameter(1)
f64_ = f64[] parameter(2)
bf16_ = bf16[] parameter(3)
converted_f16 = f8e5m2fnuz[] convert(f16[] f16_)
converted_f32 = f8e5m2fnuz[] convert(f32[] f32_)
converted_f64 = f8e5m2fnuz[] convert(f64[] f64_)
converted_bf16 = f8e5m2fnuz[] convert(bf16[] bf16_)
ROOT tuple = (f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) tuple(
converted_f16, converted_f32, converted_f64, converted_bf16)
}
)");
}
// Signed integer -> f8e5m2fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertSignedToF8E5FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E5FNUZ
ENTRY ConvertToF8E5FNUZ (s8_ s8[], s16_ s16[], s32_ s32[], s64_ s64[]) ->
(f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) {
s8_ = s8[] parameter(0)
s16_ = s16[] parameter(1)
s32_ = s32[] parameter(2)
s64_ = s64[] parameter(3)
converted_s8 = f8e5m2fnuz[] convert(s8[] s8_)
converted_s16 = f8e5m2fnuz[] convert(s16[] s16_)
converted_s32 = f8e5m2fnuz[] convert(s32[] s32_)
converted_s64 = f8e5m2fnuz[] convert(s64[] s64_)
ROOT tuple = (f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) tuple(
converted_s8, converted_s16, converted_s32, converted_s64)
}
)");
}
// Unsigned integer -> f8e5m2fnuz conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertUnsignedToF8E5FNUZ) {
  RunTypeConversionTest(R"(
HloModule convertToF8E5FNUZ
ENTRY ConvertToF8E5FNUZ (u8_ u8[], u16_ u16[], u32_ u32[], u64_ u64[]) ->
(f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) {
u8_ = u8[] parameter(0)
u16_ = u16[] parameter(1)
u32_ = u32[] parameter(2)
u64_ = u64[] parameter(3)
converted_u8 = f8e5m2fnuz[] convert(u8[] u8_)
converted_u16 = f8e5m2fnuz[] convert(u16[] u16_)
converted_u32 = f8e5m2fnuz[] convert(u32[] u32_)
converted_u64 = f8e5m2fnuz[] convert(u64[] u64_)
ROOT tuple = (f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[], f8e5m2fnuz[]) tuple(
converted_u8, converted_u16, converted_u32, converted_u64)
}
)");
}
// f8e5m2fnuz -> {f16, f32, f64} conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E5FNUZToFloat) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E5FNUZ
ENTRY ConvertFromF8E5FNUZ
(to_f16 f8e5m2fnuz[], to_f32 f8e5m2fnuz[], to_f64 f8e5m2fnuz[]) -> (f16[], f32[], f64[]) {
to_f16 = f8e5m2fnuz[] parameter(0)
to_f32 = f8e5m2fnuz[] parameter(1)
to_f64 = f8e5m2fnuz[] parameter(2)
f16_ = f16[] convert(f8e5m2fnuz[] to_f16)
f32_ = f32[] convert(f8e5m2fnuz[] to_f32)
f64_ = f64[] convert(f8e5m2fnuz[] to_f64)
ROOT tuple = (f16[], f32[], f64[]) tuple(f16_, f32_, f64_)
}
)");
}
// f8e5m2fnuz -> signed integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E5FNUZToSigned) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E5FNUZ
ENTRY ConvertFromF8E5FNUZ(to_s8 f8e5m2fnuz[], to_s16 f8e5m2fnuz[], to_s32 f8e5m2fnuz[],
to_s64 f8e5m2fnuz[]) -> (s8[], s16[], s32[], s64[]) {
to_s8 = f8e5m2fnuz[] parameter(0)
to_s16 = f8e5m2fnuz[] parameter(1)
to_s32 = f8e5m2fnuz[] parameter(2)
to_s64 = f8e5m2fnuz[] parameter(3)
s8_ = s8[] convert(f8e5m2fnuz[] to_s8)
s16_ = s16[] convert(f8e5m2fnuz[] to_s16)
s32_ = s32[] convert(f8e5m2fnuz[] to_s32)
s64_ = s64[] convert(f8e5m2fnuz[] to_s64)
ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)
}
)");
}
// f8e5m2fnuz -> unsigned integer conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E5FNUZToUnsigned) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E5FNUZ
ENTRY ConvertFromF8E5FNUZ(to_u8 f8e5m2fnuz[], to_u16 f8e5m2fnuz[], to_u32 f8e5m2fnuz[],
to_u64 f8e5m2fnuz[]) -> (u8[], u16[], u32[], u64[]) {
to_u8 = f8e5m2fnuz[] parameter(0)
to_u16 = f8e5m2fnuz[] parameter(1)
to_u32 = f8e5m2fnuz[] parameter(2)
to_u64 = f8e5m2fnuz[] parameter(3)
u8_ = u8[] convert(f8e5m2fnuz[] to_u8)
u16_ = u16[] convert(f8e5m2fnuz[] to_u16)
u32_ = u32[] convert(f8e5m2fnuz[] to_u32)
u64_ = u64[] convert(f8e5m2fnuz[] to_u64)
ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)
}
)");
}
// f8e5m2fnuz -> complex conversions.
XLA_TEST_F(ElementalIrEmitterExecutionTest, ConvertF8E5FNUZToComplex) {
  RunTypeConversionTest(R"(
HloModule convertFromF8E5FNUZ
ENTRY ConvertFromF8E5FNUZ
(to_c64 f8e5m2fnuz[], to_c128 f8e5m2fnuz[]) -> (c64[], c128[]) {
to_c64 = f8e5m2fnuz[] parameter(0)
to_c128 = f8e5m2fnuz[] parameter(1)
c64_ = c64[] convert(f8e5m2fnuz[] to_c64)
c128_ = c128[] convert(f8e5m2fnuz[] to_c128)
ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)
}
)");
}
// Elementwise comparison of f8e5m2fnuz vectors.
XLA_TEST_F(ElementalIrEmitterExecutionTest, CompareF8E5FNUZ) {
  constexpr char hlo_text[] = R"(
HloModule compareF8E5FNUZ
ENTRY main {
p0 = f8e5m2fnuz[4] parameter(0)
p1 = f8e5m2fnuz[4] parameter(1)
ROOT cmp = pred[4] compare(p0, p1), direction=LT
})";
  Literal lhs = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
  Literal rhs = LiteralUtil::CreateR1<float>({4, 3, 2, 1});
  lhs = LiteralUtil::ConvertF32ToF8E5M2FNUZ(lhs);
  rhs = LiteralUtil::ConvertF32ToF8E5M2FNUZ(rhs);
  RunTest(hlo_text, {&lhs, &rhs});
}
// Iota emission for f8e5m2fnuz.
XLA_TEST_F(ElementalIrEmitterExecutionTest, IotaF8E5FNUZ) {
  constexpr char hlo_text[] = R"(
HloModule IotaF8E5FNUZ
ENTRY main {
ROOT iota_ = f8e5m2fnuz[4] iota(), iota_dimension=0
}
)";
  RunTest(hlo_text, {});
}
// With fast-min-max disabled, minimum must propagate a NaN on the LHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MinimumHandlesNaNsOnTheLeft) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(nans, neg1s)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}))
  ;
}
// NaN on the RHS of minimum; currently disabled.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           DISABLED_MinimumHandlesNaNsOnTheRight) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(neg1s, nans)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
// Maximum must propagate a NaN on the LHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MaximumHandlesNaNsOnTheLeft) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(nans, neg1s)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
// Maximum must propagate a NaN on the RHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MaximumHandlesNaNsOnTheRight) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(neg1s, nans)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
// Plain (NaN-free) minimum selecting the smaller LHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MinimumReturnsLHS) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(zeros, ones)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
                                                1e-3}));
}
// Plain minimum selecting the smaller RHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MinimumReturnsRHS) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(ones, zeros)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
                                                1e-3}));
}
// Plain maximum selecting the larger LHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MaximumReturnsLHS) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(ones, zeros)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
                                                1e-3}));
}
// Plain maximum selecting the larger RHS.
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
           MaximumReturnsRHS) {
  constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(zeros, ones)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,
                                                1e-3}));
}
// Fixture for tests that poke ElementalIrEmitter internals directly (via
// ElementalIrEmitterForTests below) instead of executing compiled modules.
class ElementalIrEmitterInternalTest : public HloTestBase {};
// The elemental emitter must reject (not miscompile) a dot instruction that
// carries a structured-sparsity descriptor and a metadata operand:
// TestElementalDot has to return a non-OK status.
XLA_TEST_F(ElementalIrEmitterInternalTest, SparseDotIsUnsupported) {
  constexpr absl::string_view kHloText = R"(
HloModule test
ENTRY main {
  lhs = f16[5,16] parameter(0)
  rhs = f16[32,10] parameter(1)
  meta = u16[5,2] parameter(2)
  ROOT dot = f32[5,10] dot(lhs, rhs, meta),
      lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  HloInstruction* root = module->entry_computation()->root_instruction();
  // A throwaway LLVM context/module/builder is enough: the emitter only
  // needs to inspect the instruction, not produce runnable code.
  llvm::LLVMContext llvm_context;
  llvm::Module llvm_module("", llvm_context);
  llvm::IRBuilder<> builder(llvm_context);
  ElementalIrEmitterForTests emitter(&llvm_module, &builder);
  llvm_ir::IrArray::Index test_index{builder.getInt64Ty()};
  auto result = emitter.TestElementalDot(root, test_index);
  EXPECT_FALSE(result.ok());
}
}
} |
1,951 | cpp | tensorflow/tensorflow | collective_ops_utils | third_party/xla/xla/service/collective_ops_utils.cc | third_party/xla/xla/service/collective_ops_utils_test.cc | #ifndef XLA_SERVICE_COLLECTIVE_OPS_UTILS_H_
#define XLA_SERVICE_COLLECTIVE_OPS_UTILS_H_
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/pattern_matcher.h"
#include "xla/stream_executor/device_memory.h"
#include "tsl/platform/blocking_counter.h"
namespace xla {
// Elementwise reductions a collective (e.g. all-reduce) can perform.
enum class ReductionKind { SUM, PRODUCT, MIN, MAX };

// Returns a human-readable name for `reduction_kind`, for logging/debugging.
constexpr std::string_view ReductionKindToString(ReductionKind reduction_kind) {
  switch (reduction_kind) {
    case ReductionKind::SUM:
      return "sum";
    case ReductionKind::PRODUCT:
      return "prod";
    case ReductionKind::MIN:
      return "min";
    case ReductionKind::MAX:
      return "max";
  }
  // All enumerators are handled above; this is only reachable for an
  // out-of-range enum value. Returning a sentinel avoids falling off the end
  // of a value-returning function (UB, and a -Wreturn-type warning on GCC).
  return "invalid";
}
// Returns the reduction performed by `hlo` itself (a binary elementwise op),
// if it corresponds to one of the ReductionKinds.
std::optional<ReductionKind> MatchReductionInstruction(
    const HloInstruction* hlo);
// Returns the reduction implemented by `computation` (e.g. an all-reduce's
// to_apply), if it is a simple binary reduction of its two parameters.
std::optional<ReductionKind> MatchReductionComputation(
    const HloComputation* computation);
// Identity element of reduction `kind` over `type` (e.g. 0 for SUM), or
// nullopt if not known.
std::optional<Literal> GetReductionIdentity(ReductionKind kind,
                                            PrimitiveType type);
// How a collective op's replica groups are interpreted, derived from its
// (channel_id, use_global_device_ids) attributes -- see
// GetCollectiveOpGroupMode() for the exact mapping.
enum class CollectiveOpGroupMode {
  kCrossReplica,              // no channel_id: ids are replica ids.
  kCrossPartition,            // channel_id, no use_global_device_ids:
                              // ids are partition ids.
  kCrossReplicaAndPartition,  // channel_id, use_global_device_ids = false.
  kFlattenedID,               // channel_id, use_global_device_ids = true:
                              // id = replica_id * partition_count +
                              // partition_id.
};
// Resolves the ids participating in the same group as `current_id`; with an
// empty `groups` list, all of [0, *total_participant_count) participate.
absl::StatusOr<std::vector<int>> GetParticipatingIDs(
    CollectiveOpGroupMode group_mode, int current_id,
    std::optional<int> total_participant_count,
    absl::Span<const ReplicaGroup> groups);
// Debug-string name of `group_mode`.
absl::string_view CollectiveOpGroupModeToString(
    CollectiveOpGroupMode group_mode);
// Maps (channel_id presence, use_global_device_ids) to a group mode; the
// combination (no channel, use_global_device_ids = true) is invalid.
absl::StatusOr<CollectiveOpGroupMode> GetCollectiveOpGroupMode(
    bool has_channel_id, std::optional<bool> use_global_device_ids);
// Expands replica groups into explicit lists of global device ids.
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
GetParticipatingDevicesGroups(const DeviceAssignment& device_assignment,
                              absl::Span<const ReplicaGroup> replica_groups,
                              CollectiveOpGroupMode group_mode);
// Rewrites replica groups as groups of flattened ids
// (replica_id * partition_count + partition_id).
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
    const DeviceAssignment& device_assignment,
    absl::Span<const ReplicaGroup> replica_groups,
    CollectiveOpGroupMode group_mode);
// Same, but without a device assignment; only replica/partition counts.
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
    absl::Span<const ReplicaGroup> replica_groups,
    CollectiveOpGroupMode replica_group_mode, int replica_count,
    int partition_count);
// Global device ids participating in the same group as `device_id`.
absl::StatusOr<std::vector<GlobalDeviceId>> GetParticipatingDevices(
    GlobalDeviceId device_id, const DeviceAssignment& device_assignment,
    absl::Span<const ReplicaGroup> replica_groups,
    CollectiveOpGroupMode group_mode);
// Number of participants in each group, in group order.
absl::StatusOr<std::vector<int64_t>> GetPariticipantCountsForReplicaGroups(
    int64_t num_replicas, int64_t num_partitions,
    absl::Span<const ReplicaGroup> replica_groups,
    CollectiveOpGroupMode group_mode);
// NOTE(review): semantics of "orthogonal" not visible here -- see the
// definitions in collective_ops_utils.cc.
bool ReplicaGroupsOrthogonal(absl::Span<const ReplicaGroup> first,
                             absl::Span<const ReplicaGroup> second);
bool ReplicaGroupsEqual(absl::Span<const ReplicaGroup> first,
                        absl::Span<const ReplicaGroup> second);
// Custom-call targets that are treated as no-ops by collective analyses.
inline constexpr absl::string_view kNopCustomCallTarget = "AllocateBuffer";
inline constexpr absl::string_view kNopReturnTokenCustomCallTarget =
    "NopReturnToken";
// Predicates over instructions; defined in collective_ops_utils.cc.
bool IsCollective(const HloInstruction* instruction);
// Returns the channel-carrying collective inside `instruction` (possibly
// `instruction` itself), or nullptr.
HloInstruction* IsOrHasCollectiveWithChannelId(HloInstruction* instruction);
bool IsSyncCollective(const HloInstruction* instr);
// Cycle detection over send/recv source->target pairs.
bool IsForwardCycle(const std::vector<std::pair<int64_t, int64_t>>& pairs);
bool IsBackwardCycle(const std::vector<std::pair<int64_t, int64_t>>& pairs);
// Key identifying one rendezvous of a collective op: two threads joining the
// same collective in the same run construct equal keys. Hashable so it can
// be used in a global rendezvous map.
struct RendezvousKey {
  enum CollectiveOpKind {
    kCrossModule,
    kCrossReplica,
  };

  explicit RendezvousKey(const RunId& run_id,
                         std::vector<GlobalDeviceId> global_devices,
                         int num_local_participants,
                         CollectiveOpKind collective_op_kind, int64_t op_id)
      : run_id(run_id),
        global_devices(std::move(global_devices)),
        num_local_participants(num_local_participants),
        collective_op_kind(collective_op_kind),
        op_id(op_id) {}

  // Hash/equality must cover exactly the same fields; keep them in sync.
  template <typename H>
  friend H AbslHashValue(H h, const RendezvousKey& k) {
    return H::combine(std::move(h), k.run_id, k.global_devices,
                      k.num_local_participants, k.collective_op_kind, k.op_id);
  }
  friend bool operator==(const RendezvousKey& a, const RendezvousKey& b) {
    return a.run_id == b.run_id && a.global_devices == b.global_devices &&
           a.num_local_participants == b.num_local_participants &&
           a.collective_op_kind == b.collective_op_kind &&
           a.op_id == b.op_id;
  }
  friend bool operator!=(const RendezvousKey& a, const RendezvousKey& b) {
    return !(a == b);
  }

  absl::string_view CollectiveOpKindString() const {
    switch (collective_op_kind) {
      case kCrossModule:
        return "cross_module";
      case kCrossReplica:
        return "cross_replica";
    }
  }

  // Debug representation used in stuck-rendezvous log messages.
  std::string ToString() const {
    return absl::StrFormat(
        "RendezvousKey{run_id=%s, global_devices=[%s], "
        "num_local_participants=%d, collective_op_kind=%s, op_id=%d}",
        run_id.ToString(), GlobalDeviceIdsToString(global_devices),
        num_local_participants, CollectiveOpKindString(), op_id);
  }

  RunId run_id;
  std::vector<GlobalDeviceId> global_devices;
  int num_local_participants;
  CollectiveOpKind collective_op_kind;
  int64_t op_id;
};
// Blocks on `counter`, logging an error with the description produced by
// `desc_fn` if the wait exceeds a fixed timeout. If the counter is
// eventually released after the warning, a follow-up message retracts it as
// a false positive. `desc_fn` is only invoked for logging.
template <typename DescFn>
void WaitAndLogIfStuck(tsl::BlockingCounter* counter, const DescFn& desc_fn) {
  VLOG(3) << "Begin: " << desc_fn();
  const std::chrono::milliseconds timeout(5000);
  bool ok = counter->WaitFor(timeout);
  if (ok) {
    VLOG(3) << "Finished: " << desc_fn();
    return;
  }
  // Fixed message: previously read "...ms for and may be stuck".
  LOG(ERROR) << "This thread has been waiting for " << timeout.count()
             << "ms and may be stuck: " << desc_fn();
  // Keep waiting; if we do get through, retract the warning above.
  counter->Wait();
  LOG(ERROR) << "Thread is unstuck! Warning above was a false-positive. "
                "Perhaps the timeout is too short: "
             << desc_fn();
}
// Base class for the per-participant state a Rendezvous threads through its
// collective implementation.
struct ParticipantData {
  ParticipantData(const RendezvousKey& rendezvous_key, int local_rank)
      : rendezvous_key(rendezvous_key), local_rank(local_rank) {}

  // Polymorphic base: deletable through a ParticipantData pointer.
  virtual ~ParticipantData() = default;

  // Debug representation, used by stuck-rendezvous log messages.
  virtual std::string ToString() const = 0;

  RendezvousKey rendezvous_key;
  int local_rank;
};
// Rendezvous for `num_local_participants` threads running one collective op:
// each thread submits its ParticipantData-derived input `I`, the threads
// synchronize, RunCollectiveOp produces an `O` per participant, and the
// shared Rendezvous object is torn down once every thread has dropped its
// reference.
template <typename I, typename O,
          typename =
              std::enable_if_t<std::is_base_of<ParticipantData, I>::value>>
class Rendezvous {
 public:
  virtual ~Rendezvous() {}
  explicit Rendezvous(const RendezvousKey& k)
      : participants_(k.num_local_participants), key_(k) {}

  // Entry point: fetches the shared rendezvous via `rendezvous_getter`,
  // submits `participant`, then waits until all threads have released their
  // shared_ptr so the rendezvous can be destroyed exactly once.
  static absl::StatusOr<O> SubmitParticipant(
      absl::FunctionRef<std::shared_ptr<Rendezvous<I, O>>()> rendezvous_getter,
      I participant) {
    std::shared_ptr<Rendezvous<I, O>> rendezvous = rendezvous_getter();
    TF_ASSIGN_OR_RETURN(auto p, rendezvous->SubmitParticipant(participant))
;
    std::shared_ptr<tsl::BlockingCounter> blocking_counter = p.second;
    // Drop our reference before decrementing so the last thread out can
    // destroy the rendezvous.
    rendezvous.reset();
    blocking_counter->DecrementCount();
    xla::WaitAndLogIfStuck(blocking_counter.get(), [&] {
      return absl::StrFormat(
          "participant waiting for all threads to drop their reference to the "
          "rendezvous: %p",
          rendezvous.get());
    });
    return std::move(p.first);
  }

 protected:
  // Implemented by subclasses: the actual collective computation, run after
  // all local participants have arrived.
  virtual absl::StatusOr<O> RunCollectiveOp(const I& participant) = 0;

  // Slot per local rank, filled as participants arrive (guarded by mu_).
  std::vector<std::optional<I>> participants_;

 private:
  absl::Mutex mu_;

  // Records `participant`, blocks until all participants are present, then
  // runs the collective and returns (output, teardown counter).
  absl::StatusOr<std::pair<O, std::shared_ptr<tsl::BlockingCounter>>>
  SubmitParticipant(const I& participant) {
    {
      absl::MutexLock lock(&mu_);
      // Each local rank may arrive at most once.
      CHECK(!participants_[participant.local_rank].has_value());
      participants_[participant.local_rank] = participant;
    }
    all_participants_present_.DecrementCount();
    WaitAndLogIfStuck(&all_participants_present_, [&] {
      return absl::StrFormat(
          "participant %s waiting for all participants to arrive at rendezvous "
          "%s",
          participant.ToString(), key_.ToString());
    });
    TF_ASSIGN_OR_RETURN(O output, RunCollectiveOp(participant));
    return std::make_pair(std::move(output), returned_blocking_counter_);
  }

  const RendezvousKey key_;
  tsl::BlockingCounter all_participants_present_{key_.num_local_participants};
  // Counter handed back to each caller; reaches zero when every thread has
  // released its rendezvous reference.
  std::shared_ptr<tsl::BlockingCounter> returned_blocking_counter_{
      std::make_shared<tsl::BlockingCounter>(key_.num_local_participants)};
};
// A send/recv channel is eligible for pipelining iff its id is positive.
inline bool MayPipelineSendRecvChannel(int64_t channel_id) {
  return channel_id >= 1;
}
// Frontend-attribute names used by send/recv pipelining machinery.
// NOTE(review): meanings inferred from the names only -- confirm against the
// passes that read these attributes.
constexpr char kSendRecvSourceTargetPairsAttr[] =
    "_xla_send_recv_source_target_pairs";
constexpr char kSendRecvPipelineAttr[] = "_xla_send_recv_pipeline";
constexpr char kSendRecvValidationAttr[] = "_xla_send_recv_validation";
}
#endif
#include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
std::optional<ReductionKind> MatchReductionInstruction(
const HloInstruction* hlo) {
PrimitiveType type = hlo->shape().element_type();
switch (hlo->opcode()) {
case HloOpcode::kAdd:
return ReductionKind::SUM;
case HloOpcode::kMultiply:
return ReductionKind::PRODUCT;
case HloOpcode::kMinimum:
return ReductionKind::MIN;
case HloOpcode::kMaximum:
return ReductionKind::MAX;
case HloOpcode::kAnd:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MIN)
: std::nullopt;
case HloOpcode::kOr:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MAX)
: std::nullopt;
default:
return std::nullopt;
}
}
// Returns the ReductionKind implemented by `computation`, or nullopt. The
// root must be a matching binary op applied (in either operand order) to
// parameters 0 and 1 and produce an effective scalar.
std::optional<ReductionKind> MatchReductionComputation(
    const HloComputation* computation) {
  namespace m = match;
  const HloInstruction* root = computation->root_instruction();
  auto kind = MatchReductionInstruction(root);
  if (kind && !Match(root, m::Op()
                               .WithBinaryOperandsAnyOrder(m::Parameter(0),
                                                           m::Parameter(1))
                               .WithShape(m::Shape().IsEffectiveScalar()))) {
    kind = std::nullopt;
  }
  return kind;
}
// Identity element for `kind` over `type` (e.g. 0 for SUM, 1 for PRODUCT),
// or nullopt for an unrecognized kind.
std::optional<Literal> GetReductionIdentity(ReductionKind kind,
                                            PrimitiveType type) {
  if (kind == ReductionKind::SUM) {
    return LiteralUtil::Zero(type);
  }
  if (kind == ReductionKind::PRODUCT) {
    return LiteralUtil::One(type);
  }
  // MIN's identity is the largest representable value, and vice versa.
  if (kind == ReductionKind::MIN) {
    return LiteralUtil::MaxValue(type);
  }
  if (kind == ReductionKind::MAX) {
    return LiteralUtil::MinValue(type);
  }
  return std::nullopt;
}
// Resolves which ids participate alongside `current_id`. With no groups,
// every id in [0, *total_participant_count) participates; otherwise
// `current_id` must appear in exactly one group, which is returned.
absl::StatusOr<std::vector<int>> GetParticipatingIDs(
    CollectiveOpGroupMode group_mode, int current_id,
    std::optional<int> total_participant_count,
    absl::Span<const ReplicaGroup> groups) {
  // Empty groups means "everyone participates"; the caller must then supply
  // the total participant count.
  if (groups.empty()) {
    TF_RET_CHECK(total_participant_count.has_value());
    std::vector<int> all_participants(*total_participant_count);
    absl::c_iota(all_participants, 0);
    return all_participants;
  }
  // Formats one group as "[id, id, ...]" for the error messages below.
  auto group_formatter = [](std::string* out, const ReplicaGroup& group) {
    out->append("[");
    out->append(absl::StrJoin(group.replica_ids(), ", "));
    out->append("]");
  };
  std::optional<ReplicaGroup> group;
  for (const ReplicaGroup& g : groups) {
    if (absl::c_linear_search(g.replica_ids(), current_id)) {
      // An id may belong to at most one group.
      TF_RET_CHECK(!group.has_value())
          << "Replica ID " << current_id << " appears twice in replica groups"
          << "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
          << "; groups_size=" << groups.size()
          << "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
      group = g;
    }
  }
  // ... and must belong to at least one.
  TF_RET_CHECK(group.has_value())
      << "Replica ID " << current_id << " doesn't appear in replica groups"
      << "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
      << "; groups_size=" << groups.size()
      << "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
  return std::vector<int>(group->replica_ids().begin(),
                          group->replica_ids().end());
}
// Derives the group mode from a collective's channel_id /
// use_global_device_ids attributes. The only invalid combination is
// "no channel id but use_global_device_ids = true".
absl::StatusOr<CollectiveOpGroupMode> GetCollectiveOpGroupMode(
    bool has_channel_id, std::optional<bool> use_global_device_ids) {
  if (!has_channel_id) {
    if (use_global_device_ids.value_or(false)) {
      return InvalidArgument(
          "Invalid combination of has_channel_id and use_global_device_ids");
    }
    return CollectiveOpGroupMode::kCrossReplica;
  }
  if (!use_global_device_ids.has_value()) {
    return CollectiveOpGroupMode::kCrossPartition;
  }
  return *use_global_device_ids
             ? CollectiveOpGroupMode::kFlattenedID
             : CollectiveOpGroupMode::kCrossReplicaAndPartition;
}
// Debug-string name of `group_mode`.
absl::string_view CollectiveOpGroupModeToString(
    CollectiveOpGroupMode group_mode) {
  switch (group_mode) {
    case CollectiveOpGroupMode::kCrossReplica:
      return "kCrossReplica";
    case CollectiveOpGroupMode::kCrossPartition:
      return "kCrossPartition";
    case CollectiveOpGroupMode::kCrossReplicaAndPartition:
      return "kCrossReplicaAndPartition";
    case CollectiveOpGroupMode::kFlattenedID:
      return "kFlattenedID";
  }
  // Unreachable for valid enum values; the sentinel avoids falling off the
  // end of a value-returning function (UB, and -Wreturn-type on GCC).
  return "kInvalidGroupMode";
}
// Expands (replica_groups, group_mode) into explicit groups of global device
// ids using the device assignment. See CollectiveOpGroupMode for how each
// mode interprets the ids inside a ReplicaGroup.
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
GetParticipatingDevicesGroups(const DeviceAssignment& device_assignment,
                              absl::Span<const ReplicaGroup> replica_groups,
                              CollectiveOpGroupMode group_mode) {
  int replica_count = device_assignment.replica_count();
  int partition_count = device_assignment.computation_count();
  std::vector<ReplicaGroup> participating_replica_groups =
      SpanToVector(replica_groups);
  // An empty group list means "one group containing everyone" -- except in
  // kFlattenedID mode, where empty groups are an error.
  if (replica_groups.empty()) {
    if (group_mode == CollectiveOpGroupMode::kFlattenedID) {
      // Always fails here (replica_groups is known empty): report the error.
      TF_RET_CHECK(!replica_groups.empty())
          << "replica groups cannot be empty for kFlattenedID mode";
    }
    int total_participant_count;
    if (group_mode == CollectiveOpGroupMode::kCrossPartition) {
      total_participant_count = partition_count;
    } else {
      total_participant_count = replica_count;
    }
    ReplicaGroup replica_group = ReplicaGroup();
    for (int id = 0; id < total_participant_count; id++) {
      replica_group.add_replica_ids(id);
    }
    participating_replica_groups.push_back(replica_group);
  }
  std::vector<std::vector<GlobalDeviceId>> groups;
  switch (group_mode) {
    case CollectiveOpGroupMode::kCrossReplica: {
      // Ids are replica ids; each group is instantiated once per partition.
      for (const auto& replica_group : participating_replica_groups) {
        for (int partition_id = 0; partition_id < partition_count;
             partition_id++) {
          std::vector<GlobalDeviceId> participants;
          participants.reserve(replica_group.replica_ids().size());
          for (int replica_id : replica_group.replica_ids()) {
            participants.emplace_back(
                device_assignment(replica_id, partition_id));
          }
          groups.push_back(participants);
        }
      }
      return groups;
    }
    case CollectiveOpGroupMode::kCrossPartition: {
      // Ids are partition ids; each group is instantiated once per replica.
      for (const auto& replica_group : participating_replica_groups) {
        for (int replica_id = 0; replica_id < replica_count; replica_id++) {
          std::vector<GlobalDeviceId> participants;
          participants.reserve(replica_group.replica_ids().size());
          for (int partition_id : replica_group.replica_ids()) {
            participants.emplace_back(
                device_assignment(replica_id, partition_id));
          }
          groups.push_back(participants);
        }
      }
      return groups;
    }
    case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
      // Ids are replica ids; one group spans all partitions of each listed
      // replica.
      for (const auto& replica_group : participating_replica_groups) {
        std::vector<GlobalDeviceId> participants;
        participants.reserve(replica_group.replica_ids().size() *
                             partition_count);
        for (int replica_id : replica_group.replica_ids()) {
          for (int partition_id = 0; partition_id < partition_count;
               partition_id++) {
            participants.emplace_back(
                device_assignment(replica_id, partition_id));
          }
        }
        groups.push_back(participants);
      }
      return groups;
    }
    case CollectiveOpGroupMode::kFlattenedID: {
      // Ids encode (replica, partition) as
      // replica_id * partition_count + partition_id.
      for (const auto& replica_group : participating_replica_groups) {
        std::vector<GlobalDeviceId> participants;
        participants.reserve(replica_group.replica_ids().size());
        for (int flattened_id : replica_group.replica_ids()) {
          int replica_id = flattened_id / partition_count;
          int partition_id = flattened_id % partition_count;
          participants.emplace_back(
              device_assignment(replica_id, partition_id));
        }
        groups.push_back(participants);
      }
      return groups;
    }
  }
}
// Rewrites `replica_groups` (interpreted under `group_mode`) as groups of
// flattened ids, where id = replica_id * partition_count + partition_id.
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
    const DeviceAssignment& device_assignment,
    absl::Span<const ReplicaGroup> replica_groups,
    CollectiveOpGroupMode group_mode) {
  // Map each global device id to its flattened (replica-major) index.
  absl::flat_hash_map<GlobalDeviceId, int64_t> device_id_to_flattened_id;
  for (int r = 0; r < device_assignment.replica_count(); ++r) {
    for (int c = 0; c < device_assignment.computation_count(); ++c) {
      GlobalDeviceId device_id = GlobalDeviceId(device_assignment(r, c));
      int64_t flattened_id = r * device_assignment.computation_count() + c;
      device_id_to_flattened_id[device_id] = flattened_id;
    }
  }
  std::vector<ReplicaGroup> flattened_id_groups;
  // Expand to explicit device groups, then translate each device back to its
  // flattened id.
  TF_ASSIGN_OR_RETURN(std::vector<std::vector<GlobalDeviceId>> device_groups,
                      GetParticipatingDevicesGroups(
                          device_assignment, replica_groups, group_mode));
  for (const auto& device_group : device_groups) {
    ReplicaGroup flattened_id_group;
    flattened_id_group.mutable_replica_ids()->Reserve(device_group.size());
    for (const GlobalDeviceId& device_id : device_group) {
      flattened_id_group.add_replica_ids(device_id_to_flattened_id[device_id]);
    }
    flattened_id_groups.push_back(flattened_id_group);
  }
  return flattened_id_groups;
}
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode replica_group_mode, int replica_count,
int partition_count) {
std::vector<ReplicaGroup> filled_empty_replica_group;
absl::Span<const ReplicaGroup> original_replica_groups = replica_groups;
std::vector<ReplicaGroup> flattened_replica_groups;
if (replica_groups.empty()) {
filled_empty_replica_group.emplace_back();
const int64_t id_count =
replica_group_mode == CollectiveOpGroupMode::kCrossPartition
? partition_count
: replica_count;
for (int i = 0; i < id_count; ++i) {
filled_empty_replica_group.back().add_replica_ids(i);
}
original_replica_groups = filled_empty_replica_group;
}
if (replica_group_mode == CollectiveOpGroupMode::kFlattenedID) {
flattened_replica_groups.insert(flattened_replica_groups.end(),
original_replica_groups.begin(),
original_replica_groups.end());
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossReplica) {
flattened_replica_groups.resize(original_replica_groups.size() *
partition_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += partition_count) {
for (int64_t replica_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t partition_id = 0; partition_id < partition_count;
++partition_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[current_group_offset + partition_id]
.add_replica_ids(flattened_id);
}
}
}
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossPartition) {
flattened_replica_groups.resize(original_replica_groups.size() *
replica_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += replica_count) {
for (int64_t partition_id : origina | #include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <sstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// An empty `groups` list means every id in [0, total_participant_count)
// participates.
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_NoReplicaGroups) {
  const std::vector<int> ids =
      GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
                          /*current_id=*/0,
                          /*total_participant_count=*/3,
                          /*groups=*/{})
          .value();
  EXPECT_EQ(ids, (std::vector<int>{0, 1, 2}));
}
// With explicit groups, only the group containing `current_id` is returned.
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_ReplicaGroups) {
  const std::vector<std::vector<int64_t>> group_ids = {{0, 4}, {1, 5}, {2, 3}};
  std::vector<ReplicaGroup> replica_groups;
  for (const std::vector<int64_t> &ids : group_ids) {
    ReplicaGroup group;
    for (int64_t id : ids) {
      group.add_replica_ids(id);
    }
    replica_groups.push_back(group);
  }
  const std::vector<int> actual =
      GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
                          /*current_id=*/1,
                          /*total_participant_count=*/std::nullopt,
                          replica_groups)
          .value();
  EXPECT_EQ(actual, (std::vector<int>{1, 5}));
}
// A channel-carrying all-gather is itself the instruction returned by
// IsOrHasCollectiveWithChannelId.
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId) {
  constexpr absl::string_view kHloText = R"(
  HloModule module, is_scheduled=true

  ENTRY %cluster {
    %param0 = f32[512]{0} parameter(0)
    %copy0 = f32[512]{0} copy(param0)
    %reshape0 = f32[1,1,512]{2,0,1} reshape(f32[512]{0} %copy0)
    %all-gather = f32[1,4,512]{2,0,1} all-gather(f32[1,1,512]{2,0,1} %reshape0), channel_id=3621, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
    %copy1 = f32[1,4,512]{2,0,1} copy(all-gather)
    ROOT root = f32[1,4,512]{2,1,0} copy(%copy1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(kHloText));
  HloInstruction *all_gather =
      module->entry_computation()->GetInstructionWithName("all-gather");
  EXPECT_EQ(IsOrHasCollectiveWithChannelId(all_gather), all_gather);
}
// IsOrHasCollectiveWithChannelId must look inside a fusion: it returns the
// nested all-gather when that all-gather carries a channel id, and nullptr
// when it does not.
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId2) {
  ReplicaGroup group;
  for (int64_t i = 0; i < 8; i++) {
    group.add_replica_ids(i);
  }
  // Fusion wrapping an all-gather WITH channel_id=231: should be found.
  auto builder = HloComputation::Builder("CollectiveWithChannelId2");
  TF_ASSERT_OK_AND_ASSIGN(
      HloInstruction * param_0,
      builder.AddParameter(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p0")));
  HloInstruction *instr =
      builder.AddInstruction(HloInstruction::CreateAllGather(
          ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_0}, 1,
          CollectiveDeviceList({group}), true, 231, true));
  auto computation = builder.Build(
      builder.AddInstruction(HloInstruction::CreateTuple({instr})));
  auto fusion =
      HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
                                   HloInstruction::FusionKind::kOutput,
                                   {param_0}, computation.get(), "fusion");
  EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion.get()), instr);
  // Fusion wrapping an all-gather WITHOUT a channel id: nothing to find.
  auto builder2 = HloComputation::Builder("CollectiveWithChannelId2");
  TF_ASSERT_OK_AND_ASSIGN(
      HloInstruction * param_1,
      builder2.AddParameter(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p1")));
  HloInstruction *instr_without_channel_id =
      builder2.AddInstruction(HloInstruction::CreateAllGather(
          ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_1}, 1, {group},
          true, std::nullopt, true));
  auto computation2 = builder2.Build(builder2.AddInstruction(
      HloInstruction::CreateTuple({instr_without_channel_id})));
  auto fusion2 =
      HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
                                   HloInstruction::FusionKind::kOutput,
                                   {param_1}, computation2.get(), "fusion2");
  EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion2.get()), nullptr);
}
std::vector<ReplicaGroup> CreateReplicaGroups(
const std::vector<std::vector<int64_t>> &replica_groups) {
std::vector<ReplicaGroup> result;
result.reserve(replica_groups.size());
for (const auto &replica_group : replica_groups) {
ReplicaGroup group;
for (auto id : replica_group) {
group.add_replica_ids(id);
}
result.push_back(group);
}
return result;
}
}
namespace GetCollectiveOpGroupModeTest {
// One (has_channel_id, use_global_device_ids) combination plus the group
// mode it should map to; nullopt `expected` marks an invalid combination.
struct TestCase {
  bool has_channel_id;
  std::optional<bool> use_global_device_ids;
  std::optional<xla::CollectiveOpGroupMode> expected;

  // Test-name suffix, e.g. "chnl_ugdi_true" or "nochnl_nougdi".
  std::string ToString() const {
    std::ostringstream s;
    s << (has_channel_id ? "chnl" : "nochnl");
    s << "_"
      << (use_global_device_ids
              ? (*use_global_device_ids ? "ugdi_true" : "ugdi_false")
              : "nougdi");
    return s.str();
  }
};
std::vector<TestCase> GetTestCases() {
const std::vector<TestCase> test_cases = {
{false, std::nullopt, CollectiveOpGroupMode::kCrossReplica},
{false, false, CollectiveOpGroupMode::kCrossReplica},
{false, true, std::nullopt},
{true, std::nullopt, CollectiveOpGroupMode::kCrossPartition},
{true, false, CollectiveOpGroupMode::kCrossReplicaAndPartition},
{true, true, CollectiveOpGroupMode::kFlattenedID},
};
return test_cases;
}
class GetCollectOpGroupModeTest : public testing::TestWithParam<TestCase> {};

// Valid attribute combinations map to the expected mode; invalid ones must
// return an error status.
TEST_P(GetCollectOpGroupModeTest, Test) {
  const TestCase &tc = GetParam();
  absl::StatusOr<CollectiveOpGroupMode> actual =
      GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
  if (tc.expected) {
    TF_ASSERT_OK(actual.status());
    EXPECT_EQ(*actual, *tc.expected);
  } else {
    EXPECT_FALSE(actual.ok());
  }
}
// Instantiates the suite once per TestCase row.
INSTANTIATE_TEST_SUITE_P(GetCollectOpGroupMode, GetCollectOpGroupModeTest,
                         testing::ValuesIn(GetTestCases()));
}
namespace GetParticipatingDevicesTest {
// Fixture data for device-group resolution: a replica x partition device
// grid, replica groups, the attribute pair that selects the group mode,
// per-id expectations, and the expected overall grouping.
struct TestCase {
  xla::Array2D<int> device_assignment;
  std::vector<std::vector<int64_t>> replica_groups;
  bool has_channel_id;
  std::optional<bool> use_global_device_ids;

  // One query: devices expected to participate with `current_id`.
  struct CurrentIdAndOutput {
    int current_id;
    std::vector<int> expected_output;
  };
  std::vector<CurrentIdAndOutput> subtests;

  std::vector<std::vector<int>> participating_device_groups;
  // True if resolution is expected to return an error for this case.
  bool expected_failure;

  std::string ToString() const;
};
// Readable parameter name: "<mode>_<rows>x<cols>_<RG|NoRG>_<N>SubTests",
// with "Invalid" as the mode when the attribute combination is rejected.
std::string TestCase::ToString() const {
  std::ostringstream s;
  absl::StatusOr<CollectiveOpGroupMode> group_mode =
      GetCollectiveOpGroupMode(has_channel_id, use_global_device_ids);
  if (group_mode.ok()) {
    s << CollectiveOpGroupModeToString(*group_mode);
  } else {
    s << "Invalid";
  }
  s << "_" << device_assignment.n1() << "x" << device_assignment.n2();
  s << "_" << (replica_groups.empty() ? "NoRG" : "RG");
  s << "_" << subtests.size() << "SubTests";
  return s.str();
}
// Streams the human-readable form so gtest can print failing parameters.
std::ostream &operator<<(std::ostream &os, const TestCase &tc) {
  return os << tc.ToString();
}
// Builds the full parameterized test matrix. Each sub-vector below covers
// one CollectiveOpGroupMode; `expected_failure` marks rows whose resolution
// must return an error.
std::vector<TestCase> GetTestCases() {
  std::vector<TestCase> test_cases;
  // No channel id, use_global_device_ids=false: kCrossReplica.
  const std::vector<TestCase> cross_replica_test_cases = {
      {
          {{33}, {44}, {55}},
          {},
          false,
          false,
          {
              {33, {33, 44, 55}},
              {44, {33, 44, 55}},
          },
          {{33, 44, 55}},
          false
      },
      {
          {{33, 34}, {44, 45}, {55, 56}},
          {},
          false,
          false,
          {
              {33, {33, 44, 55}},
              {34, {34, 45, 56}},
              {45, {34, 45, 56}},
          },
          {{33, 44, 55}, {34, 45, 56}},
          false
      },
      {
          {{33}, {44}, {55}},
          {{0}, {1, 2}},
          false,
          false,
          {
              {33, {33}},
              {44, {44, 55}},
          },
          {{ 33 }, {44, 55}},
          false
      },
      {
          {{33, 34}, {44, 45}, {55, 56}},
          {{0}, {1, 2}},
          false,
          false,
          {
              {33, {33}},
              {34, {34}},
              {45, {45, 56}},
          },
          {{33}, {34}, {44, 55}, {45, 56}},
          false
      },
  };
  // Channel id present, no use_global_device_ids: kCrossPartition.
  const std::vector<TestCase> cross_partition_test_cases = {
      {
          {
              {33, 34, 35, 36}, {44, 45, 46, 47}, {55, 56, 57, 58}
          },
          {{0, 1}, {2, 3}},
          true,
          std::nullopt,
          {
              {33, {33, 34}},
              {35, {35, 36}},
              {45, {44, 45}},
              {47, {46, 47}},
              {58, {57, 58}},
          },
          {{33, 34}, {44, 45}, {55, 56},
           {35, 36}, {46, 47}, {57, 58}},
          false
      }
  };
  // Channel id present, use_global_device_ids=false:
  // kCrossReplicaAndPartition.
  const std::vector<TestCase> cross_replica_and_partition_test_cases = {
      {
          {{33, 34}, {44, 45}, {55, 56}},
          {{0}, {1, 2}},
          true,
          false,
          {
              {33, {33, 34}},
              {34, {33, 34}},
              {45, {44, 45, 55, 56}},
          },
          {{33, 34}, {44, 45, 55, 56}},
          false
      },
      {
          {{33, 34}, {44, 45}, {55, 56}},
          {},
          true,
          false,
          {
              {33, {33, 34, 44, 45, 55, 56}},
              {34, {33, 34, 44, 45, 55, 56}},
              {56, {33, 34, 44, 45, 55, 56}},
          },
          {{33, 34, 44, 45, 55, 56}},
          false
      },
  };
  // Channel id present, use_global_device_ids=true: kFlattenedID. The second
  // case has empty replica groups, which is invalid in this mode.
  const std::vector<TestCase> flattened_id_test_cases = {
      {
          {{33, 34}, {44, 45}, {55, 56}},
          {{0}, {1, 2}, {3, 4, 5}},
          true,
          true,
          {
              {33, {33}},
              {34, {34, 44}},
              {44, {34, 44}},
              {45, {45, 55, 56}},
              {55, {45, 55, 56}},
              {56, {45, 55, 56}},
          },
          {{33}, {34, 44}, {45, 55, 56}},
          false
      },
      {
          {{33}},
          {},
          true,
          true,
          {
              {33, {33}},
          },
          {{33}},
          true
      },
  };
  // No channel id but use_global_device_ids=true: mode derivation fails.
  const std::vector<TestCase> failure_test_cases = {
      {
          {{33}, {44}, {55}},
          {},
          false,
          true,
          {
              {33, {}},
          },
          {{33, 44, 55}},
          true
      },
  };
  test_cases.insert(test_cases.end(), cross_replica_test_cases.begin(),
                    cross_replica_test_cases.end());
  // Repeat the cross-replica cases with use_global_device_ids unset, which
  // selects the same mode.
  for (TestCase tc : cross_replica_test_cases) {
    tc.use_global_device_ids = std::nullopt;
    test_cases.push_back(tc);
  }
  test_cases.insert(test_cases.end(), cross_partition_test_cases.begin(),
                    cross_partition_test_cases.end());
  test_cases.insert(test_cases.end(),
                    cross_replica_and_partition_test_cases.begin(),
                    cross_replica_and_partition_test_cases.end());
  test_cases.insert(test_cases.end(), flattened_id_test_cases.begin(),
                    flattened_id_test_cases.end());
  test_cases.insert(test_cases.end(), failure_test_cases.begin(),
                    failure_test_cases.end());
  return test_cases;
}
class GetParticipatingDevicesTest : public testing::TestWithParam<TestCase> {};

// For each subtest, resolves the devices participating with `current_id`
// and compares to the expected list; then checks the full set of device
// groups (order-insensitive). Failures are only tolerated when the case is
// marked expected_failure.
TEST_P(GetParticipatingDevicesTest, Test) {
  const TestCase &tc = GetParam();
  int64_t num_replicas = tc.device_assignment.n1();
  int64_t num_partitions = tc.device_assignment.n2();
  // Copy the Array2D fixture into a real DeviceAssignment.
  DeviceAssignment device_assignment(num_replicas, num_partitions);
  for (int64_t replica_id = 0; replica_id < num_replicas; ++replica_id) {
    for (int64_t partition_id = 0; partition_id < num_partitions;
         ++partition_id) {
      device_assignment(replica_id, partition_id) =
          tc.device_assignment(replica_id, partition_id);
    }
  }
  std::vector<ReplicaGroup> replica_groups =
      CreateReplicaGroups(tc.replica_groups);
  absl::StatusOr<CollectiveOpGroupMode> group_mode =
      GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
  if (!group_mode.ok()) {
    EXPECT_TRUE(tc.expected_failure);
    return;
  }
  // Per-id participation queries.
  for (const TestCase::CurrentIdAndOutput &subtest : tc.subtests) {
    absl::StatusOr<std::vector<GlobalDeviceId>> actual =
        GetParticipatingDevices(GlobalDeviceId(subtest.current_id),
                                device_assignment, replica_groups, *group_mode);
    if (!actual.ok()) {
      EXPECT_TRUE(tc.expected_failure);
      continue;
    }
    std::vector<GlobalDeviceId> expected;
    expected.reserve(subtest.expected_output.size());
    absl::c_transform(subtest.expected_output, std::back_inserter(expected),
                      [](int id) { return GlobalDeviceId(id); });
    EXPECT_EQ(*actual, expected);
  }
  // Whole-grouping query.
  absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
      actual_device_groups = GetParticipatingDevicesGroups(
          device_assignment, replica_groups, *group_mode);
  if (!actual_device_groups.ok()) {
    EXPECT_TRUE(tc.expected_failure);
    return;
  }
  std::vector<std::vector<GlobalDeviceId>> expect_device_groups;
  expect_device_groups.reserve(tc.participating_device_groups.size());
  for (auto subgroup : tc.participating_device_groups) {
    std::vector<GlobalDeviceId> subgroup_device_ids;
    subgroup_device_ids.reserve(subgroup.size());
    absl::c_transform(subgroup, std::back_inserter(subgroup_device_ids),
                      [](int id) { return GlobalDeviceId(id); });
    expect_device_groups.push_back(subgroup_device_ids);
  }
  EXPECT_THAT(*actual_device_groups,
              testing::UnorderedElementsAreArray(expect_device_groups));
}
// Instantiates the suite once per TestCase row.
INSTANTIATE_TEST_SUITE_P(GetParticipatingDevices, GetParticipatingDevicesTest,
                         testing::ValuesIn(GetTestCases()));
}
namespace GetPariticipantCountsForReplicaGroupsTest {
// Inputs and expected per-group participant counts for
// GetPariticipantCountsForReplicaGroups.
struct TestCase {
  std::string test_name;
  std::vector<std::vector<int64_t>> replica_groups;
  CollectiveOpGroupMode group_mode;
  int64_t num_replicas;
  int64_t num_partitions;
  std::vector<int64_t> expected;  // one count per resulting group.
};
class GetPariticipantCountsForReplicaGroupsTest
    : public testing::TestWithParam<TestCase> {};

// Computed per-group participant counts must match the expected list.
TEST_P(GetPariticipantCountsForReplicaGroupsTest, Test) {
  const TestCase &tc = GetParam();
  std::vector<ReplicaGroup> replica_groups =
      CreateReplicaGroups(tc.replica_groups);
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<int64_t> actual,
      GetPariticipantCountsForReplicaGroups(tc.num_replicas, tc.num_partitions,
                                            replica_groups, tc.group_mode));
  EXPECT_THAT(actual, testing::ElementsAreArray(tc.expected));
}
// Data table for the parameterized test above. Each entry pairs a group
// configuration with the participant counts the function must report.
std::vector<TestCase> GetTestCases() {
  return {
      // No explicit groups: one implicit group of all 8 replicas.
      {
          "CrossReplicaEmptyGroup",
          {},
          CollectiveOpGroupMode::kCrossReplica,
          8,
          1,
          {8},
      },
      // Cross-replica groups are instantiated once per partition, so two
      // 2-replica groups over 2 partitions yield four counts of 2.
      {
          "CrossReplicaWithPartitions",
          {{0, 1}, {2, 3}},
          CollectiveOpGroupMode::kCrossReplica,
          4,
          2,
          {2, 2, 2, 2},
      },
      // Cross-replica-and-partition: each group spans both partitions, so
      // each 2-replica group has 2 * num_partitions = 4 participants.
      {
          "CrossReplicaAndPartition",
          {{0, 1}, {2, 3}},
          CollectiveOpGroupMode::kCrossReplicaAndPartition,
          4,
          2,
          {4, 4},
      },
      // Flattened ids: singleton groups, one participant each.
      {
          "FlattenedID",
          {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
          CollectiveOpGroupMode::kFlattenedID,
          4,
          2,
          {1, 1, 1, 1, 1, 1, 1, 1},
      },
  };
}
// Instantiates one test per TestCase; the lambda names each instantiation
// after the case's test_name so failures are self-describing.
INSTANTIATE_TEST_SUITE_P(
    GetPariticipantCountsForReplicaGroups,
    GetPariticipantCountsForReplicaGroupsTest,
    testing::ValuesIn(GetTestCases()),
    [](const testing::TestParamInfo<
        GetPariticipantCountsForReplicaGroupsTest::ParamType> &info) {
      return info.param.test_name;
    });
}
} |
1,952 | cpp | tensorflow/tensorflow | batchnorm_expander | third_party/xla/xla/service/batchnorm_expander.cc | third_party/xla/xla/service/batchnorm_expander_test.cc | #ifndef XLA_SERVICE_BATCHNORM_EXPANDER_H_
#define XLA_SERVICE_BATCHNORM_EXPANDER_H_
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that expands batch-norm instructions (training, inference and
// gradient variants) into sequences of elementary HLO ops — reduces,
// broadcasts and elementwise arithmetic — for backends without native
// batch-norm support.
class BatchNormExpander : public HloModulePass {
 public:
  // Each rewrite_* flag selects whether the corresponding batch-norm variant
  // is expanded; variants whose flag is false are left untouched.
  explicit BatchNormExpander(bool rewrite_training_op = false,
                             bool rewrite_inference_op = false,
                             bool rewrite_grad_op = false)
      : rewrite_training_op_(rewrite_training_op),
        rewrite_inference_op_(rewrite_inference_op),
        rewrite_grad_op_(rewrite_grad_op) {}
  ~BatchNormExpander() override = default;
  absl::string_view name() const override { return "batchnorm_expander"; }
  using HloPassInterface::Run;
  // Runs the expansion over the module's non-fusion computations.
  // Returns true iff any instruction was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  bool rewrite_training_op_;
  bool rewrite_inference_op_;
  bool rewrite_grad_op_;
};
}
#endif
#include "xla/service/batchnorm_expander.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::optional;
// Rewrite visitor that expands batch-norm instructions in a single
// computation. Which of the three variants are rewritten is controlled by
// the rewrite_* flags passed to Run().
class BatchNormExpanderVisitor : public DfsHloRewriteVisitor {
 public:
  // Expansion entry points for the three batch-norm instruction kinds.
  absl::Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
  absl::Status HandleBatchNormInference(HloInstruction* batch_norm) override;
  absl::Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
  // Runs the visitor over `computation`; returns whether anything changed.
  static bool Run(HloComputation* computation, bool rewrite_training_op,
                  bool rewrite_inference_op, bool rewrite_grad_op);
  ~BatchNormExpanderVisitor() override = default;

 private:
  explicit BatchNormExpanderVisitor(HloComputation* computation,
                                    bool rewrite_training_op,
                                    bool rewrite_inference_op,
                                    bool rewrite_grad_op)
      : computation_(computation),
        rewrite_training_op_(rewrite_training_op),
        rewrite_inference_op_(rewrite_inference_op),
        rewrite_grad_op_(rewrite_grad_op) {}
  // Builds a two-parameter scalar-add computation of the given element type
  // (registered with the module) for use as a reduce combiner.
  HloComputation* GetOrCreateScalarAddComputation(
      PrimitiveType primitive_type) {
    HloComputation::Builder b("scalar_add_computation");
    Shape shape = ShapeUtil::MakeShape(primitive_type, {});
    auto scalar_lhs = b.AddInstruction(
        HloInstruction::CreateParameter(0, shape, "scalar_lhs"));
    auto scalar_rhs = b.AddInstruction(
        HloInstruction::CreateParameter(1, shape, "scalar_rhs"));
    auto scalar_op = b.AddInstruction(HloInstruction::CreateBinary(
        shape, HloOpcode::kAdd, scalar_lhs, scalar_rhs));
    return computation_->parent()->AddEmbeddedComputation(b.Build(scalar_op));
  }
  // Creates (but does not insert) rsqrt(operand).
  std::unique_ptr<HloInstruction> Rsqrt(HloInstruction* operand) {
    return HloInstruction::CreateUnary(operand->shape(), HloOpcode::kRsqrt,
                                       operand);
  }
  // Creates (but does not insert) operand / broadcast(element_count); the
  // broadcast itself is materialized via `add_instruction`.
  std::unique_ptr<HloInstruction> Mean(
      HloInstruction* element_count, HloInstruction* operand,
      absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
          add_instruction) {
    auto broadcast = add_instruction(HloInstruction::CreateBroadcast(
        ShapeUtil::MakeStaticShape(operand->shape()), element_count, {}));
    return HloInstruction::CreateBinary(operand->shape(), HloOpcode::kDivide,
                                        operand, broadcast);
  }
  // Emits HLO that computes, at run time, the number of elements per
  // feature: the product of all dimension sizes except `feature_index`
  // (via GetDimensionSize, so dynamic dimensions are handled), converted
  // to the operand's element type.
  std::unique_ptr<HloInstruction> DynamicElementCountPerFeature(
      HloInstruction* operand, int64_t feature_index,
      absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)>
          add_instruction) {
    auto elements_per_feature_s32 = add_instruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    for (int64_t i = 0; i < operand->shape().rank(); ++i) {
      if (i == feature_index) {
        continue;
      }
      auto dynamic_dimension_size =
          add_instruction(HloInstruction::CreateGetDimensionSize(
              ShapeUtil::MakeShape(S32, {}), operand, i));
      elements_per_feature_s32 = add_instruction(HloInstruction::CreateBinary(
          ShapeUtil::MakeShape(S32, {}), HloOpcode::kMultiply,
          dynamic_dimension_size, elements_per_feature_s32));
    }
    return HloInstruction::CreateConvert(
        ShapeUtil::MakeShape(operand->shape().element_type(), {}),
        elements_per_feature_s32);
  }

  HloComputation* computation_;
  bool rewrite_training_op_;
  bool rewrite_inference_op_;
  bool rewrite_grad_op_;
};
}
// Walks `computation` once with a freshly configured visitor; the visitor
// rewrites whichever batch-norm variants its flags enable. Returns whether
// any instruction was changed.
bool BatchNormExpanderVisitor::Run(HloComputation* computation,
                                   bool rewrite_training_op,
                                   bool rewrite_inference_op,
                                   bool rewrite_grad_op) {
  BatchNormExpanderVisitor expander(computation, rewrite_training_op,
                                    rewrite_inference_op, rewrite_grad_op);
  TF_CHECK_OK(computation->Accept(&expander));
  return expander.changed();
}
// Expands kBatchNormTraining into explicit HLO:
//   mean = sum(X) / N                       (per feature, N counted at
//   var  = sum(X*X) / N - mean^2             run time for dynamic dims)
//   out  = (X - mean) * rsqrt(var + eps) * scale + offset
// and replaces the op with the tuple (out, mean, var).
absl::Status BatchNormExpanderVisitor::HandleBatchNormTraining(
    HloInstruction* batch_norm) {
  if (!rewrite_training_op_) {
    return absl::OkStatus();
  }
  // Every new instruction goes through `add` so it inherits the original
  // op's metadata and is recorded for the sharding fix-up below.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  // Snapshot so the sharding branch can verify every added instruction was
  // routed through `add`.
  int64_t instruction_count_before = computation_->instruction_count();
  // Operands: (activation, scale, offset).
  HloInstruction* operand = batch_norm->mutable_operand(0);
  const Shape operand_shape = operand->shape();
  PrimitiveType ptype = operand_shape.element_type();
  int64_t feature_index = batch_norm->feature_index();
  HloInstruction* scale = batch_norm->mutable_operand(1);
  HloInstruction* offset = batch_norm->mutable_operand(2);
  const Shape feature_shape = scale->shape();
  // Constants (0 and epsilon) converted to the operand's element type.
  auto zero_literal = LiteralUtil::CreateR0(0.0f);
  TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
  auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
  auto epsilon = add(HloInstruction::CreateBroadcast(
      scalar_broadcast_shape,
      add(HloInstruction::CreateConstant(std::move(epsilon_literal))), {}));
  // All dimensions except the feature dimension: the reduce dimensions.
  std::vector<int64_t> dimensions_without_feature;
  const int64_t rank = operand_shape.rank();
  dimensions_without_feature.reserve(rank - 1);
  for (int64_t i = 0; i < rank; ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }
  // N in the formulas above, computed at run time.
  auto elements_per_feature =
      add(DynamicElementCountPerFeature(operand, feature_index, add));
  // Broadcasts a per-feature vector across the activation shape, carrying
  // over the vector's dynamic-ness onto the feature dimension.
  auto feature_broadcast = [&](HloInstruction* inst) -> HloInstruction* {
    Shape feature_broadcast_shape = scalar_broadcast_shape;
    feature_broadcast_shape.set_dynamic_dimension(
        feature_index, inst->shape().is_dynamic_dimension(0));
    return add(HloInstruction::CreateBroadcast(feature_broadcast_shape, inst,
                                               {feature_index}));
  };
  auto scale_broadcasted = feature_broadcast(scale);
  auto offset_broadcasted = feature_broadcast(offset);
  HloComputation* add_reduce_computation =
      GetOrCreateScalarAddComputation(ptype);
  // sum(X) and sum(X*X), reduced over all non-feature dimensions.
  auto operand_squared =
      add_binary(operand_shape, HloOpcode::kMultiply, operand, operand);
  auto sum = add(HloInstruction::CreateReduce(feature_shape, operand, zero,
                                              dimensions_without_feature,
                                              add_reduce_computation));
  auto squared_sum = add(HloInstruction::CreateReduce(
      feature_shape, operand_squared, zero, dimensions_without_feature,
      add_reduce_computation));
  // E[X], E[X^2] and Var[X] = E[X^2] - E[X]^2.
  auto mean = add(Mean(elements_per_feature, sum, add));
  auto mean_broadcasted = feature_broadcast(mean);
  auto square_mean = add(Mean(elements_per_feature, squared_sum, add));
  auto mean_square =
      add_binary(feature_shape, HloOpcode::kMultiply, mean, mean);
  auto var =
      add_binary(feature_shape, HloOpcode::kSubtract, square_mean, mean_square);
  auto var_broadcasted = feature_broadcast(var);
  // Normalize, then apply scale and offset.
  auto var_add_epsilon = add_binary(var_broadcasted->shape(), HloOpcode::kAdd,
                                    var_broadcasted, epsilon);
  auto rsqrt_var_add_epsilon = add(Rsqrt(var_add_epsilon));
  auto operand_minus_mean = add_binary(operand_shape, HloOpcode::kSubtract,
                                       operand, mean_broadcasted);
  auto normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                               operand_minus_mean, rsqrt_var_add_epsilon);
  auto scaled_normalized = add_binary(operand_shape, HloOpcode::kMultiply,
                                      normalized, scale_broadcasted);
  auto shifted_normalized = add_binary(operand_shape, HloOpcode::kAdd,
                                       scaled_normalized, offset_broadcasted);
  auto tuple = HloInstruction::CreateTuple({shifted_normalized, mean, var});
  if (batch_norm->has_sharding()) {
    int64_t instruction_count_after = computation_->instruction_count();
    CHECK_EQ(instruction_count_after,
             instruction_count_before + added_instructions.size());
    const HloSharding& sharding = batch_norm->sharding();
    // Activation-shaped instructions get the output element's sharding;
    // everything else (scalars, per-feature vectors) gets the default.
    HloSharding operand_sharding =
        sharding.GetAsShapeTree(batch_norm->shape()).element({0});
    optional<int64_t> unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
        inst->set_sharding(operand_sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    tuple->set_sharding(sharding);
  }
  TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
  return absl::OkStatus();
}
// Expands kBatchNormInference using the precomputed mean/variance operands:
//   true_scale = scale * rsqrt(var + eps)        (per feature)
//   true_shift = offset - mean * true_scale
//   out        = X * broadcast(true_scale) + broadcast(true_shift)
absl::Status BatchNormExpanderVisitor::HandleBatchNormInference(
    HloInstruction* batch_norm) {
  if (!rewrite_inference_op_) {
    return absl::OkStatus();
  }
  // Operands: (activation, scale, offset, mean, variance).
  HloInstruction* operand = batch_norm->mutable_operand(0);
  const Shape operand_shape = operand->shape();
  int64_t feature_index = batch_norm->feature_index();
  PrimitiveType ptype = operand_shape.element_type();
  HloInstruction* scale = batch_norm->mutable_operand(1);
  HloInstruction* offset = batch_norm->mutable_operand(2);
  HloInstruction* mean = batch_norm->mutable_operand(3);
  HloInstruction* var = batch_norm->mutable_operand(4);
  const Shape feature_shape = scale->shape();
  // Epsilon, converted to the operand element type and broadcast to the
  // per-feature shape.
  Shape scalar_broadcast_shape = ShapeUtil::MakeStaticShape(feature_shape);
  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  auto epsilon = computation_->AddInstruction(HloInstruction::CreateBroadcast(
      scalar_broadcast_shape,
      computation_->AddInstruction(
          HloInstruction::CreateConstant(std::move(epsilon_literal))),
      {}));
  // Computed but unused here; kept identical to the other handlers.
  std::vector<int64_t> dimensions_without_feature;
  const int64_t rank = operand_shape.rank();
  dimensions_without_feature.reserve(rank - 1);
  for (int64_t i = 0; i < rank; ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }
  // New instructions flow through `add` so they inherit metadata and are
  // recorded for the sharding fix-up below.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  // Broadcast a per-feature vector across the activation shape, carrying
  // its dynamic-ness onto the feature dimension.
  auto feature_broadcast = [&](HloInstruction* a) {
    Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
    broadcast_shape.set_dynamic_dimension(feature_index,
                                          a->shape().is_dynamic_dimension(0));
    return add(
        HloInstruction::CreateBroadcast(broadcast_shape, a, {feature_index}));
  };
  int64_t instruction_count_before = computation_->instruction_count();
  // Fold scale/mean/variance into a single per-feature affine transform.
  auto true_scale = add_binary(
      feature_shape, HloOpcode::kMultiply, scale,
      add(Rsqrt(add_binary(feature_shape, HloOpcode::kAdd, var, epsilon))));
  auto true_shift = add_binary(
      feature_shape, HloOpcode::kSubtract, offset,
      add_binary(feature_shape, HloOpcode::kMultiply, mean, true_scale));
  auto shifted_normalized =
      add_binary(operand_shape, HloOpcode::kAdd,
                 add_binary(operand_shape, HloOpcode::kMultiply, operand,
                            feature_broadcast(true_scale)),
                 feature_broadcast(true_shift));
  // Note: the epsilon constant/broadcast above bypassed `add`, so it is not
  // counted here and does not receive a sharding below.
  int64_t instruction_count_after = computation_->instruction_count();
  CHECK_EQ(instruction_count_after,
           instruction_count_before + added_instructions.size());
  if (batch_norm->has_sharding()) {
    const HloSharding& sharding = batch_norm->sharding();
    optional<int64_t> unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    // Activation-shaped instructions keep the op's sharding; per-feature
    // helpers get the default.
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), operand_shape)) {
        inst->set_sharding(sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    shifted_normalized->set_sharding(sharding);
  }
  TF_CHECK_OK(ReplaceInstruction(batch_norm, shifted_normalized));
  return absl::OkStatus();
}
// Expands kBatchNormGrad into explicit HLO. With dy = grad_output and N the
// per-feature element count, the code below builds (per feature):
//   grad_beta  = sum(dy)
//   grad_scale = sum(dy * (X - mean)) * rsqrt(var + eps)
//   grad_X     = (scale * rsqrt(var + eps) / N) *
//                (N*dy - sum(dy) - (X - mean) * sum(dy*(X-mean)) / (var+eps))
// and replaces the op with the tuple (grad_X, grad_scale, grad_beta).
absl::Status BatchNormExpanderVisitor::HandleBatchNormGrad(
    HloInstruction* batch_norm) {
  if (!rewrite_grad_op_) {
    return absl::OkStatus();
  }
  // New instructions flow through `add` so they inherit metadata and are
  // recorded for the sharding fix-up below.
  std::vector<HloInstruction*> added_instructions;
  auto add = [&](std::unique_ptr<HloInstruction> inst) {
    HloInstruction* added_inst = computation_->AddInstruction(std::move(inst));
    added_inst->set_metadata(batch_norm->metadata());
    added_instructions.push_back(added_inst);
    return added_inst;
  };
  auto add_binary = [&](const Shape& shape, const HloOpcode opcode,
                        HloInstruction* a, HloInstruction* b) {
    return add(HloInstruction::CreateBinary(shape, opcode, a, b));
  };
  int64_t instruction_count_before = computation_->instruction_count();
  // Operands: (activation, scale, mean, variance, grad_output).
  HloInstruction* activation = batch_norm->mutable_operand(0);
  const Shape activation_shape = activation->shape();
  PrimitiveType ptype = activation_shape.element_type();
  HloInstruction* scale = batch_norm->mutable_operand(1);
  const Shape feature_shape = scale->shape();
  HloInstruction* mean = batch_norm->mutable_operand(2);
  HloInstruction* variance = batch_norm->mutable_operand(3);
  HloInstruction* grad_output = batch_norm->mutable_operand(4);
  int64_t feature_index = batch_norm->feature_index();
  // N, computed at run time so dynamic dimensions are handled.
  auto elements_per_feature =
      add(DynamicElementCountPerFeature(activation, feature_index, add));
  // Constants: 0 (reduce init) and epsilon in both broadcast shapes.
  auto zero_literal = LiteralUtil::CreateR0(0.0f);
  TF_ASSIGN_OR_RETURN(zero_literal, zero_literal.Convert(ptype));
  auto zero = add(HloInstruction::CreateConstant(std::move(zero_literal)));
  auto epsilon_literal = LiteralUtil::CreateR0(batch_norm->epsilon());
  TF_ASSIGN_OR_RETURN(epsilon_literal, epsilon_literal.Convert(ptype));
  auto epsilon_scalar =
      add(HloInstruction::CreateConstant(std::move(epsilon_literal)));
  auto epsilon_activation = add(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeStaticShape(activation_shape), epsilon_scalar, {}));
  auto epsilon_feature = add(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeStaticShape(feature_shape), epsilon_scalar, {}));
  // All dimensions except the feature dimension: the reduce dimensions.
  std::vector<int64_t> dimensions_without_feature;
  const int64_t rank = activation_shape.rank();
  dimensions_without_feature.reserve(rank - 1);
  for (int64_t i = 0; i < rank; ++i) {
    if (i != feature_index) {
      dimensions_without_feature.push_back(i);
    }
  }
  // Broadcast a per-feature vector across the activation shape, carrying
  // its dynamic-ness onto the feature dimension.
  auto activation_broadcast = [&](HloInstruction* hlo) -> HloInstruction* {
    Shape broadcast_shape = ShapeUtil::MakeStaticShape(activation_shape);
    broadcast_shape.set_dynamic_dimension(feature_index,
                                          hlo->shape().is_dynamic_dimension(0));
    return add(
        HloInstruction::CreateBroadcast(broadcast_shape, hlo, {feature_index}));
  };
  auto scale_broadcasted = activation_broadcast(scale);
  auto variance_broadcasted = activation_broadcast(variance);
  auto mean_broadcasted = activation_broadcast(mean);
  // rsqrt(var + eps) in both activation and per-feature shapes.
  auto rsqrt_var_add_epsilon_broadcasted =
      add(Rsqrt(add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
                           variance_broadcasted, epsilon_activation)));
  auto rsqrt_var_add_epsilon = add(Rsqrt(
      add_binary(feature_shape, HloOpcode::kAdd, variance, epsilon_feature)));
  // X - mean and dy * (X - mean).
  auto activation_minus_mean = add_binary(
      activation_shape, HloOpcode::kSubtract, activation, mean_broadcasted);
  auto grad_output_times_activation_minus_mean =
      add_binary(activation_shape, HloOpcode::kMultiply, grad_output,
                 activation_minus_mean);
  HloComputation* add_reduce_computation =
      GetOrCreateScalarAddComputation(ptype);
  // sum(dy * (X - mean)) and sum(dy), reduced over non-feature dimensions.
  auto sum_grad_output_times_activation_minus_mean =
      add(HloInstruction::CreateReduce(
          feature_shape, grad_output_times_activation_minus_mean, zero,
          dimensions_without_feature, add_reduce_computation));
  auto grad_beta = add(HloInstruction::CreateReduce(
      feature_shape, grad_output, zero, dimensions_without_feature,
      add_reduce_computation));
  auto grad_scale = add_binary(feature_shape, HloOpcode::kMultiply,
                               sum_grad_output_times_activation_minus_mean,
                               rsqrt_var_add_epsilon);
  // i1..i6 build up grad_X following the formula in the header comment.
  auto i2 = activation_broadcast(grad_beta);  // broadcast(sum(dy))
  auto i3 = activation_broadcast(sum_grad_output_times_activation_minus_mean);
  auto i4 = add_binary(activation_shape, HloOpcode::kMultiply, i3,
                       activation_minus_mean);
  // (X - mean) * sum(dy*(X-mean)) / (var + eps)
  auto i5 =
      add_binary(activation_shape, HloOpcode::kDivide, i4,
                 add_binary(variance_broadcasted->shape(), HloOpcode::kAdd,
                            variance_broadcasted, epsilon_activation));
  // scale * rsqrt(var+eps) must be dynamic wherever either factor is.
  Shape scale_times_rsqrt_var_add_epsilon_shape = scale_broadcasted->shape();
  for (int64_t i = 0; i < rsqrt_var_add_epsilon_broadcasted->shape().rank();
       ++i) {
    if (rsqrt_var_add_epsilon_broadcasted->shape().is_dynamic_dimension(i)) {
      scale_times_rsqrt_var_add_epsilon_shape.set_dynamic_dimension(i, true);
    }
  }
  auto scale_times_rsqrt_var_add_epsilon =
      add_binary(scale_times_rsqrt_var_add_epsilon_shape, HloOpcode::kMultiply,
                 scale_broadcasted, rsqrt_var_add_epsilon_broadcasted);
  // Divide by N: scale * rsqrt(var+eps) / N.
  scale_times_rsqrt_var_add_epsilon =
      add(Mean(elements_per_feature, scale_times_rsqrt_var_add_epsilon, add));
  // N * dy
  auto i1 = add_binary(grad_output->shape(), HloOpcode::kMultiply, grad_output,
                       add(HloInstruction::CreateBroadcast(
                           ShapeUtil::MakeStaticShape(activation_shape),
                           elements_per_feature, {})));
  // N*dy - sum(dy) - (X-mean)*sum(dy*(X-mean))/(var+eps)
  auto i6 = add_binary(
      activation_shape, HloOpcode::kSubtract,
      add_binary(activation_shape, HloOpcode::kSubtract, i1, i2), i5);
  auto grad_activation = add_binary(activation_shape, HloOpcode::kMultiply,
                                    scale_times_rsqrt_var_add_epsilon, i6);
  auto tuple =
      HloInstruction::CreateTuple({grad_activation, grad_scale, grad_beta});
  if (batch_norm->has_sharding()) {
    const HloSharding& sharding = batch_norm->sharding();
    int64_t instruction_count_after = computation_->instruction_count();
    CHECK_EQ(instruction_count_after,
             instruction_count_before + added_instructions.size());
    // Activation-shaped instructions get the grad_X element's sharding;
    // everything else gets the default.
    HloSharding activation_sharding =
        sharding.GetAsShapeTree(batch_norm->shape()).element({0});
    auto unique_device = batch_norm->sharding_unique_device();
    HloSharding default_sharding =
        unique_device.has_value()
            ? HloSharding::AssignDevice(unique_device.value())
            : HloSharding::Replicate();
    for (HloInstruction* inst : added_instructions) {
      if (ShapeUtil::Equal(inst->shape(), activation_shape)) {
        inst->set_sharding(activation_sharding);
      } else {
        inst->set_sharding(default_sharding);
      }
    }
    tuple->set_sharding(sharding);
  }
  TF_CHECK_OK(ReplaceWithNewInstruction(batch_norm, std::move(tuple)));
  return absl::OkStatus();
}
// Expands batch-norm instructions in every non-fusion computation of
// `module`, honoring the pass's rewrite_* flags. Returns true iff any
// instruction was rewritten.
absl::StatusOr<bool> BatchNormExpander::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(2, "BatchNormExpander::Run(), before:\n" + module->ToString());
  bool did_rewrite = false;
  for (HloComputation* comp :
       module->MakeNonfusionComputations(execution_threads)) {
    did_rewrite |= BatchNormExpanderVisitor::Run(
        comp, rewrite_training_op_, rewrite_inference_op_, rewrite_grad_op_);
  }
  XLA_VLOG_LINES(2, "BatchNormExpander::Run(), after:\n" + module->ToString());
  return did_rewrite;
}
} | #include "xla/service/batchnorm_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class BatchNormExpanderTest : public HloTestBase {
protected:
int64_t CountGetDimensionSize(const HloModule& module) {
int64_t count = 0;
for (HloComputation* comp : module.computations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == HloOpcode::kGetDimensionSize) {
count++;
}
}
}
return count;
}
};
// Verifies that a rank-4 batch-norm-training op is expanded: the root
// becomes a tuple and three GetDimensionSize ops (one per non-feature
// dimension) are emitted by the dynamic element-count computation.
TEST_F(BatchNormExpanderTest, BatchNormTraining) {
  Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
  Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
  Shape offset_shape = ShapeUtil::MakeShape(F32, {2});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "activation"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scale_shape, "scale"));
  HloInstruction* param2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, offset_shape, "offset"));
  // epsilon=0.001, feature_index=3.
  builder.AddInstruction(HloInstruction::CreateBatchNormTraining(
      ShapeUtil::MakeTupleShape({input_shape, scale_shape, offset_shape}),
      param0, param1, param2,
      0.001, 3));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormTraining);
  BatchNormExpander rewriter(true,
                             true,
                             true);
  ASSERT_TRUE(rewriter.Run(module.get()).value());
  root = computation->root_instruction();
  EXPECT_EQ(CountGetDimensionSize(*module), 3);
  EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
// Same check for the gradient op: after expansion the root is a tuple of
// (grad_activation, grad_scale, grad_beta) and three GetDimensionSize ops
// were created for the dynamic element count.
TEST_F(BatchNormExpanderTest, BatchNormGrad) {
  Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
  Shape scale_shape = ShapeUtil::MakeShape(F32, {2});
  Shape mean_shape = ShapeUtil::MakeShape(F32, {2});
  Shape var_shape = ShapeUtil::MakeShape(F32, {2});
  Shape grad_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 2});
  HloComputation::Builder builder(TestName());
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "activation"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scale_shape, "scale"));
  HloInstruction* param2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, mean_shape, "mean"));
  HloInstruction* param3 = builder.AddInstruction(
      HloInstruction::CreateParameter(3, var_shape, "var"));
  HloInstruction* param4 = builder.AddInstruction(
      HloInstruction::CreateParameter(4, grad_output_shape, "grad_output"));
  // epsilon=0.001, feature_index=3.
  builder.AddInstruction(HloInstruction::CreateBatchNormGrad(
      ShapeUtil::MakeTupleShape({input_shape, scale_shape, mean_shape}), param0,
      param1, param2, param3, param4,
      0.001, 3));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  HloInstruction* root = computation->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kBatchNormGrad);
  BatchNormExpander rewriter(true,
                             true,
                             true);
  ASSERT_TRUE(rewriter.Run(module.get()).value());
  root = computation->root_instruction();
  EXPECT_EQ(CountGetDimensionSize(*module), 3);
  EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
}
// Verifies the sharding fix-up: when the batch-norm op carries a maximal
// device sharding, every instruction produced by the expansion (parameters
// excluded) ends up assigned to that same device.
TEST_F(BatchNormExpanderTest, BatchNormTrainingSharding) {
  const char* module_str = R"(
HloModule module
ENTRY entry {
  %param.0 = f32[8,4] parameter(0)
  %param.1 = f32[4] parameter(1)
  %param.2 = f32[4] parameter(2)
  ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
    batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
    epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  BatchNormExpander rewriter(true,
                             true,
                             true);
  ASSERT_TRUE(rewriter.Run(m.get()).value());
  for (auto* instruction : m->entry_computation()->instructions()) {
    if (instruction->opcode() == HloOpcode::kParameter) {
      continue;
    }
    auto device = instruction->sharding_unique_device();
    ASSERT_TRUE(device);
    EXPECT_EQ(*device, 1);
  }
}
// End-to-end check: the expanded module executes and matches the reference
// interpreter's result for the same HLO within the given error bounds.
TEST_F(BatchNormExpanderTest, Execution) {
  const char* module_str = R"(
HloModule module
ENTRY entry {
  %param.0 = f32[8,4] parameter(0)
  %param.1 = f32[4] parameter(1)
  %param.2 = f32[4] parameter(2)
  ROOT %batch-norm-training = (f32[8,4], f32[4], f32[4])
    batch-norm-training(f32[8,4] %param.0, f32[4] %param.1, f32[4] %param.2),
    epsilon=0.001, feature_index=1, sharding={{maximal device=1},{maximal device=1},{maximal device=1}}
})";
  EXPECT_TRUE(RunAndCompare(module_str, ErrorSpec{1e-4, 1e-4}));
}
}
} |
1,953 | cpp | tensorflow/tensorflow | tuple_util | third_party/xla/xla/service/tuple_util.cc | third_party/xla/xla/service/tuple_util_test.cc | #ifndef XLA_SERVICE_TUPLE_UTIL_H_
#define XLA_SERVICE_TUPLE_UTIL_H_
#include <cstdint>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
namespace xla {
// Static helpers for building and rewriting HLO tuples.
class TupleUtil {
 public:
  // Generates HLO that extracts the first `elements` elements of
  // `input_tuple` (which must be tuple-shaped) via get-tuple-element and
  // packs them into a new tuple. A non-empty `name` names the new
  // instructions.
  static HloInstruction* ExtractPrefix(HloInstruction* input_tuple,
                                       int64_t elements,
                                       absl::string_view name = "");
  // Generates HLO that rebuilds `input_tuple` with `trailing_values`
  // appended as extra trailing elements.
  static HloInstruction* AppendSuffix(
      HloInstruction* input_tuple,
      absl::Span<HloInstruction* const> trailing_values);
  // Rebuilds `input_tuple` element-for-element as a fresh tuple of GTEs.
  static HloInstruction* Duplicate(HloInstruction* input_tuple) {
    return ExtractPrefix(input_tuple, input_tuple->shape().tuple_shapes_size());
  }
  // Returns a tuple equal to `tuple` except that the element at
  // `shape_index` is `new_instruction`. May return `tuple` unchanged when
  // the replacement is already in place; may insert a bitcast when shapes
  // differ and `insert_bitcast_if_different_shape` is set.
  static absl::StatusOr<HloInstruction*> ReplaceTupleWith(
      HloInstruction* new_instruction, HloInstruction* tuple,
      ShapeIndex shape_index, bool insert_bitcast_if_different_shape = true);
  // Walks from `position.instruction` down `position.index` with
  // get-tuple-element instructions (reusing suitable existing ones) and
  // returns the instruction at that position.
  static HloInstruction* AddGetTupleElements(const HloPosition& position);
  // Returns a ShapeTree whose root element is `tuple` and whose descendants
  // are GTE instructions extracting each nested sub-element.
  static ShapeTree<HloInstruction*> DisassembleTupleInstruction(
      HloInstruction* tuple);
  // Inverse of DisassembleTupleInstruction: rebuilds nested tuple
  // instructions bottom-up from the leaves of `elements`.
  static HloInstruction* AssembleTupleInstruction(
      HloComputation* computation, ShapeTree<HloInstruction*> elements,
      absl::string_view name = "");
};
}
#endif
#include "xla/service/tuple_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Builds a tuple of the first `elements` get-tuple-elements of
// `input_tuple`. A non-empty `name` labels the new instructions.
HloInstruction* TupleUtil::ExtractPrefix(HloInstruction* input_tuple,
                                         int64_t elements,
                                         absl::string_view name) {
  CHECK(input_tuple->shape().IsTuple());
  HloComputation* computation = input_tuple->parent();
  const Shape& input_shape = input_tuple->shape();
  std::vector<HloInstruction*> prefix;
  prefix.reserve(elements);
  for (int i = 0; i < elements; ++i) {
    std::string gte_name;
    if (!name.empty()) {
      gte_name = absl::StrCat(name, ".element.", i);
    }
    HloInstruction* gte = computation->AddInstruction(
        HloInstruction::CreateGetTupleElement(input_shape.tuple_shapes(i),
                                              input_tuple, i),
        gte_name);
    prefix.push_back(gte);
  }
  return computation->AddInstruction(HloInstruction::CreateTuple(prefix),
                                     name);
}
// Rebuilds `input_tuple` (via GTEs) with `trailing_values` appended as
// additional trailing elements.
HloInstruction* TupleUtil::AppendSuffix(
    HloInstruction* input_tuple,
    absl::Span<HloInstruction* const> trailing_values) {
  CHECK(input_tuple->shape().IsTuple());
  HloComputation* computation = input_tuple->parent();
  const Shape& input_shape = input_tuple->shape();
  const int64_t element_count = input_shape.tuple_shapes_size();
  std::vector<HloInstruction*> new_elements;
  new_elements.reserve(element_count + trailing_values.size());
  for (int64_t i = 0; i < element_count; ++i) {
    new_elements.push_back(
        computation->AddInstruction(HloInstruction::CreateGetTupleElement(
            input_shape.tuple_shapes(i), input_tuple, i)));
  }
  for (HloInstruction* value : trailing_values) {
    new_elements.push_back(value);
  }
  return computation->AddInstruction(
      HloInstruction::CreateTuple(new_elements));
}
// Returns a tuple identical to `tuple` except that the element at
// `shape_index` is replaced by `new_instruction`. Recurses into nested
// tuples; short-circuits (returning `tuple` itself) when the replacement is
// already in place; optionally inserts a bitcast when the leaf shapes
// differ.
absl::StatusOr<HloInstruction*> TupleUtil::ReplaceTupleWith(
    HloInstruction* new_instruction, HloInstruction* tuple,
    ShapeIndex shape_index, bool insert_bitcast_if_different_shape) {
  const Shape& tuple_shape = tuple->shape();
  CHECK(tuple->shape().IsTuple())
      << "ReplaceTupleWith was called for a non-tuple. Tuple = "
      << tuple->ToString()
      << ", new_instruction = " << new_instruction->ToString()
      << ", shape_index = " << shape_index.ToString();
  // Short-circuit: if `new_instruction` is a chain of GTEs that extracts
  // exactly `shape_index` out of `tuple`, the tuple already "contains" it
  // and can be returned unchanged.
  const HloInstruction* instruction = new_instruction;
  bool equivalent = true;
  for (int i = shape_index.size() - 1; i >= 0; --i) {
    int index = shape_index[i];
    if (instruction->opcode() != HloOpcode::kGetTupleElement ||
        instruction->tuple_index() != index) {
      equivalent = false;
      break;
    }
    instruction = instruction->operand(0);
  }
  if (equivalent && instruction == tuple) {
    VLOG(4) << "Instruction " << new_instruction->ToShortString()
            << " already exists at index " << shape_index.ToString() << " of "
            << tuple->ToShortString();
    return tuple;
  }
  HloComputation* computation = new_instruction->parent();
  std::vector<HloInstruction*> tuple_args(tuple_shape.tuple_shapes_size());
  // shape_index[0] == tuple_shapes_size() is allowed: see the append at the
  // bottom of this function.
  CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]);
  for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) {
    const Shape& subshape = tuple_shape.tuple_shapes(i);
    // Fetches element i of `tuple`: reuse the operand if `tuple` is a
    // kTuple, otherwise materialize a GTE.
    auto get_operand = [&]() {
      if (tuple->opcode() == HloOpcode::kTuple) {
        return tuple->mutable_operand(i);
      } else {
        return computation->AddInstruction(
            HloInstruction::CreateGetTupleElement(subshape, tuple, i));
      }
    };
    if (i == shape_index[0]) {
      if (subshape.IsTuple()) {
        // Recurse into the nested tuple with the remaining index.
        TF_ASSIGN_OR_RETURN(tuple_args[i],
                            ReplaceTupleWith(new_instruction, get_operand(),
                                             ShapeIndex(shape_index.begin() + 1,
                                                        shape_index.end())));
      } else {
        if (subshape != new_instruction->shape() &&
            insert_bitcast_if_different_shape) {
          VLOG(4) << "Old shape = " << subshape.ToString()
                  << ", new shape = " << new_instruction->shape().ToString()
                  << "; inserting a bitcast.";
          new_instruction = computation->AddInstruction(
              HloInstruction::CreateBitcast(subshape, new_instruction));
        } else if (tuple->opcode() == HloOpcode::kTuple &&
                   tuple->operand(i) == new_instruction) {
          // Another short-circuit: the kTuple already holds the instruction
          // at this position.
          VLOG(4) << "Tuple already contains the new instruction = "
                  << new_instruction->ToShortString()
                  << " tuple = " << tuple->ToShortString();
          return tuple;
        }
        tuple_args[i] = new_instruction;
      }
    } else {
      tuple_args[i] = get_operand();
    }
  }
  // shape_index[0] one past the end means "append as a new trailing
  // element" rather than replace.
  if (shape_index[0] == tuple_shape.tuple_shapes_size()) {
    tuple_args.push_back(new_instruction);
  }
  return computation->AddInstruction(HloInstruction::CreateTuple(tuple_args));
}
// Descends from `position.instruction` through `position.index`, one GTE
// per index step. Reuses the first existing non-root GTE for an index when
// one is present; otherwise creates a new one.
HloInstruction* TupleUtil::AddGetTupleElements(
    const HloPosition& position) {
  HloInstruction* current = position.instruction;
  HloComputation* computation = current->parent();
  for (int64_t index : position.index) {
    HloInstruction* reusable = nullptr;
    for (HloInstruction* user : current->users()) {
      if (user != user->parent()->root_instruction() &&
          user->opcode() == HloOpcode::kGetTupleElement &&
          user->tuple_index() == index) {
        reusable = user;
        break;
      }
    }
    if (reusable != nullptr) {
      current = reusable;
    } else {
      current =
          computation->AddInstruction(HloInstruction::CreateGetTupleElement(
              current->shape().tuple_shapes(index), current, index));
    }
  }
  return current;
}
// Builds a ShapeTree whose root element is `tuple` itself and whose every
// non-root element is a get-tuple-element peeling one level off its parent
// element. Relies on the traversal visiting parents before children so the
// parent element is always populated when a child is processed.
ShapeTree<HloInstruction*> TupleUtil::DisassembleTupleInstruction(
    HloInstruction* tuple) {
  const Shape& tuple_shape = tuple->shape();
  ShapeTree<HloInstruction*> disassembled(tuple_shape);
  disassembled.ForEachMutableElement(
      [&](ShapeIndexView index, HloInstruction** element) {
        if (index.empty()) {
          // The root of the tree is the tuple instruction itself.
          *element = tuple;
          return;
        }
        ShapeIndexView enclosing_index = index.subspan(0, index.size() - 1);
        HloInstruction* enclosing = disassembled.element(enclosing_index);
        std::string gte_name = absl::StrCat(tuple->name(), ".disassembled.",
                                            absl::StrJoin(index, "."));
        *element = tuple->parent()->AddInstruction(
            HloInstruction::CreateGetTupleElement(enclosing, index.back()),
            gte_name);
      });
  return disassembled;
}
// Inverse of DisassembleTupleInstruction: given a ShapeTree whose leaves are
// populated, creates tuple instructions bottom-up for every tuple-shaped node
// and returns the instruction assembled at the root. When `name` is
// non-empty, the root tuple gets `name` and interior tuples get a
// ".assembled.<index>" suffix.
HloInstruction* TupleUtil::AssembleTupleInstruction(
    HloComputation* computation, ShapeTree<HloInstruction*> elements,
    absl::string_view name) {
  // Post-order guarantees all children are finalized before the tuple that
  // aggregates them is created.
  elements.ForEachMutableElementPostOrder(
      [&](const ShapeIndex& index, HloInstruction** element) {
        const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
        if (!subshape.IsTuple()) {
          return;  // Leaf elements are used as-is.
        }
        absl::InlinedVector<HloInstruction*, 2> operands;
        for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
          ShapeIndex child_index = index;
          child_index.push_back(i);
          operands.push_back(elements.element(child_index));
        }
        std::string tuple_name;
        if (!name.empty()) {
          tuple_name = index.empty()
                           ? std::string(name)
                           : absl::StrCat(name, ".assembled.",
                                          absl::StrJoin(index, "."));
        }
        *element = computation->AddInstruction(
            HloInstruction::CreateTuple(operands), tuple_name);
      });
  return elements.element({});
}
} | #include "xla/service/tuple_util.h"
#include <memory>
#include <string>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using TupleUtilTest = HloTestBase;
TEST_F(TupleUtilTest, ExtractPrefix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* prefix = TupleUtil::ExtractPrefix(param0, 2);
EXPECT_THAT(prefix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, AppendSuffix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* param1 =
module->entry_computation()->parameter_instruction(1);
HloInstruction* with_suffix =
TupleUtil::AppendSuffix(param0, {param1, param1});
EXPECT_THAT(with_suffix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1),
op::GetTupleElement(op::Parameter(0), 2),
op::Parameter(1), op::Parameter(1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p0, tuple, {1}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(0), op::Parameter(0)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {0}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(1),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInstNested) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {1, 0}));
EXPECT_THAT(
new_tuple,
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::Tuple(op::Parameter(1),
op::GetTupleElement(
op::GetTupleElement(op::Parameter(0), 1), 1))));
}
TEST_F(TupleUtilTest, AddGetTupleElements) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
gte = (f32[32,32]{1,0}, f32[32,32]{1,0}) get-tuple-element(p0), index=1
ROOT root = f32[32,32]{1,0} get-tuple-element(gte), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* existing_gte = FindInstruction(module.get(), "gte");
HloInstruction* new_gte = TupleUtil::AddGetTupleElements({p0, {1, 0}});
EXPECT_THAT(new_gte, op::GetTupleElement(existing_gte, 0));
}
}
} |
1,954 | cpp | tensorflow/tensorflow | root_instruction_sinker | third_party/xla/xla/service/root_instruction_sinker.cc | third_party/xla/xla/service/root_instruction_sinker_test.cc | #ifndef XLA_SERVICE_ROOT_INSTRUCTION_SINKER_H_
#define XLA_SERVICE_ROOT_INSTRUCTION_SINKER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that sinks the root instruction of every scheduled computation to the
// end of its instruction sequence, so that the root is always the last
// instruction in the schedule.
class RootInstructionSinker : public HloModulePass {
 public:
  ~RootInstructionSinker() override = default;
  absl::string_view name() const override { return "root-instruction-sinker"; }
  using HloPassInterface::Run;
  // Requires `module` to have a schedule. Returns true if any computation's
  // root was moved.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/root_instruction_sinker.h"
#include "xla/service/tuple_util.h"
namespace xla {
namespace {
// Duplicates a tuple-shaped root via TupleUtil::Duplicate and appends the new
// instructions to the end of the computation's schedule, making the duplicate
// the new (last-scheduled) root.
void SinkTupleRoot(HloComputation* computation) {
  HloInstruction* old_root = computation->root_instruction();
  CHECK(old_root->shape().IsTuple());
  HloInstruction* duplicated_root = TupleUtil::Duplicate(old_root);
  HloInstructionSequence& schedule_sequence =
      computation->parent()->schedule().GetOrCreateSequence(computation);
  // The duplicate's operands are freshly created; schedule them before the
  // duplicate itself.
  for (HloInstruction* new_operand : duplicated_root->operands()) {
    schedule_sequence.push_back(new_operand);
  }
  schedule_sequence.push_back(duplicated_root);
  computation->set_root_instruction(duplicated_root);
}
// Appends a bitcast of a non-tuple root to the schedule and makes the bitcast
// the new root, guaranteeing the root is the last scheduled instruction.
void SinkNontupleRoot(HloComputation* computation) {
  HloInstruction* old_root = computation->root_instruction();
  CHECK(!old_root->shape().IsTuple());
  HloInstruction* bitcast_root = computation->AddInstruction(
      HloInstruction::CreateBitcast(old_root->shape(), old_root));
  HloInstructionSequence& schedule_sequence =
      computation->parent()->schedule().GetOrCreateSequence(computation);
  schedule_sequence.push_back(bitcast_root);
  computation->set_root_instruction(bitcast_root);
}
}
// For every non-fusion computation in the (required) schedule, ensures the
// root instruction is the last instruction of the computation's sequence,
// duplicating (tuple roots) or bitcasting (non-tuple roots) as needed.
// Returns true if any computation was modified.
absl::StatusOr<bool> RootInstructionSinker::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_RET_CHECK(module->has_schedule());
  bool modified = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    HloInstructionSequence& sequence =
        module->schedule().GetOrCreateSequence(computation);
    // Root already last in the schedule: nothing to do. (Use back() instead
    // of at(size() - 1); a scheduled computation always has a non-empty
    // sequence containing its root.)
    if (computation->root_instruction() == sequence.instructions().back()) {
      continue;
    }
    if (computation->root_instruction()->shape().IsTuple()) {
      SinkTupleRoot(computation);
    } else {
      SinkNontupleRoot(computation);
    }
    modified = true;
  }
  return modified;
}
} | #include "xla/service/root_instruction_sinker.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using RootInstructionSinkerTest = HloTestBase;
TEST_F(RootInstructionSinkerTest, TupleNoChange) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto while_body =
module->entry_computation()->root_instruction()->while_body();
int num_body_instructions = while_body->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->while_body()
->instruction_count(),
num_body_instructions);
}
TEST_F(RootInstructionSinkerTest, Tuple) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto while_body =
module->entry_computation()->root_instruction()->while_body();
const auto& sequence = module->schedule().sequence(while_body);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
while_body->root_instruction());
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(op::Tuple()),
op::GetTupleElement(op::Tuple())));
}
TEST_F(RootInstructionSinkerTest, NontupleNoChange) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
int num_instructions = called_computation->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->called_computations()[0]
->instruction_count(),
num_instructions);
}
TEST_F(RootInstructionSinkerTest, Nontuple) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const auto& sequence = module->schedule().sequence(called_computation);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
called_computation->root_instruction());
EXPECT_THAT(called_computation->root_instruction(),
op::Bitcast(op::Multiply()));
}
}
} |
1,955 | cpp | tensorflow/tensorflow | dot_decomposer | third_party/xla/xla/service/dot_decomposer.cc | third_party/xla/xla/service/dot_decomposer_test.cc | #ifndef XLA_SERVICE_DOT_DECOMPOSER_H_
#define XLA_SERVICE_DOT_DECOMPOSER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that canonicalizes dot operations. After running, every dot has its
// batch dimensions as the leading dimensions of both operands, exactly one
// contracting dimension, and at most one non-contracting dimension per
// operand (non-canonical dots are rewritten via transpose/reshape).
class DotDecomposer : public HloModulePass {
 public:
  absl::string_view name() const override { return "dot_decomposer"; }
  using HloPassInterface::Run;
  // Returns true if any dot in `module` was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/dot_decomposer.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Rewrites `original_dot` into canonical form: each operand is transposed to
// [batch..., non-contracting..., contracting...] order (contracting first for
// the rhs) and reshaped so that all non-contracting dimensions collapse into
// at most one dimension and all contracting dimensions collapse into exactly
// one. A canonical dot is emitted on the reshaped operands and its result is
// reshaped back to the original dot's shape. Sparsity metadata operands, if
// any, are transposed/reshaped to match.
absl::Status CanonicalizeDot(HloDotInstruction* original_dot) {
  auto computation = original_dot->parent();
  const auto& original_dnums = original_dot->dot_dimension_numbers();
  const int64_t num_batch_dims = original_dnums.lhs_batch_dimensions_size();
  const int64_t num_contracting_dims =
      original_dnums.lhs_contracting_dimensions_size();
  // For sparse dots, record which dimension of each operand carries the
  // sparsity so it can be moved to the end of the transpose permutation.
  int lhs_sparse_dim = -1, rhs_sparse_dim = -1;
  for (const SparsityDescriptor& descriptor : original_dot->sparsity()) {
    (descriptor.index() == 0 ? lhs_sparse_dim : rhs_sparse_dim) =
        descriptor.dimension();
  }
  // Moves `sparse_dim` (if present) to the last slot of `dims`, preserving
  // the relative order of the remaining dimensions.
  auto move_dim_to_end = [&](std::vector<int64_t>& dims, int sparse_dim) {
    if (sparse_dim < 0) return;
    auto it = std::remove(dims.begin(), dims.end(), sparse_dim);
    *it = sparse_dim;
  };
  // ---- LHS: classify dims and accumulate collapsed sizes / dynamism. ----
  const auto& lhs_shape = original_dot->operand(0)->shape();
  const int64_t lhs_rank = lhs_shape.rank();
  const int64_t num_lhs_non_contracting_dims =
      lhs_rank - num_batch_dims - num_contracting_dims;
  std::vector<int64_t> lhs_non_contracting_dims;
  lhs_non_contracting_dims.reserve(num_lhs_non_contracting_dims);
  int64_t lhs_contracting_size = 1;
  bool lhs_contracting_dynamic = false;
  int64_t lhs_non_contracting_size = 1;
  bool lhs_non_contracting_dynamic = false;
  std::vector<int64_t> batch_dim_sizes;
  batch_dim_sizes.reserve(num_batch_dims);
  std::vector<bool> batch_dynamic_dims;
  batch_dynamic_dims.reserve(num_batch_dims);
  for (int64_t i = 0; i < lhs_rank; ++i) {
    if (absl::c_linear_search(original_dnums.lhs_contracting_dimensions(), i)) {
      lhs_contracting_size *= lhs_shape.dimensions(i);
      lhs_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
    } else if (absl::c_linear_search(original_dnums.lhs_batch_dimensions(),
                                     i)) {
      batch_dim_sizes.push_back(lhs_shape.dimensions(i));
      batch_dynamic_dims.push_back(lhs_shape.is_dynamic_dimension(i));
    } else {
      lhs_non_contracting_dims.push_back(i);
      lhs_non_contracting_size *= lhs_shape.dimensions(i);
      lhs_non_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
    }
  }
  // Transpose lhs to [batch..., non-contracting..., contracting...].
  std::vector<int64_t> lhs_transpose;
  lhs_transpose.reserve(lhs_rank);
  lhs_transpose.insert(lhs_transpose.end(),
                       original_dnums.lhs_batch_dimensions().begin(),
                       original_dnums.lhs_batch_dimensions().end());
  lhs_transpose.insert(lhs_transpose.end(), lhs_non_contracting_dims.begin(),
                       lhs_non_contracting_dims.end());
  lhs_transpose.insert(lhs_transpose.end(),
                       original_dnums.lhs_contracting_dimensions().begin(),
                       original_dnums.lhs_contracting_dimensions().end());
  move_dim_to_end(lhs_transpose, lhs_sparse_dim);
  HloInstruction* lhs_operand = original_dot->mutable_operand(0);
  HloInstruction* transposed_lhs = computation->AddInstruction(
      HloInstruction::CreateTranspose(
          ShapeUtil::PermuteDimensions(lhs_transpose, lhs_shape), lhs_operand,
          lhs_transpose),
      &lhs_operand->metadata());
  // Reshape lhs to [batch..., (non-contracting if > 1), contracting].
  std::vector<int64_t> lhs_reshape_dims = batch_dim_sizes;
  std::vector<bool> lhs_reshape_dynamic_dims = batch_dynamic_dims;
  if (lhs_non_contracting_size > 1) {
    lhs_reshape_dims.push_back(lhs_non_contracting_size);
    lhs_reshape_dynamic_dims.push_back(lhs_non_contracting_dynamic);
  }
  lhs_reshape_dims.push_back(lhs_contracting_size);
  lhs_reshape_dynamic_dims.push_back(lhs_contracting_dynamic);
  HloInstruction* reshaped_lhs = computation->AddInstruction(
      HloInstruction::CreateReshape(
          ShapeUtil::MakeShape(lhs_shape.element_type(), lhs_reshape_dims,
                               lhs_reshape_dynamic_dims),
          transposed_lhs),
      &transposed_lhs->metadata());
  // ---- RHS: same classification; batch sizes were captured from the lhs.
  const auto& rhs_shape = original_dot->operand(1)->shape();
  const int64_t rhs_rank = rhs_shape.rank();
  const int64_t num_rhs_non_contracting_dims =
      rhs_rank - num_batch_dims - num_contracting_dims;
  std::vector<int64_t> rhs_non_contracting_dims;
  rhs_non_contracting_dims.reserve(num_rhs_non_contracting_dims);
  int64_t rhs_non_contracting_size = 1;
  bool rhs_non_contracting_dynamic = false;
  int64_t rhs_contracting_size = 1;
  bool rhs_contracting_dynamic = false;
  for (int64_t i = 0; i < rhs_rank; ++i) {
    if (absl::c_linear_search(original_dnums.rhs_contracting_dimensions(), i)) {
      rhs_contracting_size *= rhs_shape.dimensions(i);
      rhs_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
    } else if (!absl::c_linear_search(original_dnums.rhs_batch_dimensions(),
                                      i)) {
      rhs_non_contracting_dims.push_back(i);
      rhs_non_contracting_size *= rhs_shape.dimensions(i);
      rhs_non_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
    }
  }
  // Transpose rhs to [batch..., contracting..., non-contracting...] -- note
  // the contracting dims come *before* the non-contracting dims here, the
  // mirror of the lhs layout.
  std::vector<int64_t> rhs_transpose;
  rhs_transpose.reserve(rhs_rank);
  rhs_transpose.insert(rhs_transpose.end(),
                       original_dnums.rhs_batch_dimensions().begin(),
                       original_dnums.rhs_batch_dimensions().end());
  rhs_transpose.insert(rhs_transpose.end(),
                       original_dnums.rhs_contracting_dimensions().begin(),
                       original_dnums.rhs_contracting_dimensions().end());
  move_dim_to_end(rhs_transpose, rhs_sparse_dim);
  rhs_transpose.insert(rhs_transpose.end(), rhs_non_contracting_dims.begin(),
                       rhs_non_contracting_dims.end());
  HloInstruction* rhs_operand = original_dot->mutable_operand(1);
  HloInstruction* transposed_rhs = computation->AddInstruction(
      HloInstruction::CreateTranspose(
          ShapeUtil::PermuteDimensions(rhs_transpose, rhs_shape), rhs_operand,
          rhs_transpose),
      &rhs_operand->metadata());
  // Reshape rhs to [batch..., contracting, (non-contracting if > 1)].
  std::vector<int64_t> rhs_reshape_dims = batch_dim_sizes;
  rhs_reshape_dims.push_back(rhs_contracting_size);
  std::vector<bool> rhs_reshape_dynamic_dims = batch_dynamic_dims;
  rhs_reshape_dynamic_dims.push_back(rhs_contracting_dynamic);
  if (rhs_non_contracting_size > 1) {
    rhs_reshape_dims.push_back(rhs_non_contracting_size);
    rhs_reshape_dynamic_dims.push_back(rhs_non_contracting_dynamic);
  }
  HloInstruction* reshaped_rhs = computation->AddInstruction(
      HloInstruction::CreateReshape(
          ShapeUtil::MakeShape(rhs_shape.element_type(), rhs_reshape_dims,
                               rhs_reshape_dynamic_dims),
          transposed_rhs),
      &transposed_rhs->metadata());
  // ---- Build the canonical dot shape and dimension numbers. ----
  std::vector<int64_t> dot_dims = batch_dim_sizes;
  std::vector<bool> dot_dynamic_dims = batch_dynamic_dims;
  if (lhs_non_contracting_size > 1) {
    dot_dims.push_back(lhs_non_contracting_size);
    dot_dynamic_dims.push_back(lhs_non_contracting_dynamic);
  }
  if (rhs_non_contracting_size > 1) {
    dot_dims.push_back(rhs_non_contracting_size);
    dot_dynamic_dims.push_back(rhs_non_contracting_dynamic);
  }
  DotDimensionNumbers dot_dnums;
  for (int64_t i = 0; i < num_batch_dims; ++i) {
    dot_dnums.add_lhs_batch_dimensions(i);
    dot_dnums.add_rhs_batch_dimensions(i);
  }
  // After reshaping, the contracting dim is the last lhs dim and the first
  // non-batch rhs dim.
  dot_dnums.add_lhs_contracting_dimensions(
      num_batch_dims + (lhs_non_contracting_size > 1 ? 1 : 0));
  dot_dnums.add_rhs_contracting_dimensions(num_batch_dims);
  // ---- Rewrite sparsity descriptors/metadata for the canonical layout. ----
  std::vector<SparsityDescriptor> sparsity;
  std::vector<HloInstruction*> sparse_meta;
  sparsity.reserve(original_dot->sparse_operands());
  sparse_meta.reserve(original_dot->sparse_operands());
  auto transpose_meta = [&](HloInstruction* original_meta,
                            absl::Span<const int64_t> transpose) {
    return computation->AddInstruction(
        HloInstruction::CreateTranspose(
            ShapeUtil::PermuteDimensions(transpose, original_meta->shape()),
            original_meta, transpose),
        &original_meta->metadata());
  };
  for (int i = 0; i < original_dot->sparse_operands(); ++i) {
    SparsityDescriptor descriptor = original_dot->sparsity()[i];
    // The sparse dimension is now the (single) contracting dimension of the
    // corresponding reshaped operand.
    descriptor.set_dimension(num_batch_dims + (descriptor.index() == 0 &&
                                               lhs_non_contracting_size > 1));
    sparsity.push_back(descriptor);
    // Metadata operands follow the lhs/rhs in the dot's operand list.
    HloInstruction* meta =
        original_dot->mutable_operand(HloDotInstruction::kOperands + i);
    HloInstruction* meta_operand;
    if (descriptor.index() == 0) {
      meta = transpose_meta(meta, lhs_transpose);
      meta_operand = reshaped_lhs;
    } else {
      meta = transpose_meta(meta, rhs_transpose);
      meta_operand = reshaped_rhs;
    }
    TF_ASSIGN_OR_RETURN(Shape result_shape,
                        ShapeInference::InferSparseDotMetadataShape(
                            meta_operand->shape(), dot_dnums, descriptor));
    meta = computation->AddInstruction(
        HloInstruction::CreateReshape(result_shape, meta), &meta->metadata());
    sparse_meta.push_back(meta);
  }
  // ---- Emit the canonical dot and reshape its result back. ----
  HloInstruction* dot = computation->AddInstruction(HloInstruction::CreateDot(
      ShapeUtil::MakeShape(original_dot->shape().element_type(), dot_dims,
                           dot_dynamic_dims),
      reshaped_lhs, reshaped_rhs, dot_dnums, original_dot->precision_config(),
      sparsity, sparse_meta));
  original_dot->SetupDerivedInstruction(dot);
  std::unique_ptr<HloInstruction> replacement =
      HloInstruction::CreateReshape(original_dot->shape(), dot);
  VLOG(3) << "Canonicalizing dot:\n"
          << "\t old: " << original_dot->ToString() << "\n"
          << "\t new: " << dot->ToString() << "\n"
          << "\t -> " << replacement->ToString();
  return computation->ReplaceWithNewInstruction(original_dot,
                                                std::move(replacement));
}
}
absl::StatusOr<bool> DotDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // First collect all non-canonical dots, then rewrite: rewriting while
  // scanning would mutate the instruction lists being iterated.
  std::vector<HloInstruction*> non_canonical_dots;
  for (auto* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() != HloOpcode::kDot) {
        continue;
      }
      const DotDimensionNumbers& dnums = instruction->dot_dimension_numbers();
      // Canonical dots have exactly one contracting dimension.
      if (dnums.lhs_contracting_dimensions_size() != 1) {
        non_canonical_dots.push_back(instruction);
        continue;
      }
      // Each operand may have at most one non-contracting dimension beyond
      // its batch dimensions (rank <= batch + 1 non-contracting + 1
      // contracting).
      if (dnums.lhs_batch_dimensions_size() + 2 <
              instruction->operand(0)->shape().rank() ||
          dnums.rhs_batch_dimensions_size() + 2 <
              instruction->operand(1)->shape().rank()) {
        non_canonical_dots.push_back(instruction);
        continue;
      }
      // NOTE(review): this branch looks unreachable -- the first check above
      // guarantees exactly one lhs contracting dimension at this point, so
      // lhs_contracting_dimensions() cannot be empty. Confirm before any
      // cleanup.
      if (dnums.lhs_batch_dimensions().empty() &&
          dnums.lhs_contracting_dimensions().empty()) {
        non_canonical_dots.push_back(instruction);
        continue;
      }
      // Batch dimensions must be the leading dimensions 0..n-1 on both sides.
      std::vector<int64_t> canonical_batch_dims(
          dnums.lhs_batch_dimensions_size());
      absl::c_iota(canonical_batch_dims, 0);
      if (!absl::c_equal(dnums.lhs_batch_dimensions(), canonical_batch_dims) ||
          !absl::c_equal(dnums.rhs_batch_dimensions(), canonical_batch_dims)) {
        non_canonical_dots.push_back(instruction);
      }
    }
  }
  bool changed = false;
  for (auto* dot : non_canonical_dots) {
    TF_RETURN_IF_ERROR(CanonicalizeDot(Cast<HloDotInstruction>(dot)));
    changed = true;
  }
  return changed;
}
} | #include "xla/service/dot_decomposer.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = ::xla::testing::opcode_matchers;
using DotDecomposerTest = HloTestBase;
TEST_F(DotDecomposerTest, CanonicalizeMultipleNonContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,63,512]{2,1,0} parameter(0)
p1 = f32[512,512]{1,0} parameter(1)
ROOT dot = f32[64,63,512]{2,1,0} dot(p0, p1), lhs_contracting_dims={2},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
1,
0),
op::Shape("f32[4032,512]"))));
}
TEST_F(DotDecomposerTest, DontCanonicalizeIfNoNoncontractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64]{0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_FALSE(canonicalized);
}
TEST_F(DotDecomposerTest, DontAddLhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4,2,1]{3,2,1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
1,
1),
op::Shape("f32[64,2]"))));
}
TEST_F(DotDecomposerTest, DontAddRhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4,2,1]{3,2,1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
2,
1),
op::Shape("f32[64,2]"))));
}
template <typename Arg0, typename Arg1, typename Arg2>
auto SparseDotMatcher(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) {
return match::Op()
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, std::forward<Arg0>(arg0))
.WithOperand(1, std::forward<Arg1>(arg1))
.WithOperand(2, std::forward<Arg2>(arg2));
}
TEST_F(DotDecomposerTest, CanonicalizeSparseLhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[16,4,3,7] parameter(0)
rhs = f32[32,4,5,7] parameter(1)
meta = u16[2,4,3,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=L.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 0);
EXPECT_EQ(descriptor.dimension(), 2);
}
TEST_F(DotDecomposerTest, CanonicalizeSparseRhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[32,4,3,7] parameter(0)
rhs = f32[16,4,5,7] parameter(1)
meta = u16[2,4,5,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=R.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 1);
EXPECT_EQ(descriptor.dimension(), 1);
}
}
} |
1,956 | cpp | tensorflow/tensorflow | fusion_constant_sinking | third_party/xla/xla/service/fusion_constant_sinking.cc | third_party/xla/xla/service/fusion_constant_sinking_test.cc | #ifndef XLA_SERVICE_FUSION_CONSTANT_SINKING_H_
#define XLA_SERVICE_FUSION_CONSTANT_SINKING_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that sinks scalar constants into loop/output fusion computations so
// the fusions no longer take them as operands. Dead instructions left behind
// by the sinking are removed with HloDCE.
class FusionConstantSinking : public HloModulePass {
 public:
  absl::string_view name() const override { return "fusion_constant_sinking"; }
  using HloPassInterface::Run;
  // Returns true if any constant was sunk (or DCE removed anything).
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/fusion_constant_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
bool CanSink(HloInstruction* fusion, const HloInstruction* operand) {
if (!fusion->IsLoopFusion() && !fusion->IsOutputFusion()) {
return false;
}
if (fusion->operand_count() == 1) {
return false;
}
if (!ShapeUtil::IsScalar(operand->shape()) || !operand->IsConstant()) {
return false;
}
int64_t operand_idx = fusion->operand_index(operand);
HloInstruction* fused_param = fusion->fused_parameter(operand_idx);
for (HloInstruction* user : fused_param->users()) {
if (user->opcode() == HloOpcode::kFusion && user->operand_count() == 1) {
return false;
}
}
return true;
}
bool ProcessScalar(HloInstruction* scalar) {
if (!ShapeUtil::IsScalar(scalar->shape()) || !scalar->IsConstant()) {
return false;
}
bool processed = false;
std::vector<HloInstruction*> sinkable_users;
for (HloInstruction* use : scalar->users()) {
if (CanSink(use, scalar)) {
sinkable_users.push_back(use);
}
}
for (HloInstruction* use : sinkable_users) {
HloInstruction* fused_scalar = use->FuseInstruction(scalar);
processed = true;
ProcessScalar(fused_scalar);
}
return processed;
}
absl::StatusOr<bool> FusionConstantSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(3) << "HLO module before FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
bool changed = false;
for (HloComputation* c : module->MakeNonfusionComputations()) {
for (HloInstruction* i : c->MakeInstructionPostOrder()) {
changed |= ProcessScalar(i);
}
}
if (changed) {
TF_ASSIGN_OR_RETURN(bool dce, HloDCE{}.Run(module, execution_threads));
changed |= dce;
}
VLOG(3) << "HLO module after FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
} | #include "xla/service/fusion_constant_sinking.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using FusionConstantSinkingTest = HloTestBase;
TEST_F(FusionConstantSinkingTest, SinkConstant) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[56,4096,4096], param_1: s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)} %constant.85694), dynamic_slice_sizes={1,4096,4096}
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation.slice
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation.slice")
->root_instruction(),
GmockMatch(match::DynamicSlice(match::Parameter(0), match::Constant(),
match::Constant(), match::Constant())));
}
TEST_F(FusionConstantSinkingTest, SingleOperandFusionNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[]) -> s8[1,4096,4096] {
param0 = s8[] parameter(0)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={}
}
ENTRY main {
c = s8[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SingleOperandUserNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_1: s32[]) -> s32[] {
p1 = s32[]{:T(128)} parameter(0)
%constant.85694 = s32[]{:T(128)} constant(10)
ROOT out = s32[] add(p1, %constant.85694)
}
%fused_computation (param_0.51117: s32[4096,4096], param_1:
s32[]) -> s32[4096,4096] {
%param_0.51117 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s32[] fusion(s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
%broadcast = s32[4096,4096]{1,0:T(8,128)(4,1)} broadcast(%inner.fusion), dimensions={}
ROOT out = s32[4096,4096] add(%broadcast, %param_0.51117)
}
ENTRY main {
p0 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s32[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s32[4096,4096]{1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, NonScalarNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[2], p1: s8[2,4096,4096]) -> s8[2,4096,4096] {
param0 = s8[2] parameter(0)
param1 = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(1)
bcast = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={0}
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} add(param1, bcast)
}
ENTRY main {
p = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s8[2]{0:T(128)} constant({10,20})
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[2]{0:T(128)} c, p), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SinkConstantNested) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)}
dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117,
s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)}
%constant.85694), dynamic_slice_sizes={1,4096,4096}
}
%fused_computation (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
ROOT %bitcast = s8[4096,4096]{1,0:T(8,128)(4,1)} bitcast(s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} %inner.fusion)
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c),
kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation")->num_parameters(), 1);
EXPECT_THAT(module->GetComputationWithName("fused_computation.inner")
->num_parameters(),
1);
}
}
} |
1,957 | cpp | tensorflow/tensorflow | bfloat16_conversion_folding | third_party/xla/xla/service/bfloat16_conversion_folding.cc | third_party/xla/xla/service/bfloat16_conversion_folding_test.cc | #ifndef XLA_SERVICE_BFLOAT16_CONVERSION_FOLDING_H_
#define XLA_SERVICE_BFLOAT16_CONVERSION_FOLDING_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class BFloat16ConversionFolding : public HloModulePass {
public:
explicit BFloat16ConversionFolding(const FloatSupport* bfloat16_support)
: bfloat16_support_(bfloat16_support) {
DCHECK(bfloat16_support->LowPrecisionType() == BF16);
}
~BFloat16ConversionFolding() override = default;
absl::string_view name() const override { return "bfloat16-fold"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const FloatSupport* bfloat16_support_;
};
}
#endif
#include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
class BFloat16ConversionFoldingVisitor : public DfsHloVisitorWithDefault {
public:
explicit BFloat16ConversionFoldingVisitor(
HloComputation* computation, const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding)
: computation_(computation),
bfloat16_support_(bfloat16_support),
bfloat16_conversion_folding_(bfloat16_conversion_folding) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleAllReduce(HloInstruction* crs) override;
static bool Run(HloComputation* computation,
const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding) {
BFloat16ConversionFoldingVisitor visitor(computation, bfloat16_support,
bfloat16_conversion_folding);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
private:
absl::Status TryFoldBF16Conversions(HloInstruction* hlo);
absl::Status FoldOutputConversions(HloInstruction* hlo);
absl::Status FoldOperandConversion(HloInstruction* hlo,
int64_t operand_index);
HloComputation* computation_;
const FloatSupport* bfloat16_support_;
BFloat16ConversionFolding* bfloat16_conversion_folding_;
bool changed_ = false;
};
absl::Status BFloat16ConversionFoldingVisitor::FoldOutputConversions(
HloInstruction* hlo) {
std::vector<HloInstruction*> materialized_users = hlo->users();
hlo->mutable_shape()->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(hlo->mutable_shape());
for (auto user : materialized_users) {
CHECK_EQ(user->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(hlo));
changed_ = true;
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::FoldOperandConversion(
HloInstruction* hlo, int64_t operand_index) {
auto operand = hlo->mutable_operand(operand_index);
CHECK_EQ(operand->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWith(operand_index, operand->mutable_operand(0)));
changed_ = true;
return absl::OkStatus();
}
namespace {
bool AllUsersAreF32ToBF16Converts(const HloInstruction* hlo) {
if (hlo->user_count() == 0 || hlo->shape().element_type() != F32) {
return false;
}
for (const auto user : hlo->users()) {
if (user->opcode() == HloOpcode::kConvert &&
user->shape().element_type() == BF16) {
continue;
}
return false;
}
return true;
}
}
absl::Status BFloat16ConversionFoldingVisitor::TryFoldBF16Conversions(
HloInstruction* hlo) {
std::vector<int64_t> bf16_to_f32_operands;
bool has_other_f32_operands = false;
for (int64_t i = 0; i < hlo->operands().size(); ++i) {
auto operand = hlo->operand(i);
if (operand->shape().element_type() == F32) {
if (operand->opcode() == HloOpcode::kConvert &&
operand->operand(0)->shape().element_type() == BF16 &&
bfloat16_support_->SupportsLowPrecisionOperand(*hlo, i)) {
bf16_to_f32_operands.push_back(i);
} else {
has_other_f32_operands = true;
}
continue;
}
}
const bool fold_output_conversion =
AllUsersAreF32ToBF16Converts(hlo) &&
bfloat16_support_->SupportsLowPrecisionOutput(*hlo);
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
if (has_other_f32_operands ||
(!fold_output_conversion && hlo->shape().element_type() == F32)) {
return absl::OkStatus();
}
}
if (fold_output_conversion) {
TF_RETURN_IF_ERROR(FoldOutputConversions(hlo));
}
for (int64_t i : bf16_to_f32_operands) {
TF_RETURN_IF_ERROR(FoldOperandConversion(hlo, i));
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::DefaultAction(
HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kFusion ||
hlo->opcode() == HloOpcode::kBitcastConvert ||
hlo->opcode() == HloOpcode::kConvert ||
hlo->opcode() == HloOpcode::kCall ||
hlo->opcode() == HloOpcode::kCustomCall ||
hlo->opcode() == HloOpcode::kWhile ||
hlo->opcode() == HloOpcode::kConditional ||
HloDataflowAnalysis::IsInPlaceOperation(hlo->opcode()) ||
hlo->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
if (hlo == computation_->root_instruction() &&
!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
return absl::OkStatus();
}
return TryFoldBF16Conversions(hlo);
}
absl::Status BFloat16ConversionFoldingVisitor::HandleAllReduce(
HloInstruction* crs) {
if (crs->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DefaultAction(crs));
if (!bfloat16_support_->SupportsMixedPrecisions(*crs)) {
return absl::OkStatus();
}
if (!crs->shape().IsTuple()) {
return absl::OkStatus();
}
if (crs == computation_->root_instruction()) {
return absl::OkStatus();
}
std::vector<std::vector<HloInstruction*>> per_tuple_element_gtes(
crs->operand_count());
for (auto user : crs->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return absl::OkStatus();
}
per_tuple_element_gtes[user->tuple_index()].push_back(user);
}
for (int64_t i = 0; i < crs->operand_count(); ++i) {
auto all_gte_users_are_bf16_convert = [&per_tuple_element_gtes, i]() {
if (per_tuple_element_gtes[i].empty()) {
return false;
}
for (auto gte : per_tuple_element_gtes[i]) {
if (!AllUsersAreF32ToBF16Converts(gte)) {
return false;
}
}
return true;
};
if (!all_gte_users_are_bf16_convert()) {
continue;
}
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i})
->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i}));
for (auto gte : per_tuple_element_gtes[i]) {
TF_RETURN_IF_ERROR(FoldOutputConversions(gte));
}
}
return absl::OkStatus();
}
absl::StatusOr<bool> BFloat16ConversionFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (BFloat16ConversionFoldingVisitor::Run(comp, bfloat16_support_, this)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
};
class BFloat16ConversionFoldingTest : public HloTestBase {
protected:
BFloat16ConversionFoldingTest()
: HloTestBase(false,
true) {}
bool FoldConversions(HloModule* module) {
TestBFloat16Support bfloat16_support_;
BFloat16ConversionFolding fold(&bfloat16_support_);
absl::StatusOr<bool> result = fold.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
};
TEST_F(BFloat16ConversionFoldingTest, FoldIfSupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, convert1, c));
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
EXPECT_EQ(add1->operand(0), add0);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldIfUnsupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kMultiply, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* mul1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kMultiply, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(mul0->shape().element_type(), F32);
EXPECT_EQ(mul1->shape().element_type(), F32);
EXPECT_EQ(mul1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldUnsupportedMixedPrecision) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* sub0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kSubtract, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* sub1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kSubtract, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(sub0->shape().element_type(), F32);
EXPECT_EQ(sub1->shape().element_type(), F32);
EXPECT_EQ(sub1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldTuple) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, b));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({a, convert0}));
HloInstruction* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 0));
HloInstruction* convert1 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert1);
EXPECT_EQ(gte->shape().element_type(), F32);
EXPECT_EQ(tuple->operand(1), convert0);
}
TEST_F(BFloat16ConversionFoldingTest, FoldAllReduceTupleOutput) {
auto builder = HloComputation::Builder(TestName());
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("add");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, x, y));
HloComputation* sum = module->AddEmbeddedComputation(sum_builder.Build());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* convert_a =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, a));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), {convert_a, b}, sum,
CollectiveDeviceList(),
false,
std::nullopt, false));
HloInstruction* gte_a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 0));
HloInstruction* gte_b = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 1));
HloInstruction* convert_gte_b =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte_b));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({gte_a, convert_gte_b}));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_EQ(tuple->operand(0), gte_a);
EXPECT_EQ(tuple->operand(1), gte_b);
EXPECT_EQ(gte_a->shape().element_type(), F32);
EXPECT_EQ(gte_b->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0), a);
EXPECT_EQ(crs->operand(1), b);
EXPECT_EQ(a->shape().element_type(), BF16);
EXPECT_EQ(b->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {0}).element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), BF16);
}
} |
1,958 | cpp | tensorflow/tensorflow | transpose_folding | third_party/xla/xla/service/transpose_folding.cc | third_party/xla/xla/service/transpose_folding_test.cc | #ifndef XLA_SERVICE_TRANSPOSE_FOLDING_H_
#define XLA_SERVICE_TRANSPOSE_FOLDING_H_
#include <functional>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class TransposeFolding : public HloModulePass {
public:
using OperandIndices = std::vector<int64_t>;
using TransposableConvOperandsFn = std::function<OperandIndices(
const HloInstruction&, const OperandIndices&)>;
using CanFoldTransposeOperand = std::function<absl::StatusOr<bool>(
const HloInstruction&, int64_t )>;
static OperandIndices NeverFoldTranspose(const HloInstruction&,
const OperandIndices&) {
return {};
}
static OperandIndices AlwaysFoldTranspose(const HloInstruction&,
const OperandIndices& ids) {
return ids;
}
explicit TransposeFolding(
CanFoldTransposeOperand dot_can_fold_transpose_operand =
IsRowColumnTransposeDotOperand,
TransposableConvOperandsFn transposable_conv_operands =
AlwaysFoldTranspose);
absl::string_view name() const override { return "transpose-folding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
static absl::StatusOr<bool> IsRowColumnTransposeDotOperand(
const HloInstruction& dot, int64_t operand_idx);
private:
CanFoldTransposeOperand dot_can_fold_transpose_operand_;
TransposableConvOperandsFn transposable_conv_operands_;
};
}
#endif
#include "xla/service/transpose_folding.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
TransposeFolding::OperandIndices CanFoldOperandsIntoConvolution(
const HloInstruction& convolution,
const TransposeFolding::TransposableConvOperandsFn&
transposable_conv_operands) {
if (HloOpcode::kConvolution != convolution.opcode()) {
return {};
}
TransposeFolding::OperandIndices operand_set;
for (int64_t i = 0; i < convolution.operand_count(); ++i) {
auto& operand = *convolution.operand(i);
if (operand.opcode() == HloOpcode::kTranspose) {
operand_set.push_back(i);
}
}
return transposable_conv_operands(convolution, operand_set);
}
bool IsNonIdentityTranspose(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kTranspose) {
for (int dim = 0; dim < instruction->dimensions().size(); ++dim) {
if (dim != instruction->dimensions(dim)) {
return true;
}
}
}
return false;
}
void TransposeDims(tsl::protobuf::RepeatedField<int64_t>& dims,
absl::Span<const int64_t> transpose_dims) {
for (auto& dim : dims) {
dim = transpose_dims[dim];
}
}
using InstructionOperandsPair =
std::pair<HloInstruction*, TransposeFolding::OperandIndices>;
absl::Status FoldTransposeIntoDot(InstructionOperandsPair& pair) {
HloInstruction* dot = pair.first;
DotDimensionNumbers new_dot_dims = dot->dot_dimension_numbers();
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
for (int64_t operand_index : pair.second) {
if (operand_index == 0) {
TransposeDims(*new_dot_dims.mutable_lhs_contracting_dimensions(),
lhs->dimensions());
TransposeDims(*new_dot_dims.mutable_lhs_batch_dimensions(),
lhs->dimensions());
lhs = lhs->mutable_operand(0);
} else {
CHECK_EQ(operand_index, 1);
TransposeDims(*new_dot_dims.mutable_rhs_contracting_dimensions(),
rhs->dimensions());
TransposeDims(*new_dot_dims.mutable_rhs_batch_dimensions(),
rhs->dimensions());
rhs = rhs->mutable_operand(0);
}
}
return dot->parent()->ReplaceWithNewInstruction(
dot, HloInstruction::CreateDot(dot->shape(), lhs, rhs, new_dot_dims,
dot->precision_config()));
}
bool FoldTransposeIntoConvolution(InstructionOperandsPair& pair) {
auto& convolution = *pair.first;
auto& operand_indices = pair.second;
if (operand_indices.empty()) {
return false;
}
const ConvolutionDimensionNumbers& dnums =
convolution.convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dnums = dnums;
HloInstruction* new_lhs;
const int64_t kLhsIdx = 0;
if (absl::c_linear_search(operand_indices, kLhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_input_batch_dimension(
transpose_dimensions[dnums.input_batch_dimension()]);
new_dnums.set_input_feature_dimension(
transpose_dimensions[dnums.input_feature_dimension()]);
for (auto& input_spatial_dimension :
*new_dnums.mutable_input_spatial_dimensions()) {
input_spatial_dimension = transpose_dimensions[input_spatial_dimension];
}
new_lhs = &transpose_operand;
} else {
new_lhs = convolution.mutable_operand(kLhsIdx);
}
HloInstruction* new_rhs;
const int64_t kRhsIdx = 1;
if (absl::c_linear_search(operand_indices, kRhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_kernel_input_feature_dimension(
transpose_dimensions[dnums.kernel_input_feature_dimension()]);
new_dnums.set_kernel_output_feature_dimension(
transpose_dimensions[dnums.kernel_output_feature_dimension()]);
for (auto& kernel_spatial_dimension :
*new_dnums.mutable_kernel_spatial_dimensions()) {
kernel_spatial_dimension = transpose_dimensions[kernel_spatial_dimension];
}
new_rhs = &transpose_operand;
} else {
new_rhs = convolution.mutable_operand(kRhsIdx);
}
auto new_conv = HloInstruction::CreateConvolve(
convolution.shape(), new_lhs, new_rhs, convolution.feature_group_count(),
convolution.batch_group_count(), convolution.window(), new_dnums,
convolution.precision_config());
TF_CHECK_OK(convolution.parent()->ReplaceWithNewInstruction(
&convolution, std::move(new_conv)));
return true;
}
}
TransposeFolding::TransposeFolding(
CanFoldTransposeOperand dot_can_fold_transpose_operand,
TransposableConvOperandsFn transposable_conv_operands)
: dot_can_fold_transpose_operand_(
std::move(dot_can_fold_transpose_operand)),
transposable_conv_operands_(std::move(transposable_conv_operands)) {}
// Runs transpose folding over every non-fusion computation of `module`.
// The pass works in two phases: first collect all dot/convolution
// instructions whose transpose operands may be folded, then perform the
// rewrites.  Deferring mutation keeps the traversal from ever observing a
// partially rewritten graph.  Returns true iff anything was rewritten.
absl::StatusOr<bool> TransposeFolding::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::vector<InstructionOperandsPair> dot_candidates;
  std::vector<InstructionOperandsPair> conv_candidates;
  FunctionVisitor collector([this, &dot_candidates, &conv_candidates](
                                HloInstruction* instruction) {
    if (instruction->opcode() == HloOpcode::kDot) {
      // Only dots whose operands are both at least rank 2 are candidates.
      if ((instruction->operand(0)->shape().rank() < 2) ||
          (instruction->operand(1)->shape().rank() < 2)) {
        return absl::OkStatus();
      }
      OperandIndices foldable_indices;
      for (int64_t operand_idx = 0; operand_idx < 2; ++operand_idx) {
        if (!IsNonIdentityTranspose(instruction->operand(operand_idx))) {
          continue;
        }
        TF_ASSIGN_OR_RETURN(
            bool can_fold,
            dot_can_fold_transpose_operand_(*instruction, operand_idx));
        if (can_fold) {
          foldable_indices.push_back(operand_idx);
        }
      }
      if (!foldable_indices.empty()) {
        dot_candidates.emplace_back(instruction, foldable_indices);
      }
    }
    // Convolution candidates are screened by the user-supplied callback.
    OperandIndices conv_indices = CanFoldOperandsIntoConvolution(
        *instruction, transposable_conv_operands_);
    if (!conv_indices.empty()) {
      conv_candidates.emplace_back(instruction, conv_indices);
    }
    return absl::OkStatus();
  });
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_RETURN_IF_ERROR(computation->Accept(&collector));
  }
  // Phase two: apply the deferred rewrites.
  bool changed = false;
  for (InstructionOperandsPair& candidate : dot_candidates) {
    TF_RETURN_IF_ERROR(FoldTransposeIntoDot(candidate));
    changed = true;
  }
  for (InstructionOperandsPair& candidate : conv_candidates) {
    changed |= FoldTransposeIntoConvolution(candidate);
  }
  return changed;
}
// Default foldability predicate for dot operands: accepts the transpose only
// when it swaps exactly the two minor (row/column) dimensions.  Concretely:
// the dot contracts a single dimension on this side, every other dimension is
// a batch dimension, and the transpose leaves each batch dimension in place.
absl::StatusOr<bool>
TransposeFolding::IsRowColumnTransposeDotOperand(const HloInstruction& dot,
                                                 int64_t operand_idx) {
  TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
  TF_RET_CHECK(dot.operand_count() > operand_idx);
  const HloInstruction& transpose = *dot.operand(operand_idx);
  TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);
  const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
  const bool is_lhs = (operand_idx == 0);
  const auto& batch_dims = is_lhs ? dot_dims.lhs_batch_dimensions()
                                  : dot_dims.rhs_batch_dimensions();
  const auto& contracting_dims = is_lhs
                                     ? dot_dims.lhs_contracting_dimensions()
                                     : dot_dims.rhs_contracting_dimensions();
  // All but the two minor dimensions must be batch dimensions.
  if (batch_dims.size() != transpose.shape().rank() - 2) {
    return false;
  }
  // Exactly one contracting dimension on this side.
  if (contracting_dims.size() != 1) {
    return false;
  }
  // The transpose must act as the identity on every batch dimension.
  for (int64_t dim : batch_dims) {
    if (transpose.dimensions(dim) != dim) {
      return false;
    }
  }
  return true;
}
} | #include "xla/service/transpose_folding.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {

// Short alias for the HLO opcode matchers used throughout these tests.
namespace op = xla::testing::opcode_matchers;

using ::tsl::testing::IsOkAndHolds;

// All tests share the plain HloTestBase fixture.
using TransposeFoldingTest = HloTestBase;
// The transpose of the dot rhs is folded away: the rewritten dot reads both
// parameters directly and contracts dimension 1 on each side.
TEST_F(TransposeFoldingTest, FoldDotTranspose) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
  x = f32[2,3]{1,0} parameter(0)
  y = f32[2,3]{1,0} parameter(1)
  transpose = f32[3,2]{1,0} transpose(y), dimensions={1,0}
  ROOT dot = f32[2,2]{1,0} dot(x, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
// A transpose that permutes a batch dimension is not folded by the default
// (row/column-only) foldability predicate.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfBatchDimByDefault) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
  x = f32[2,3] parameter(0)
  y = f32[3,2] parameter(1)
  transpose = f32[2,3] transpose(y), dimensions={1,0}
  ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
// With a permissive foldability callback (always true), even a transpose that
// moves a batch dimension gets folded into the dot.
TEST_F(TransposeFoldingTest, FoldTransposeOfBatchWhenPermitted) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
  x = f32[5,2,3] parameter(0)
  y = f32[3,5,4] parameter(1)
  transpose = f32[5,3,4] transpose(y), dimensions={1,0,2}
  ROOT dot = f32[5,2,4] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TransposeFolding transpose_folding(
      [](const HloInstruction&, int64_t) {
        return true;  // permit folding of any transpose operand
      });
  EXPECT_THAT(transpose_folding.Run(module.get()), IsOkAndHolds(true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/2, /*rhs_contracting_dim=*/0));
}
// Dots with a rank-1 operand are skipped entirely (Run requires rank >= 2 on
// both operands), so the transpose survives.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfRank1Dot) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
  x = f32[3] parameter(0)
  y = f32[3,2] parameter(1)
  transpose = f32[2,3] transpose(y), dimensions={1,0}
  ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
// A dot with no contracting dimensions fails the default predicate's
// single-contracting-dim requirement, so no folding happens.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfDotWithoutContractingDims) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
  x = f32[3,4] parameter(0)
  y = f32[3,4,6,7] parameter(1)
  transpose = f32[3,4,7,6] transpose(y), dimensions={0,1,3,2}
  ROOT dot = f32[3,4,7,6] dot(x, transpose), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={}, rhs_contracting_dims={}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
// Transposes of constants feeding both dot operands are folded; the rewritten
// dot consumes the constants directly with adjusted contracting dims.
TEST_F(TransposeFoldingTest, FoldDotTransposeConstant) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeConstant
ENTRY entry_computation {
  constant = f32[2,1]{1,0} constant({ { 1 }, { 2 } })
  transpose = f32[1,2]{1,0} transpose(constant), dimensions={1,0}
  constant.1 = f32[3,2]{1,0} constant({ { 1, 2 }, { 3, 4 }, { 5, 6 } })
  transpose.1 = f32[2,3]{1,0} transpose(constant.1), dimensions={1,0}
  ROOT dot = f32[1,3]{1,0} dot(transpose, transpose.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Constant(), op::Constant(),
                      /*lhs_contracting_dim=*/0, /*rhs_contracting_dim=*/1));
}
// NOTE(review): despite living in this file, this test exercises
// HloModule::OutlineExpressionFromComputation rather than transpose folding:
// it outlines add/sub/mul into a callee and checks the resulting call.
TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
  auto builder = HloComputation::Builder("entry");
  // Create path add => mul => sub from three scalar constants.
  HloInstruction* const1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* const2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  HloInstruction* const3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      const1->shape(), HloOpcode::kAdd, const1, const2));
  HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
      const2->shape(), HloOpcode::kSubtract, const2, const3));
  HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
      add->shape(), HloOpcode::kMultiply, add, sub));
  auto module = CreateNewVerifiedModule("fuse_with_constant_operands");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(mul));
  HloInstruction* call = module->OutlineExpressionFromComputation(
      {add, sub, mul}, "entry", entry_computation);
  // The outlined call becomes the new root and receives the three constants.
  EXPECT_EQ(call, entry_computation->root_instruction());
  HloComputation* callee_computation = call->to_apply();
  EXPECT_THAT(call->operands(),
              ::testing::UnorderedElementsAre(const1, const2, const3));
  // Callee: 3 parameters + add + sub + mul = 6 instructions.
  EXPECT_EQ(6, callee_computation->instruction_count());
}
// Folding also applies inside called (non-entry) computations: the transpose
// in the callee is folded into the callee's dot.
TEST_F(TransposeFoldingTest, FoldDotTransposeInCall) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeInCall
callee {
  name.0 = f32[2,3]{1,0} parameter(0)
  name.1 = f32[2,3]{1,0} parameter(1)
  transpose.clone = f32[3,2]{1,0} transpose(name.0), dimensions={1,0}
  ROOT dot.clone = f32[2,2]{1,0} dot(name.1, transpose.clone), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry_computation {
  y = f32[2,3]{1,0} parameter(1)
  x = f32[2,3]{1,0} parameter(0)
  ROOT call = f32[2,2]{1,0} call(y, x), to_apply=callee
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  const HloComputation* callee = module->GetComputationWithName("callee");
  ASSERT_NE(callee, nullptr);
  EXPECT_THAT(callee->root_instruction(),
              op::Dot(op::Parameter(1), op::Parameter(0),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
// A transpose swapping the kernel's input/output feature dimensions is folded
// into the convolution by swapping those dimensions in the dnums instead.
TEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
      /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/1, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
      /*name=*/"y"));
  // Swap the first two dimensions of the kernel.
  HloInstruction* transpose_y =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 0, 2, 3}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(
        transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  // After folding, only x, y, and the rewritten convolution remain.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  CHECK_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // The kernel feature dimensions swapped roles instead of the data moving.
  EXPECT_EQ(dnums.kernel_input_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_output_feature_dimension());
  EXPECT_EQ(dnums.kernel_output_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_input_feature_dimension());
}
// A 4-way permutation of the kernel operand is folded by permuting all of the
// kernel dimension numbers (features and spatials) accordingly.
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeRhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
      /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/1, ShapeUtil::MakeShape(F32, {1, 2, 1, 3}),
      /*name=*/"y"));
  // Permute all dimensions of the kernel.
  HloInstruction* transpose_y =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 3, 0, 2}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(
        transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  // After folding, only x, y, and the rewritten convolution remain.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  CHECK_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Every kernel dimension number was rerouted through the permutation.
  EXPECT_EQ(dnums.kernel_input_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_output_feature_dimension());
  EXPECT_EQ(dnums.kernel_spatial_dimensions(1),
            new_conv->convolution_dimension_numbers()
                .kernel_input_feature_dimension());
  EXPECT_EQ(
      dnums.kernel_output_feature_dimension(),
      new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.kernel_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(1));
}
// A transpose swapping the activation's batch/feature dimensions is folded by
// swapping those input dimension numbers; spatial dims are untouched.
TEST_F(TransposeFoldingTest, FoldConvTransposeLhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
      /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
      /*name=*/"y"));
  // Swap batch and feature dimensions of the activations.
  HloInstruction* transpose_x =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 2, 3}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  // After folding, only x, y, and the rewritten convolution remain.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Input batch/feature dims swapped; spatial and output dims unchanged.
  EXPECT_EQ(dnums.input_feature_dimension(),
            new_conv->convolution_dimension_numbers().input_batch_dimension());
  EXPECT_EQ(
      dnums.input_batch_dimension(),
      new_conv->convolution_dimension_numbers().input_feature_dimension());
  EXPECT_EQ(
      dnums.input_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.input_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
// A transpose that swaps batch/feature AND the two spatial dimensions of the
// activations is folded by permuting all input dimension numbers.
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeLhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),
      /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      /*parameter_number=*/1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),
      /*name=*/"y"));
  // Swap batch/feature and the two spatial dimensions.
  HloInstruction* transpose_x =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 3, 2}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y,
      /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  // After folding, only x, y, and the rewritten convolution remain.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Input batch/feature swapped and spatial dims exchanged; outputs unchanged.
  EXPECT_EQ(dnums.input_feature_dimension(),
            new_conv->convolution_dimension_numbers().input_batch_dimension());
  EXPECT_EQ(
      dnums.input_batch_dimension(),
      new_conv->convolution_dimension_numbers().input_feature_dimension());
  EXPECT_EQ(
      dnums.input_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
  EXPECT_EQ(
      dnums.input_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
// A batched dot whose transpose only swaps the two minor dims (batch dims are
// fixed points) is foldable under the default predicate.
TEST_F(TransposeFoldingTest, FoldBatchDotTranspose) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTranspose
ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,2,3]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,3,2}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
  rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
// The transpose also permutes the batch dimensions (0 and 1), so the default
// predicate rejects folding.
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeBatch) {
  constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeBatch
ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,2,3]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={1,0,3,2}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
  rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
// Batch dimensions need not be contiguous ({0,2} here); as long as the
// transpose fixes them, the minor-dim swap is still folded.
TEST_F(TransposeFoldingTest, FoldBatchDotTransposeNonContiguousBatch) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTransposeNonContiguousBatch
ENTRY entry_computation {
  x = f32[7,2,7,3]{3,2,1,0} parameter(0)
  y = f32[7,2,7,3]{3,2,1,0} parameter(1)
  transpose = f32[7,3,7,2]{3,2,1,0} transpose(y), dimensions={0,3,2,1}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
  rhs_contracting_dims={1}, lhs_batch_dims={0,2}, rhs_batch_dims={0,2}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
// An identity transpose (dimensions={0,1,2,3}) is not a candidate at all —
// IsNonIdentityTranspose filters it out — so nothing changes.
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeIdentity) {
  constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeIdentity
ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,3,2]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,2,3}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
  rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
}
} |
1,959 | cpp | tensorflow/tensorflow | conditional_simplifier | third_party/xla/xla/service/conditional_simplifier.cc | third_party/xla/xla/service/conditional_simplifier_test.cc | #ifndef XLA_SERVICE_CONDITIONAL_SIMPLIFIER_H_
#define XLA_SERVICE_CONDITIONAL_SIMPLIFIER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass ("simplify-conditional") that simplifies kConditional
// instructions, e.g. by pruning unused operands/result elements or removing
// the conditional entirely when that is safe (see TryRemoveConditional).
class ConditionalSimplifier : public HloModulePass {
 public:
  absl::string_view name() const override { return "simplify-conditional"; }
  using HloPassInterface::Run;
  // Runs the pass on `module`; returns true iff the module changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  // Attempts to replace `conditional` with an inlined call to one branch.
  absl::StatusOr<bool> TryRemoveConditional(HloInstruction* conditional);
};
}
#endif
#include "xla/service/conditional_simplifier.h"
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Returns true when `computation` performs no real work — every instruction
// is a parameter, tuple, or get-tuple-element — and its root shape contains
// at least one array subshape.
bool ComputationIsEmptyWithArrayRoot(const HloComputation* computation) {
  bool has_array_subshape = false;
  ShapeUtil::ForEachSubshape(
      computation->root_instruction()->shape(),
      [&](const Shape& subshape, const ShapeIndex& /*index*/) {
        if (subshape.IsArray()) {
          has_array_subshape = true;
        }
      });
  if (!has_array_subshape) {
    return false;
  }
  // Data-shuffling ops only; anything else means the computation does work.
  return absl::c_all_of(
      computation->MakeInstructionPostOrder(),
      HloPredicateIsOp<HloOpcode::kTuple, HloOpcode::kGetTupleElement,
                       HloOpcode::kParameter>);
}
// Removes tuple elements of `computation`'s parameter that no instruction in
// the branch body reads.  `computation` is a conditional branch body;
// `calling_conditionals` are the conditionals that invoke it.  A clone of the
// computation with a narrowed parameter shape is created, and every
// (unsharded) calling conditional is redirected to the clone, with its
// branch operand rebuilt as a tuple of only the surviving elements.
// Returns true iff a rewrite was performed.
absl::StatusOr<bool> TryRemoveUnusedConditionalOperands(
    HloComputation* computation,
    const absl::flat_hash_set<HloInstruction*>& calling_conditionals) {
  HloInstruction* param = computation->parameter_instruction(0);
  // If the parameter is the root, the whole tuple flows out — nothing unused.
  if (param == computation->root_instruction()) {
    return false;
  }
  // Only tuple-shaped parameters can be pruned element-wise.
  if (!param->shape().IsTuple()) {
    return false;
  }
  // Collect the element indices actually read.  A non-GTE user means the
  // tuple escapes as a whole, so bail.  std::set keeps indices ordered.
  std::set<int64_t> tuple_indices_to_keep;
  for (HloInstruction* user : param->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      return false;
    }
    tuple_indices_to_keep.insert(user->tuple_index());
  }
  int64_t old_tuple_element_count =
      ShapeUtil::TupleElementCount(param->shape());
  // Every element is used — nothing to remove.
  if (tuple_indices_to_keep.size() == old_tuple_element_count) {
    return false;
  }
  // Build the narrowed tuple shape and an old-index -> new-index map.
  std::vector<const Shape*> new_tuple_shapes;
  new_tuple_shapes.reserve(tuple_indices_to_keep.size());
  std::vector<int64_t> map(old_tuple_element_count, -1);
  for (int64_t i : tuple_indices_to_keep) {
    map[i] = new_tuple_shapes.size();
    new_tuple_shapes.push_back(&param->shape().tuple_shapes(i));
  }
  Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
  // Clone the computation, install the narrowed parameter shape on the clone,
  // and re-index the clone's GTE users.
  HloComputation* new_computation =
      computation->parent()->AddEmbeddedComputation(computation->Clone());
  param = new_computation->parameter_instruction(0);
  *param->mutable_shape() = tuple_shape;
  for (HloInstruction* user : param->users()) {
    user->set_tuple_index(map[user->tuple_index()]);
  }
  // Point each calling conditional at the pruned clone and rebuild its
  // corresponding operand tuple from the kept elements.
  for (HloInstruction* conditional : calling_conditionals) {
    // Sharded conditionals are left untouched (they keep the original body).
    if (conditional->has_sharding()) {
      continue;
    }
    for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
      if (conditional->branch_computation(branch) != computation) {
        continue;
      }
      conditional->set_branch_computation(branch, new_computation);
      // Branch operand i of a conditional is instruction operand i + 1
      // (operand 0 is the predicate / branch index).
      const Shape& old_shape = conditional->operand(branch + 1)->shape();
      std::vector<HloInstruction*> new_tuple_operands;
      new_tuple_operands.reserve(tuple_indices_to_keep.size());
      for (int64_t i : tuple_indices_to_keep) {
        new_tuple_operands.push_back(conditional->parent()->AddInstruction(
            HloInstruction::CreateGetTupleElement(
                old_shape.tuple_shapes(i),
                conditional->mutable_operand(branch + 1), i)));
      }
      HloInstruction* new_tuple = conditional->parent()->AddInstruction(
          HloInstruction::CreateTuple(new_tuple_operands));
      TF_RETURN_IF_ERROR(
          conditional->ReplaceOperandWithDifferentShape(branch + 1, new_tuple));
      // Sanity-check that operand, parameter, and result shapes still line up.
      CHECK(ShapeUtil::Compatible(conditional->operand(branch + 1)->shape(),
                                  conditional->branch_computation(branch)
                                      ->parameter_instruction(0)
                                      ->shape()));
      CHECK(ShapeUtil::Compatible(
          conditional->shape(),
          conditional->branch_computation(branch)->root_instruction()->shape()))
          << conditional->branch_computation(branch)->ToString();
    }
  }
  return true;
}
// If `conditional_op` has no users and is not the computation root, its
// result is dead: each branch is cloned with an empty-tuple root and the
// conditional's shape becomes the empty tuple.  Returns true on change.
bool ReplaceRootWithEmptyTupleIfNoUsers(HloInstruction* conditional_op) {
  const Shape empty_tuple = ShapeUtil::MakeTupleShape({});
  // Skip live conditionals and those already producing an empty tuple.
  if (conditional_op->user_count() != 0 ||
      conditional_op == conditional_op->parent()->root_instruction() ||
      ShapeUtil::Compatible(empty_tuple, conditional_op->shape())) {
    return false;
  }
  for (int64_t branch_id = 0; branch_id < conditional_op->branch_count();
       ++branch_id) {
    // Clone the branch so other callers of the computation are unaffected.
    HloComputation* branch_clone =
        conditional_op->GetModule()->AddEmbeddedComputation(
            conditional_op->branch_computation(branch_id)->Clone());
    conditional_op->set_branch_computation(branch_id, branch_clone);
    HloInstruction* empty_root =
        branch_clone->AddInstruction(HloInstruction::CreateTuple({}));
    branch_clone->set_root_instruction(empty_root,
                                       /*accept_different_shape=*/true);
  }
  *conditional_op->mutable_shape() = empty_tuple;
  return true;
}
// Prunes result-tuple elements of `conditional_op` that no get-tuple-element
// user reads.  Each branch is cloned with a root tuple holding only the
// surviving elements, the conditional's shape is narrowed, and GTE users are
// re-indexed.  Returns false when the rewrite does not apply: non-tuple
// result, the op is the computation root / has no users, a non-GTE user
// exists, every element is used, or a branch root shape is incompatible.
bool RemoveUnusedTupleElements(HloInstruction* conditional_op) {
  if (conditional_op->user_count() == 0 ||
      conditional_op == conditional_op->parent()->root_instruction() ||
      !conditional_op->shape().IsTuple()) {
    VLOG(3) << "Skip RemoveUnusedTupleElements due to non-tuple result:\n"
            << conditional_op->ToShortString();
    return false;
  }
  const int old_tuple_shapes_size = conditional_op->shape().tuple_shapes_size();
  // Mark which result indices are actually consumed by GTE users.
  std::vector<bool> used_indices(old_tuple_shapes_size, false);
  for (const HloInstruction* user : conditional_op->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      VLOG(3) << "Skip RemoveUnusedTupleElements due to non-GTE user:\n"
              << user->ToShortString();
      return false;
    }
    used_indices[user->tuple_index()] = true;
  }
  const int new_tuple_shapes_size =
      std::count(used_indices.begin(), used_indices.end(), true);
  if (new_tuple_shapes_size == old_tuple_shapes_size) {
    VLOG(3) << "Skip RemoveUnusedTupleElements due to every index is in use.";
    return false;
  }
  // Build bidirectional maps between old and new tuple positions; the
  // relative order of the surviving elements is preserved.
  absl::flat_hash_map<int, int> new_to_old_mapping, old_to_new_mapping;
  auto old_iter = used_indices.begin();
  for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
    old_iter = std::find(old_iter, used_indices.end(), true);
    const int old_index = std::distance(used_indices.begin(), old_iter);
    new_to_old_mapping[new_index] = old_index;
    old_to_new_mapping[old_index] = new_index;
    ++old_iter;
  }
  // Narrowed result shape containing only the used elements.
  const Shape old_shape = conditional_op->shape();
  std::vector<const Shape*> new_tuple_shapes;
  new_tuple_shapes.reserve(new_tuple_shapes_size);
  for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
    new_tuple_shapes.push_back(
        &old_shape.tuple_shapes(new_to_old_mapping[new_index]));
  }
  const Shape new_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
  // All branch roots must be tuples compatible with the old result shape,
  // otherwise the GTEs created below would be invalid.
  for (HloComputation* branch : conditional_op->branch_computations()) {
    const HloInstruction* root = branch->root_instruction();
    if (!root->shape().IsTuple() ||
        !ShapeUtil::Compatible(branch->root_instruction()->shape(),
                               old_shape)) {
      VLOG(3) << "Skip RemoveUnusedTupleElements due to some branch "
              << branch->name() << " has in-compatible root shape, expect "
              << old_shape.ToString() << ", but got "
              << root->shape().ToString() << "\n"
              << conditional_op->ToString();
      return false;
    }
  }
  // Rewrite each branch: clone it and wrap its old root in a new tuple of
  // only the used elements.
  for (int branch_id = 0; branch_id < conditional_op->branch_count();
       ++branch_id) {
    HloComputation* old_branch = conditional_op->branch_computation(branch_id);
    HloComputation* cloned_branch =
        conditional_op->GetModule()->AddEmbeddedComputation(
            old_branch->Clone());
    conditional_op->set_branch_computation(branch_id, cloned_branch);
    HloInstruction* old_root = cloned_branch->root_instruction();
    std::vector<HloInstruction*> new_tuple_root_operands;
    for (int old_index = 0; old_index < old_tuple_shapes_size; ++old_index) {
      if (used_indices[old_index]) {
        new_tuple_root_operands.push_back(
            cloned_branch->AddInstruction(HloInstruction::CreateGetTupleElement(
                old_shape.tuple_shapes(old_index), old_root, old_index)));
      }
    }
    HloInstruction* new_tuple_root = cloned_branch->AddInstruction(
        HloInstruction::CreateTuple(new_tuple_root_operands));
    cloned_branch->set_root_instruction(new_tuple_root,
                                        /*accept_different_shape=*/true);
  }
  // Install the narrowed shape and re-point every GTE at its new index.
  *conditional_op->mutable_shape() = new_shape;
  for (HloInstruction* user : conditional_op->users()) {
    const int old_index = user->tuple_index();
    const int new_index = old_to_new_mapping[old_index];
    user->set_tuple_index(new_index);
  }
  return true;
}
// Deduplicates result-tuple positions of `conditional` that are fed by the
// same instruction in every branch: GTE users of a duplicate position are
// redirected to the first equivalent position.  (The now-unused positions can
// then be pruned by RemoveUnusedTupleElements.)  Applies only when all users
// are GTEs and every branch root is a kTuple.  Returns true on change.
bool MergeDuplicateTupleElements(HloInstruction* conditional) {
  if (conditional->user_count() == 0 ||
      conditional == conditional->parent()->root_instruction() ||
      !conditional->shape().IsTuple()) {
    VLOG(3) << "Skip MergeDuplicateTupleElements due not tuple shape nor root "
               "instruction:\n"
            << conditional->ToShortString();
    return false;
  }
  for (const HloInstruction* user : conditional->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      VLOG(3) << "Skip MergeDuplicateTupleElements due not all users are "
                 "kGetTupleElement:\n"
              << conditional->ToShortString();
      return false;
    }
  }
  for (const HloComputation* branch : conditional->branch_computations()) {
    if (branch->root_instruction()->opcode() != HloOpcode::kTuple) {
      VLOG(3) << "Skip MergeDuplicateTupleElements due not all branch roots "
                 "are kTuple:\n"
              << conditional->ToShortString();
      return false;
    }
  }
  // For tuple position i, collect the i-th root-tuple operand of each branch;
  // two positions are duplicates iff these vectors compare equal.
  auto vectorize_branches_root_tuple_ith_operand = [conditional](int64_t i) {
    std::vector<const HloInstruction*> operands;
    absl::c_transform(conditional->branch_computations(),
                      std::back_inserter(operands),
                      [i](const HloComputation* branch) {
                        return branch->root_instruction()->operand(i);
                      });
    return operands;
  };
  // Redirects every GTE that reads position j to read position i instead.
  auto replace_root_user_gte_jth_with_gte_ith = [conditional](int64_t i,
                                                              int64_t j) {
    bool changed = false;
    for (HloInstruction* user : conditional->users()) {
      if (user->tuple_index() == j) {
        user->set_tuple_index(i);
        changed |= true;
      }
    }
    return changed;
  };
  bool changed = false;
  // Maps each distinct operand vector to the first tuple index producing it.
  absl::flat_hash_map<std::vector<const HloInstruction*>, int64_t>
      index_collision_table;
  for (int i = 0; i < conditional->shape().tuple_shapes_size(); ++i) {
    const std::vector<const HloInstruction*> ith_operands_vector =
        vectorize_branches_root_tuple_ith_operand(i);
    const auto emplace_res =
        index_collision_table.emplace(ith_operands_vector, i);
    if (!emplace_res.second) {
      // Position i duplicates an earlier position; redirect its readers.
      changed |=
          replace_root_user_gte_jth_with_gte_ith(emplace_res.first->second, i);
    }
  }
  return changed;
}
}
// Attempts to remove `conditional` entirely:
//   * a one-branch conditional becomes an inlined call of that branch;
//   * a conditional with a constant predicate becomes an inlined call of the
//     selected branch;
//   * a two-branch pred conditional whose branches contain only cheap
//     instructions is speculated: both branches are inlined and the results
//     combined with kSelect.
// Returns true iff the conditional was removed.
absl::StatusOr<bool> ConditionalSimplifier::TryRemoveConditional(
    HloInstruction* conditional) {
  CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
  if (!conditional->parent()->IsSafelyRemovable(conditional) ||
      conditional->HasSideEffect()) {
    VLOG(2) << "Not attempting to remove conditional as it is not removable or "
               "has side effect: "
            << conditional->ToShortString();
    return false;
  }
  auto computation = conditional->parent();
  // Wraps branch `branch` as a kCall on that branch's operand; sharding and
  // metadata are propagated from the conditional via SetupDerivedInstruction.
  auto create_call = [&](int64_t branch) {
    auto call = computation->AddInstruction(HloInstruction::CreateCall(
        conditional->shape(), {conditional->mutable_operand(1 + branch)},
        conditional->branch_computation(branch)));
    conditional->SetupDerivedInstruction(call);
    return call;
  };
  // Single-branch conditional: always executes that branch.
  if (conditional->branch_count() == 1) {
    HloInstruction* call_op = create_call(0);
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
    TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
    return true;
  }
  // Constant predicate / branch index: inline the statically chosen branch.
  if (conditional->operand(0)->opcode() == HloOpcode::kConstant) {
    int branch_index = 0;
    if (conditional->operand(0)->shape().element_type() == PRED) {
      branch_index = conditional->operand(0)->literal().Get<bool>({}) ? 0 : 1;
    } else {
      branch_index = conditional->operand(0)->literal().Get<int32_t>({});
      // Out-of-range indices are clamped to the last branch.
      if (branch_index < 0 || branch_index >= conditional->branch_count()) {
        branch_index = conditional->branch_count() - 1;
      }
    }
    HloInstruction* call_op = create_call(branch_index);
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
    TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
    return true;
  }
  // Cost heuristic for speculation: the listed opcodes and elementwise ops are
  // considered cheap enough to execute on both branches.
  auto instruction_is_expensive = [](const HloInstruction* hlo) {
    switch (hlo->opcode()) {
      case HloOpcode::kBroadcast:
      case HloOpcode::kConcatenate:
      case HloOpcode::kDynamicSlice:
      case HloOpcode::kGetTupleElement:
      case HloOpcode::kReduce:
      case HloOpcode::kReshape:
      case HloOpcode::kPad:
      case HloOpcode::kParameter:
      case HloOpcode::kSlice:
      case HloOpcode::kTuple:
        return false;
      default:
        return !hlo->IsElementwise();
    }
  };
  if (conditional->branch_count() != 2 ||
      conditional->operand(0)->shape().element_type() != PRED ||
      absl::c_any_of(conditional->branch_computation(0)->instructions(),
                     instruction_is_expensive) ||
      absl::c_any_of(conditional->branch_computation(1)->instructions(),
                     instruction_is_expensive)) {
    VLOG(2)
        << "Not attempting to remove conditional as its branch_index is not a "
           "compile-time constant or contains expensive instructions: "
        << conditional->ToShortString();
    return false;
  }
  // NOTE(review): empty-with-array-root branches are skipped — presumably the
  // select rewrite below isn't profitable or valid there; confirm upstream.
  bool branch_empty =
      ComputationIsEmptyWithArrayRoot(conditional->branch_computation(0)) ||
      ComputationIsEmptyWithArrayRoot(conditional->branch_computation(1));
  if (branch_empty) {
    return false;
  }
  HloInstruction* true_call_op = create_call(0);
  HloInstruction* false_call_op = create_call(1);
  // Broadcasts the scalar predicate to `shape` (with PRED element type) when
  // needed by a non-scalar kSelect.
  auto condition_broadcast = [&](const Shape& shape) {
    if (ShapeUtil::IsScalar(shape)) {
      return conditional->mutable_operand(0);
    }
    Shape new_shape = ShapeUtil::ChangeElementType(shape, PRED);
    UpdateLayout(&new_shape);
    return computation->AddInstruction(HloInstruction::CreateBroadcast(
        new_shape, conditional->mutable_operand(0), {}));
  };
  auto gte = [&](HloInstruction* hlo, int64_t i) {
    return computation->AddInstruction(HloInstruction::CreateGetTupleElement(
        hlo->shape().tuple_shapes(i), hlo, i));
  };
  // Recursively combines the two branch results: tokens are joined with
  // after-all, arrays with kSelect, and tuples element-by-element.
  // (std::function is needed because the lambda recurses.)
  std::function<HloInstruction*(HloInstruction*, HloInstruction*)> select =
      [&](HloInstruction* t, HloInstruction* f) {
        if (f->shape().IsToken()) {
          return computation->AddInstruction(
              HloInstruction::CreateAfterAll({t, f}));
        }
        if (f->shape().IsArray()) {
          return computation->AddInstruction(HloInstruction::CreateTernary(
              f->shape(), HloOpcode::kSelect, condition_broadcast(f->shape()),
              t, f));
        }
        std::vector<HloInstruction*> selects;
        const int64_t tuple_element_count =
            ShapeUtil::TupleElementCount(f->shape());
        selects.reserve(tuple_element_count);
        for (int64_t i = 0; i < tuple_element_count; ++i) {
          selects.push_back(select(gte(t, i), gte(f, i)));
        }
        return computation->AddInstruction(
            HloInstruction::CreateTuple(selects));
      };
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
      conditional, select(true_call_op, false_call_op)));
  TF_RETURN_IF_ERROR(CallInliner::Inline(false_call_op).status());
  TF_RETURN_IF_ERROR(CallInliner::Inline(true_call_op).status());
  return true;
}
static bool ComputationCallsChannelInstructions(
const HloComputation& computation) {
std::vector<const HloComputation*> worklist = {&computation};
while (!worklist.empty()) {
const HloComputation* work = worklist.back();
worklist.pop_back();
for (const HloInstruction* instruction : work->instructions()) {
if (DynCast<HloChannelInstruction>(instruction) != nullptr) {
return true;
}
worklist.insert(worklist.end(),
instruction->called_computations().begin(),
instruction->called_computations().end());
}
}
return false;
}
static bool InstructionCallsChannelInstructions(
const HloInstruction& instruction) {
for (const HloComputation* called_computation :
instruction.called_computations()) {
if (ComputationCallsChannelInstructions(*called_computation)) {
return true;
}
}
return false;
}
// Pass driver: collects eligible conditionals, simplifies each in place, then
// prunes unused operands of the conditionals that survived.
absl::StatusOr<bool> ConditionalSimplifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      3, "ConditionalSimplifier::Run(), before:\n" + module->ToString());
  bool changed = false;
  // Gather candidates.  Conditionals that (transitively) call channel
  // instructions, or that carry sharding, are left untouched.
  std::vector<HloInstruction*> conditional_ops;
  for (auto* comp : module->computations(execution_threads)) {
    for (auto* instr : comp->MakeInstructionPostOrder()) {
      if (instr->opcode() == HloOpcode::kConditional) {
        if (InstructionCallsChannelInstructions(*instr)) {
          continue;
        }
        if (instr->has_sharding()) {
          // Sharded conditionals are skipped entirely.
          continue;
        }
        conditional_ops.push_back(instr);
      }
    }
  }
  // First round of rewrites.  Conditionals removed outright are recorded so
  // that the operand-pruning phase below does not touch them.
  absl::flat_hash_set<HloInstruction*> removed_conditionals;
  for (HloInstruction* conditional_op : conditional_ops) {
    changed |= MergeDuplicateTupleElements(conditional_op);
    changed |= RemoveUnusedTupleElements(conditional_op);
    changed |= ReplaceRootWithEmptyTupleIfNoUsers(conditional_op);
    TF_ASSIGN_OR_RETURN(bool result, TryRemoveConditional(conditional_op));
    if (result) {
      removed_conditionals.insert(conditional_op);
      changed = true;
    }
  }
  // Group surviving conditionals by branch computation.  The side vector
  // preserves a deterministic iteration order over the hash map's keys.
  absl::flat_hash_map<HloComputation*, absl::flat_hash_set<HloInstruction*>>
      calling_conditionals;
  std::vector<HloComputation*> calling_computationals_vector;
  for (HloInstruction* conditional : conditional_ops) {
    if (removed_conditionals.contains(conditional)) {
      continue;
    }
    for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
      auto* branch_comp = conditional->branch_computation(branch);
      if (!calling_conditionals.contains(branch_comp)) {
        calling_computationals_vector.push_back(branch_comp);
      }
      calling_conditionals[branch_comp].insert(conditional);
    }
  }
  // Prune operands that no surviving conditional's branches actually use.
  for (auto* comp : calling_computationals_vector) {
    auto entry = calling_conditionals.find(comp);
    CHECK(entry != calling_conditionals.end());
    TF_ASSIGN_OR_RETURN(bool result, TryRemoveUnusedConditionalOperands(
                                         entry->first, entry->second));
    changed |= result;
  }
  XLA_VLOG_LINES(3,
                 "ConditionalSimplifier::Run(), after:\n" + module->ToString());
  return changed;
}
} | #include "xla/service/conditional_simplifier.h"
#include <string>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
// Test fixture providing a helper that builds a module whose entry root is a
// two-branch conditional (true branch: param + 1; false branch: param + 42).
class ConditionalSimplifierTest : public HloTestBase {
 public:
  // When `is_constant` is true the predicate is the literal `false`;
  // otherwise it is parameter 1 of the entry computation.
  HloComputation* MakeConditional(HloModule* module, bool is_constant = true);
};
// Builds the fixture module described on the class: an entry computation whose
// root is `conditional(pred, 1, true_comp, false_param, false_comp)`.
HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module,
                                                           bool is_constant) {
  HloComputation::Builder builder(TestName());
  // True branch: param + 1.
  HloComputation* true_computation;
  {
    HloComputation::Builder true_computation_builder(TestName() +
                                                     ".true_computation");
    auto param =
        true_computation_builder.AddInstruction(HloInstruction::CreateParameter(
            0, ShapeUtil::MakeShape(S32, {}), "param"));
    auto one = true_computation_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    true_computation_builder.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, one));
    true_computation =
        module->AddEmbeddedComputation(true_computation_builder.Build());
  }
  // False branch: param + 42.
  HloComputation* false_computation;
  {
    HloComputation::Builder false_computation_builder(TestName() +
                                                      ".false_computation");
    auto param = false_computation_builder.AddInstruction(
        HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(S32, {}),
                                        "param"));
    auto forty_two = false_computation_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42)));
    false_computation_builder.AddInstruction(HloInstruction::CreateBinary(
        ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, forty_two));
    false_computation =
        module->AddEmbeddedComputation(false_computation_builder.Build());
  }
  // Predicate: constant `false` or an entry parameter, per `is_constant`.
  auto false_instrn = builder.AddInstruction(
      is_constant
          ? HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))
          : HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(PRED, {}),
                                            "cond"));
  auto false_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(S32, {}), "false_param"));
  auto one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  builder.AddInstruction(HloInstruction::CreateConditional(
      ShapeUtil::MakeShape(S32, {}), false_instrn, one, true_computation,
      false_param, false_computation));
  return module->AddEntryComputation(builder.Build());
}
// A conditional with a constant predicate is replaced by the inlined body of
// the taken (false) branch.
TEST_F(ConditionalSimplifierTest, ConditionalGetsInlined) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get());
  ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
  EXPECT_THAT(computation->root_instruction(),
              op::Add(op::Parameter(), op::Constant()));
}
// A conditional with a non-constant predicate and cheap branches is speculated:
// both branches are inlined and combined with a select on the predicate.
TEST_F(ConditionalSimplifierTest, BranchGetsInlined) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get(), false);
  ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
  EXPECT_THAT(
      computation->root_instruction(),
      op::Select(op::Parameter(1), op::Add(op::Constant(), op::Constant()),
                 op::Add(op::Parameter(0), op::Constant())));
}
// A conditional that is the target of a control dependency must not be removed.
TEST_F(ConditionalSimplifierTest, ConditionalWithControlDependency) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get());
  auto* true_op = computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  TF_ASSERT_OK(
      true_op->AddControlDependencyTo(computation->root_instruction()));
  EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
// A conditional whose branch contains a send (channel instruction) is skipped.
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsSend) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get());
  auto* conditional = computation->root_instruction();
  ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
  auto* true_computation = conditional->true_computation();
  auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
  auto* send = true_computation->AddInstruction(HloInstruction::CreateSend(
      true_computation->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
      token, 0));
  true_computation->AddInstruction(HloInstruction::CreateSendDone(send));
  EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
// A conditional whose branch contains a recv (channel instruction) is skipped.
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsRecv) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get());
  auto* conditional = computation->root_instruction();
  ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
  auto* true_computation = conditional->true_computation();
  auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
  auto* recv = true_computation->AddInstruction(HloInstruction::CreateRecv(
      ShapeUtil::MakeShape(F32, {1}), token, 0));
  true_computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
  EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
// A conditional whose branch contains a side-effecting infeed is skipped.
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsNonRemovableInstruction) {
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = MakeConditional(m.get());
  auto* conditional = computation->root_instruction();
  ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
  auto* false_computation = conditional->false_computation();
  auto token = false_computation->AddInstruction(HloInstruction::CreateToken());
  false_computation->AddInstruction(HloInstruction::CreateInfeed(
      ShapeUtil::MakeShape(F32, {1}), token, "config"));
  EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
// Unused tuple operands of a conditional are pruned (4 -> 2 elements), while a
// plain call to the same branch computation keeps its full 4-element tuple.
TEST_F(ConditionalSimplifierTest, TrivalOperandsRemoved) {
  absl::string_view hlo_string =
      R"(
HloModule UnusedTupleOperands
on_false {
 t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
 lhs = f32[20,40] get-tuple-element(t), index=0
 rhs = f32[40,40] get-tuple-element(t), index=1
 dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
 ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
 t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
 lhs = f32[20,40] get-tuple-element(t), index=2
 rhs = f32[40,40] get-tuple-element(t), index=3
 dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
 ROOT result = (f32[20,40]) tuple(dot)
}
ENTRY main {
 c0_0 = f32[20,40] parameter(0)
 c0_1 = f32[40,40] parameter(1)
 c1_0 = f32[20,40] parameter(2)
 c1_1 = f32[40,40] parameter(3)
 p = pred[] parameter(4)
 t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) tuple(c0_0, c0_1, c1_0, c1_1)
  call = (f32[20,40]) call(t), to_apply=on_true
 ROOT result = (f32[20,40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
}
)";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  std::unique_ptr<HloModule> module = std::move(status).value();
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(module.get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
  TF_ASSERT_OK(v.Run(module.get()).status());
  HloInstruction* conditional = module->entry_computation()->root_instruction();
  EXPECT_TRUE(conditional != nullptr);
  EXPECT_EQ(conditional->operand(1)->shape().tuple_shapes().size(), 2);
  EXPECT_EQ(conditional->operand(2)->shape().tuple_shapes().size(), 2);
  HloInstruction* call = FindInstruction(module.get(), "call");
  EXPECT_EQ(
      call->to_apply()->parameter_instruction(0)->shape().tuple_shapes().size(),
      4);
}
// Regression test: cloning conditional_1 (so the clone is created after, but
// lexically before, conditional_2) must not break the simplifier's bookkeeping.
TEST_F(ConditionalSimplifierTest,
       TwoConditionalsCreatedInReversedLexicalOrder) {
  absl::string_view hlo_string = R"(
  HloModule DeadConditional
    computation.1 {
      param.1 = s64[] parameter(0)
      constant.1 = s64[] constant(1)
      ROOT add.1 = s64[] add(param.1, constant.1)
    }
    computation.2 {
      param.2 = s64[] parameter(0)
      constant.2 = s64[] constant(2)
      ROOT add.2 = s64[] add(param.2, constant.2)
    }
    computation.3 {
      param.3 = s64[] parameter(0)
      constant.3 = s64[] constant(3)
      ROOT add.3 = s64[] add(param.3, constant.3)
    }
    computation.4 {
      param.4 = s64[] parameter(0)
      constant.4 = s64[] constant(4)
      ROOT add.4 = s64[] add(param.4, constant.4)
    }
    ENTRY KernelEntry {
      param.1 = s64[] parameter(0)
      param.2 = s64[] parameter(1)
      param.3 = s64[] parameter(2)
      param.4 = pred[] parameter(3)
      conditional_1 = s64[] conditional(param.4, param.3, param.2),
        true_computation=computation.3, false_computation=computation.4
      constant.1 = pred[] constant(false)
      ROOT conditional_2 = s64[] conditional(constant.1, conditional_1,
        param.1), true_computation=computation.1,
        false_computation=computation.2
    })";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  std::unique_ptr<HloModule> module = std::move(status).value();
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(module.get()).status());
  // Replace conditional_1 with a clone, then delete the original; the clone
  // now post-dates conditional_2 in creation order.
  HloInstruction* conditional_1 =
      FindInstruction(module.get(), "conditional_1");
  HloInstruction* conditional_1_clone =
      conditional_1->parent()->AddInstruction(conditional_1->Clone());
  TF_ASSERT_OK(conditional_1->ReplaceAllUsesWith(conditional_1_clone));
  TF_ASSERT_OK(conditional_1->parent()->RemoveInstruction(conditional_1))
;
  EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
}
// A conditional with no users (but a side-effecting outfeed in one branch) has
// its root replaced with an empty tuple.
TEST_F(ConditionalSimplifierTest, RemoveDeadRoots) {
  absl::string_view hlo_string =
      R"(
HloModule RemoveDeadRoots
on_false {
  t = (f32[20,40], f32[40,40]) parameter(0)
  lhs = f32[20,40] get-tuple-element(t), index=0
  rhs = f32[40,40] get-tuple-element(t), index=1
  dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  after-all = token[] after-all()
  outfeed = token[] outfeed(dot, after-all)
  ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
  t = (f32[20,40], f32[40,40]) parameter(0)
  lhs = f32[20,40] get-tuple-element(t), index=0
  add = f32[20,40] add(lhs, lhs)
  ROOT result = (f32[20,40]) tuple(add)
}
ENTRY main {
  c0_0 = f32[20,40] parameter(0)
  c0_1 = f32[40,40] parameter(1)
  p = pred[] parameter(2)
  t = (f32[20,40], f32[40,40]) tuple(c0_0, c0_1)
  conditional = (f32[20, 40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
  ROOT result = () tuple()
}
)";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  HloInstruction* conditional =
      FindInstruction(status.value().get(), "conditional");
  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0);
}
// Only element 0 of the conditional's tuple is read, so element 1 is pruned.
TEST_F(ConditionalSimplifierTest, SecondTupleElementUnusedAndRemoved) {
  absl::string_view hlo_string =
      R"(
HloModule SecondTupleElementUnusedAndRemoved
on_true {
  arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
  get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
  copy = f32[10,10]{1,0} copy(get-tuple-element.9)
  ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
  constant.17 = f32[] constant(0)
  constant.18 = f32[] constant(1)
  rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
  arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
  get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
  ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
  constant.38 = pred[] constant(true)
  arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
  get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
  tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
  conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
  get-first-index = f32[10,10]{1,0} get-tuple-element(conditional), index=0
  ROOT result = (f32[10,10]{1,0}) tuple(get-first-index)
}
)";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  const HloInstruction* conditional =
      FindInstruction(status.value().get(), "conditional");
  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
// Only element 1 of the conditional's tuple is read, so element 0 is pruned.
TEST_F(ConditionalSimplifierTest, FirstTupleElementUnusedAndRemoved) {
  absl::string_view hlo_string =
      R"(
HloModule FirstTupleElementUnusedAndRemoved
on_true {
  arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
  get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
  copy = f32[10,10]{1,0} copy(get-tuple-element.9)
  ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
  constant.17 = f32[] constant(0)
  constant.18 = f32[] constant(1)
  rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
  arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
  get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
  ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
  constant.38 = pred[] constant(true)
  arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
  get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
  tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
  conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
  get-second-index = f32[10,10]{1,0} get-tuple-element(conditional), index=1
  ROOT result = (f32[10,10]{1,0}) tuple(get-second-index)
}
)";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  const HloInstruction* conditional =
      FindInstruction(status.value().get(), "conditional");
  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
// Both branches return the same operand at tuple indices 0 and 1, so the two
// elements are merged: all users end up reading index 0 and the shape shrinks.
TEST_F(ConditionalSimplifierTest, MergeDuplicateTupleElements) {
  absl::string_view hlo_string =
      R"(
HloModule MergeDuplicateTupleElements
on_true {
  param-true = (f32[]) parameter(0)
  gte-true = f32[] get-tuple-element(param-true), index=0
  ROOT tuple-true = (f32[], f32[]) tuple(gte-true, gte-true)
}
on_false {
  param-false = (f32[]) parameter(0)
  constant.0 = f32[] constant(0)
  constant.1 = f32[] constant(1)
  rng = f32[] rng(constant.0, constant.1), distribution=rng_uniform
  ROOT tuple-false = (f32[], f32[]) tuple(rng, rng)
}
ENTRY main {
  comp = pred[] parameter(0)
  arg = (f32[]) parameter(1)
  conditional = (f32[], f32[]) conditional(comp, arg, arg), true_computation=on_true, false_computation=on_false
  gte.0 = f32[] get-tuple-element(conditional), index=0
  gte.1 = f32[] get-tuple-element(conditional), index=1
  ROOT add = f32[] add(gte.0, gte.1)
}
)";
  auto status = ParseAndReturnVerifiedModule(hlo_string);
  TF_ASSERT_OK(status.status());
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
  TF_ASSERT_OK(v.Run(status.value().get()).status());
  const HloInstruction* conditional =
      FindInstruction(status.value().get(), "conditional");
  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
  const HloInstruction* gte_0 = FindInstruction(status.value().get(), "gte.0");
  const HloInstruction* gte_1 = FindInstruction(status.value().get(), "gte.1");
  EXPECT_EQ(gte_0->tuple_index(), 0);
  EXPECT_EQ(gte_1->tuple_index(), 0);
}
// Token-shaped results are combined with after-all (not select) when the
// conditional is speculated away.
TEST_F(ConditionalSimplifierTest, SimplifyConditionalWithTokens) {
  absl::string_view hlo_string =
      R"(
HloModule SimplifyConditionalWithTokens
true_comp {
  ROOT parameter.13 = (token[]) parameter(0)
}
false_comp {
  ROOT parameter.21 = (token[]) parameter(0)
}
ENTRY entry {
  parameter.29 = pred[] parameter(0)
  token.1 = token[] after-all()
  token.2 = token[] after-all()
  tuple.3 = (token[]) tuple(token.1)
  tuple.4 = (token[]) tuple(token.2)
  ROOT conditional.5 = (token[]) conditional(parameter.29, tuple.3, tuple.4), true_computation=true_comp, false_computation=false_comp
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloVerifier v(false, false);
  TF_ASSERT_OK(v.Run(module.get()).status());
  EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::AfterAll(
                  op::GetTupleElement(op::Tuple(op::AfterAll()), 0),
                  op::GetTupleElement(op::Tuple(op::AfterAll()), 0))));
}
}
} |
1,960 | cpp | tensorflow/tensorflow | indexed_array_analysis | third_party/xla/xla/service/indexed_array_analysis.cc | third_party/xla/xla/service/indexed_array_analysis_test.cc | #ifndef XLA_SERVICE_INDEXED_ARRAY_ANALYSIS_H_
#define XLA_SERVICE_INDEXED_ARRAY_ANALYSIS_H_
#include <type_traits>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Analyzes HLO instructions and, where possible, expresses the array an
// instruction computes in terms of simpler symbolic arrays: constants,
// reshapes of other arrays, and "scalar indexed" arrays (gather-like
// selections along one dimension of a source array).
class IndexedArrayAnalysis {
 public:
  // Base class for the symbolic array values produced by the analysis.
  class Array {
   public:
    enum Kind {
      kUnknown,
      kConstant,
      kReshaped,
      kScalarIndexedConstant,
      kScalarIndexed
    };
    virtual Kind kind() const = 0;
    virtual const Shape& shape() const = 0;
    // Checked downcast to a concrete subclass.  The dynamic_cast sanity
    // check is only compiled in when RTTI is available.
    template <typename T>
    T* as() {
      static_assert((std::is_base_of<Array, T>::value),
                    "target type not derived from source type");
#if !defined(__GNUC__) || defined(__GXX_RTTI)
      CHECK_NE(dynamic_cast<T*>(this), nullptr);
#endif
      return static_cast<T*>(this);
    }
    virtual ~Array() = default;
    Array& operator=(const Array& other) = delete;
  };
  // An array the analysis could not characterize; wraps the defining HLO
  // instruction.
  class UnknownArray : public Array {
   public:
    Kind kind() const override { return kUnknown; }
    const Shape& shape() const override { return instruction().shape(); }
    const HloInstruction& instruction() const { return instruction_; }
   private:
    explicit UnknownArray(const HloInstruction* instr) : instruction_(*instr) {}
    const HloInstruction& instruction_;
    friend class IndexedArrayAnalysis;
  };
  // An array whose value is a compile-time constant literal.
  class ConstantArray : public Array {
   public:
    Kind kind() const override { return kConstant; }
    const Shape& shape() const override { return literal()->shape(); }
    const Literal* literal() const { return literal_; }
   private:
    explicit ConstantArray(const Literal* literal) : literal_(literal) {}
    const Literal* literal_;
    friend class IndexedArrayAnalysis;
  };
  // An array that is a reshape of another symbolic array to `shape`.
  class ReshapedArray : public Array {
   public:
    Kind kind() const override { return kReshaped; }
    Array* operand() const { return operand_; }
    const Shape& shape() const override { return shape_; }
   private:
    explicit ReshapedArray(Array* operand, Shape shape)
        : operand_(operand), shape_(shape) {}
    Array* operand_;
    const Shape shape_;
    friend class IndexedArrayAnalysis;
  };
  // A gather-like array: `indices` selects positions along `source_dim` of
  // `source`; `output_dims` names the result dimensions that index into
  // `indices`.  (Exact semantics defined by the .cc implementation.)
  class ScalarIndexedArray : public Array {
   public:
    Kind kind() const override { return kScalarIndexed; }
    const Shape& shape() const override { return shape_; }
    Array* source() const { return source_; }
    Array* indices() const { return indices_; }
    int64_t source_dim() const { return source_dim_; }
    absl::Span<const int64_t> output_dims() const { return output_dims_; }
   private:
    explicit ScalarIndexedArray(Array* source, Array* indices,
                                int64_t source_dim,
                                std::vector<int64_t> output_dims, Shape shape)
        : source_(source),
          indices_(indices),
          source_dim_(source_dim),
          output_dims_(std::move(output_dims)),
          shape_(std::move(shape)) {}
    Array* source_;
    Array* indices_;
    int64_t source_dim_;
    std::vector<int64_t> output_dims_;
    Shape shape_;
    friend class IndexedArrayAnalysis;
  };
  // A ScalarIndexedArray whose source is known to be a ConstantArray.
  class ScalarIndexedConstantArray : public ScalarIndexedArray {
   public:
    Kind kind() const override { return kScalarIndexedConstant; }
    const Literal& literal() const {
      return *source()->as<ConstantArray>()->literal();
    }
   private:
    explicit ScalarIndexedConstantArray(Array* source, Array* indices,
                                        int64_t source_dim,
                                        std::vector<int64_t> output_dims,
                                        Shape shape)
        : ScalarIndexedArray(source, indices, source_dim,
                             std::move(output_dims), std::move(shape)) {
      // The source must actually be a ConstantArray.
      CHECK(dynamic_cast<ConstantArray*>(source));
    }
    friend class IndexedArrayAnalysis;
  };
  // Returns (computing and caching if necessary) the symbolic Array for
  // `instr`.  The returned object is owned by this analysis instance.
  absl::StatusOr<Array*> GetArrayFor(const HloInstruction* instr);
  // Renders `root` (recursively) as a human-readable s-expression.
  std::string ToString(Array* root, bool print_constants = false);
 private:
  // Populates `cache_` for `root` and everything it depends on.
  absl::Status TraverseAndPopulateCache(const HloInstruction* root);
  // Per-opcode analysis entry points; each returns the symbolic Array.
  absl::StatusOr<Array*> ComputeArrayFor(const HloInstruction* instr);
  absl::StatusOr<Array*> ComputeArrayForConstant(const Literal& literal);
  absl::StatusOr<Array*> ComputeArrayForGather(
      const Shape& shape, const GatherDimensionNumbers& dim_numbers,
      absl::Span<const int64_t> slice_sizes, Array* source, Array* indices);
  absl::StatusOr<Array*> ComputeArrayForDotWithIndexedLhs(
      const Shape& shape, const DotDimensionNumbers& dim_numbers,
      const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
      ConstantArray* rhs);
  absl::StatusOr<Array*> ComputeArrayForDotWithIndexedRhs(
      const Shape& shape, const DotDimensionNumbers& dim_numbers,
      const PrecisionConfig& precision_config, ConstantArray* lhs,
      ScalarIndexedConstantArray* rhs);
  absl::StatusOr<Array*> ComputeArrayForDot(
      const Shape& shape, const DotDimensionNumbers& dim_numbers,
      const PrecisionConfig& precision_config, Array* lhs, Array* rhs);
  // Simplification helpers for composing/reshaping scalar-indexed arrays.
  absl::StatusOr<ScalarIndexedArray*> FoldGatherOfGather(
      ScalarIndexedArray* source, Array* indices, int64_t source_dim,
      absl::Span<const int64_t> output_dims, Shape shape);
  absl::StatusOr<ScalarIndexedArray*> ReshapeToRemoveDegenerateDims(
      ScalarIndexedArray* operand);
  absl::StatusOr<ScalarIndexedArray*> ReshapeToAddDegenerateDims(
      ScalarIndexedArray* operand, absl::Span<const int64_t> degenerate_dims);
  absl::StatusOr<ScalarIndexedArray*> FoldReshapeOfGather(
      const Shape& shape, ScalarIndexedConstantArray* operand);
  absl::StatusOr<ScalarIndexedArray*> FoldReshapeOfGatherNoDegenerateDims(
      const Shape& shape, ScalarIndexedConstantArray* scalar_indexed);
  absl::StatusOr<Array*> ComputeArrayForReshape(const Shape& shape,
                                                Array* operand);
  absl::StatusOr<Array*> ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
                                                            Array* lhs,
                                                            Array* rhs);
  absl::StatusOr<Array*> ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,
                                                           Array* operand);
  // Allocates a T owned by this analysis and returns a raw pointer to it.
  template <typename T, typename... Args>
  T* Construct(Args&&... args) {
    T* new_tensor = new T(std::forward<Args>(args)...);
    owned_tensors_.push_back(std::unique_ptr<T>(new_tensor));
    return new_tensor;
  }
  // Builds the most specific scalar-indexed kind: a constant source yields a
  // ScalarIndexedConstantArray, anything else a plain ScalarIndexedArray.
  ScalarIndexedArray* ConstructScalarIndexedArray(
      Array* source, Array* indices, int64_t source_dim,
      std::vector<int64_t> output_dims, Shape shape) {
    if (source->kind() == Array::kConstant) {
      return Construct<ScalarIndexedConstantArray>(source, indices, source_dim,
                                                   std::move(output_dims),
                                                   std::move(shape));
    } else {
      return Construct<ScalarIndexedArray>(source, indices, source_dim,
                                           std::move(output_dims),
                                           std::move(shape));
    }
  }
  // Moves `literal` into analysis-owned storage; the returned pointer stays
  // valid for the lifetime of this object (std::deque-like stability is
  // required of owned_literals_ — note it is a std::vector of move-only
  // Literals, so pointers are into the elements, not the vector buffer).
  Literal* TakeOwnership(Literal literal) {
    owned_literals_.push_back(std::move(literal));
    return &owned_literals_.back();
  }
  absl::StatusOr<Literal*> TakeOwnership(
      absl::StatusOr<Literal> literal_or_error) {
    TF_ASSIGN_OR_RETURN(Literal literal, std::move(literal_or_error));
    owned_literals_.push_back(std::move(literal));
    return &owned_literals_.back();
  }
  std::vector<std::unique_ptr<Array>> owned_tensors_;
  std::vector<Literal> owned_literals_;
  // Memoizes GetArrayFor results per instruction.
  absl::flat_hash_map<const HloInstruction*, Array*> cache_;
};
// HLO pass wrapper around IndexedArrayAnalysis; the name indicates it exists
// to print the analysis results (behavior defined by Run in the .cc).
class IndexedArrayAnalysisPrinterPass : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "indexed-array-analysis-printer-pass";
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/indexed_array_analysis.h"
#include <algorithm>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/map_util.h"
#include "xla/util.h"
namespace xla {
namespace {
using Analysis = IndexedArrayAnalysis;
using UnknownArray = Analysis::UnknownArray;
using ConstantArray = Analysis::ConstantArray;
using ReshapedArray = Analysis::ReshapedArray;
using ScalarIndexedArray = Analysis::ScalarIndexedArray;
using absl::StrJoin;
}
// Renders `root` as a LISP-like s-expression, recursing into operands.
// Constant contents are included only when `print_constants` is set.
std::string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {
  switch (root->kind()) {
    case Array::kUnknown:
      return absl::StrCat("%", root->as<UnknownArray>()->instruction().name());
    case Array::kConstant: {
      const std::string shape_string = ShapeUtil::HumanString(root->shape());
      if (!print_constants) {
        return absl::StrCat("(constant ", shape_string, ")");
      }
      return absl::StrCat("(constant ", shape_string, " ",
                          root->as<ConstantArray>()->literal()->ToString(),
                          ")");
    }
    case Array::kReshaped: {
      ReshapedArray* reshaped = root->as<ReshapedArray>();
      return absl::StrCat("(reshape ",
                          ToString(reshaped->operand(), print_constants),
                          " to ", ShapeUtil::HumanString(reshaped->shape()),
                          ")");
    }
    case Array::kScalarIndexedConstant:
    case Array::kScalarIndexed: {
      auto* indexed = root->as<ScalarIndexedArray>();
      const bool constant_source =
          root->kind() == Array::kScalarIndexedConstant;
      return absl::StrCat(
          "(", constant_source ? "scalar-indexed-const" : "scalar-indexed",
          " ", ToString(indexed->source(), print_constants), " ",
          ToString(indexed->indices(), print_constants), " ",
          indexed->source_dim(), "->[",
          StrJoin(indexed->output_dims(), ","), "])");
    }
  }
}
// Returns the Array node for `instr`, computing (and caching) the analysis of
// the reachable subgraph on the first request.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::GetArrayFor(
    const HloInstruction* instr) {
  // Fast path: this instruction was analyzed before.
  if (auto cached = cache_.find(instr); cached != cache_.end()) {
    return cached->second;
  }
  // Slow path: analyze everything reachable from `instr`, then the result is
  // guaranteed to be in the cache.
  TF_RETURN_IF_ERROR(TraverseAndPopulateCache(instr));
  return FindOrDie(cache_, instr);
}
// Iterative post-order DFS from `root`: every reachable instruction gets an
// Array computed and stored in cache_, operands strictly before users (so
// ComputeArrayFor can assume its operands are already cached).
absl::Status IndexedArrayAnalysis::TraverseAndPopulateCache(
    const HloInstruction* root) {
  // Depth first search over the graph, invoking ComputeArrayFor in postorder.
  absl::InlinedVector<const HloInstruction*, 4> stack;
  // kDiscovered: on the stack, operands not yet pushed.
  // kVisited: operands pushed; compute on the second encounter.
  enum DfsState { kDiscovered, kVisited };
  absl::flat_hash_map<const HloInstruction*, DfsState> dfs_state_map;
  stack.push_back(root);
  InsertOrDie(&dfs_state_map, root, kDiscovered);
  do {
    const HloInstruction* instr = stack.back();
    if (cache_.contains(instr)) {
      // Already fully processed (possibly by an earlier call); drop it.
      stack.pop_back();
      continue;
    }
    switch (FindOrDie(dfs_state_map, instr)) {
      case kDiscovered: {
        // First encounter: push un-cached operands so they are computed
        // before `instr`, then mark `instr` for computation on re-visit.
        for (const HloInstruction* operand : instr->operands()) {
          if (!cache_.contains(operand)) {
            stack.push_back(operand);
            // An operand may be reached via several users, but it must not
            // already be in the kVisited state when re-pushed.
            CHECK(!dfs_state_map.contains(operand) ||
                  dfs_state_map[operand] == kDiscovered);
            dfs_state_map[operand] = kDiscovered;
          }
        }
        dfs_state_map[instr] = kVisited;
        break;
      }
      case kVisited:
        // Second encounter: all operands are cached; compute and memoize.
        stack.pop_back();
        TF_ASSIGN_OR_RETURN(Array * array, ComputeArrayFor(instr));
        InsertOrDie(&cache_, instr, array);
        break;
    }
  } while (!stack.empty());
  return absl::OkStatus();
}
// Dispatches `instr` to the opcode-specific lowering. Operands are looked up
// in cache_, which TraverseAndPopulateCache guarantees to be populated.
// Instructions with no specialized lowering (or lowerings that bail out by
// returning nullptr) become UnknownArray nodes.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayFor(
    const HloInstruction* instr) {
  Array* computed_array;
  // Elementwise ops are matched on arity before any opcode-specific cases.
  if (instr->IsElementwise() && instr->operand_count() == 1) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseUnaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0))));
  } else if (instr->IsElementwise() && instr->operand_count() == 2) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseBinaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0)),
            FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kConstant) {
    TF_ASSIGN_OR_RETURN(computed_array,
                        ComputeArrayForConstant(instr->literal()));
  } else if (instr->opcode() == HloOpcode::kGather) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForGather(instr->shape(), instr->gather_dimension_numbers(),
                              instr->gather_slice_sizes(),
                              FindOrDie(cache_, instr->operand(0)),
                              FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kReshape) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForReshape(instr->shape(),
                               FindOrDie(cache_, instr->operand(0))));
  } else if (instr->opcode() == HloOpcode::kDot) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),
                           instr->precision_config(),
                           FindOrDie(cache_, instr->operand(0)),
                           FindOrDie(cache_, instr->operand(1))));
  } else {
    computed_array = nullptr;
  }
  // Fall back to an opaque UnknownArray when no lowering applied.
  if (!computed_array) {
    computed_array = Construct<UnknownArray>(instr);
  }
  return computed_array;
}
// Wraps `literal` (whose storage the caller keeps alive) in an
// analysis-owned ConstantArray node.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForConstant(
    const Literal& literal) {
  ConstantArray* constant_array = Construct<ConstantArray>(&literal);
  return constant_array;
}
// Folds gather(gather(a, x), y) into gather(a, gather(x, y)) by simulating
// how each output dimension of the composed gather was produced. `source`
// is the inner gather (of `a` with indices `x`); `indices`/`source_dim`/
// `output_dims`/`shape` describe the outer gather.
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
    ScalarIndexedArray* source, Array* indices, int64_t source_dim,
    absl::Span<const int64_t> output_dims, Shape shape) {
  Array* a = source->source();
  Array* x = source->indices();
  Array* y = indices;
  // Per-dimension tag describing which gather (if any) produced it.
  enum class IndexComponent { Ungathered, GatheredFirst, GatheredSecond };
  // Start from the dimensions of `a` and replay both gathers on the tags.
  std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(),
                                              IndexComponent::Ungathered);
  // Replay the inner gather: drop its source dim, insert its output dims.
  EraseAt(&simulated_index, source->source_dim());
  for (int64_t gather_dim : source->output_dims()) {
    simulated_index.insert(simulated_index.begin() + gather_dim,
                           IndexComponent::GatheredFirst);
  }
  // Replay the outer gather the same way.
  EraseAt(&simulated_index, source_dim);
  for (int64_t output_dim : output_dims) {
    simulated_index.insert(simulated_index.begin() + output_dim,
                           IndexComponent::GatheredSecond);
  }
  // The outer gather must index into one of the inner gather's output dims;
  // find which dim of `x` that corresponds to.
  int64_t source_dim_for_index_array =
      FindIndex(source->output_dims(), source_dim);
  CHECK_NE(source_dim_for_index_array, source->output_dims().size());
  // Output dims of the composed index array gather(x, y), counted over the
  // gathered components only.
  std::vector<int64_t> output_dims_for_index_array;
  int64_t gathered_index_components_seen = 0;
  for (IndexComponent simulation_dim : simulated_index) {
    if (simulation_dim == IndexComponent::GatheredSecond) {
      output_dims_for_index_array.push_back(gathered_index_components_seen);
    }
    if (simulation_dim != IndexComponent::Ungathered) {
      gathered_index_components_seen++;
    }
  }
  // Shape and output dims of the final composed gather over `a`.
  std::vector<int64_t> dim_sizes_for_composed_index;
  std::vector<int64_t> output_dims_for_new_gather;
  for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {
    if (simulated_index[i] != IndexComponent::Ungathered) {
      dim_sizes_for_composed_index.push_back(shape.dimensions(i));
      output_dims_for_new_gather.push_back(i);
    }
  }
  // inner_indices = gather(x, y); final result = gather(a, inner_indices).
  Array* inner_indices = ConstructScalarIndexedArray(
      x, y, source_dim_for_index_array, output_dims_for_index_array,
      ShapeUtil::MakeShape(x->shape().element_type(),
                           dim_sizes_for_composed_index));
  return ConstructScalarIndexedArray(a, inner_indices, source->source_dim(),
                                     output_dims_for_new_gather,
                                     std::move(shape));
}
// Lowers a gather into a ScalarIndexedArray when it matches the restricted
// pattern this analysis handles: scalar indices, exactly one start-index /
// collapsed-slice dimension (and the same one), and full slices along every
// other source dimension. Returns nullptr (not an error) when the gather
// does not match, which the caller turns into an UnknownArray.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
    const Shape& shape, const GatherDimensionNumbers& dim_numbers,
    absl::Span<const int64_t> slice_sizes, Array* source, Array* indices) {
  // Indices must be scalars, i.e. the index vector dim is degenerate.
  if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {
    VLOG(3) << "ComputeArrayForGather: indices are not scalar";
    return nullptr;
  }
  CHECK_EQ(dim_numbers.start_index_map_size(), 1);
  if (dim_numbers.collapsed_slice_dims_size() != 1 ||
      dim_numbers.collapsed_slice_dims(0) != dim_numbers.start_index_map(0)) {
    VLOG(3) << "ComputeArrayForGather: gather operations must elide "
               "start_index_map[0] and "
               "start_index_map[0] only";
    return nullptr;
  }
  // Every non-collapsed source dimension must be sliced in full.
  for (int64_t i = 0, e = source->shape().dimensions_size(); i < e; i++) {
    if (i != dim_numbers.collapsed_slice_dims(0) &&
        source->shape().dimensions(i) != slice_sizes[i]) {
      VLOG(3) << "ComputeArrayForGather: slice_sizes[" << i
              << "] != source->shape().dimensions(" << i << ") -- "
              << source->shape().dimensions(i) << " vs. " << slice_sizes[i]
              << " with dim_numbers.collapsed_slice_dims(0) = "
              << dim_numbers.collapsed_slice_dims(0);
      return nullptr;
    }
  }
  int64_t source_dim = dim_numbers.start_index_map(0);
  // Output dims are the result dims that are not offset dims.
  std::vector<int64_t> output_dims;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
      output_dims.push_back(i);
    }
  }
  // Gather-of-gather folds into a single gather with composed indices.
  if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
    if (absl::c_linear_search(indexed->output_dims(), source_dim)) {
      return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
                                shape);
    }
  } else if (auto* constant = dynamic_cast<ConstantArray*>(source)) {
    return Construct<ScalarIndexedConstantArray>(constant, indices, source_dim,
                                                 output_dims, shape);
  }
  return Construct<ScalarIndexedArray>(source, indices, source_dim, output_dims,
                                       shape);
}
namespace {
// Returns the smallest index i such that the product of values[i..] equals
// `product`, or -1 if no suffix has that exact product. All values must be
// strictly positive.
int64_t FindSuffixWithProduct(absl::Span<const int64_t> values,
                              int64_t product) {
  DCHECK(absl::c_all_of(values, [](int64_t value) { return value > 0; }));
  int64_t accumulated = 1;
  int64_t idx = values.size() - 1;
  // Multiply from the right until the running product reaches `product`.
  while (idx >= 0 && product > accumulated) {
    accumulated *= values[idx];
    --idx;
  }
  // idx now points one before the first element of the candidate suffix.
  return product == accumulated ? idx + 1 : -1;
}
// A (result dimension, operand dimension) pair for a dimension that a
// reshape carries through unchanged.
struct ReshapePassthroughDimPair {
  int64_t result_dim;
  int64_t operand_dim;
};
// Computes the dimensions a reshape from `operand_shape` to `result_shape`
// passes through unchanged: a result dim is passthrough when the elements to
// its right occupy exactly the same suffix volume in the operand and the dim
// sizes match. The returned pairs are sorted by both result_dim and
// operand_dim (checked by the DCHECKs below).
std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
    absl::Span<const int64_t> operand_shape,
    absl::Span<const int64_t> result_shape) {
  std::vector<ReshapePassthroughDimPair> result;
  // Number of result elements strictly to the right of result_dim.
  int64_t result_subarray_size = 1;
  for (int64_t result_dim = result_shape.size() - 1; result_dim >= 0;
       --result_dim) {
    // Operand suffix whose volume equals the result suffix, if any.
    int64_t candidate_operand_dim =
        FindSuffixWithProduct(operand_shape, result_subarray_size);
    // candidate_operand_dim == 0 would mean the whole operand fits strictly
    // to the right of result_dim, which cannot happen for a valid reshape.
    CHECK_NE(candidate_operand_dim, 0)
        << "result_dim = " << result_dim
        << ", result_subarray_size = " << result_subarray_size
        << ", result_shape = [" << StrJoin(result_shape, ",") << "]"
        << ", operand_shape = [" << StrJoin(operand_shape, ",") << "]";
    if (candidate_operand_dim != -1 &&
        result_shape[result_dim] == operand_shape[candidate_operand_dim - 1]) {
      result.push_back({result_dim,
                        candidate_operand_dim - 1});
    }
    result_subarray_size *= result_shape[result_dim];
  }
  // The loop visits result dims right-to-left; restore ascending order.
  absl::c_reverse(result);
  if (VLOG_IS_ON(3)) {
    std::vector<std::string> result_strings;
    absl::c_transform(result, std::back_inserter(result_strings),
                      [](ReshapePassthroughDimPair value) {
                        return absl::StrCat(value.result_dim, "->",
                                            value.operand_dim);
                      });
    VLOG(3) << "For a reshape from [" << StrJoin(operand_shape, ",") << "] to ["
            << StrJoin(result_shape, ",") << "] passthrough indices are ["
            << StrJoin(result_strings, ",")
            << "] (legend: `result`->`operand`)";
  }
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.result_dim < rhs.result_dim;
      }));
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.operand_dim < rhs.operand_dim;
      }));
  return result;
}
// Returns true if `dim` appears as an operand dimension in any of the
// passthrough pairs.
bool IsReshapePassthroughOperandDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims, int64_t dim) {
  for (const ReshapePassthroughDimPair& pair : passthrough_dims) {
    if (pair.operand_dim == dim) {
      return true;
    }
  }
  return false;
}
// Maps a passthrough operand dimension to its result dimension. The caller
// must guarantee (e.g. via IsReshapePassthroughOperandDim) that `operand_dim`
// is indeed a passthrough dimension.
int64_t MapPassthroughOperandDimToResultDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims,
    int64_t operand_dim) {
  auto matching_pair = absl::c_find_if(
      passthrough_dims, [&](const ReshapePassthroughDimPair& candidate) {
        return candidate.operand_dim == operand_dim;
      });
  CHECK(matching_pair != passthrough_dims.end());
  return matching_pair->result_dim;
}
int64_t FindSourcePositionForPassthroughResultDim(
a | #include "xla/service/indexed_array_analysis.h"
#include "absl/strings/ascii.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
namespace xla {
namespace {
class IndexedArrayAnalysisTest : public HloTestBase {
protected:
void AssertArrayForRootExpressionIs(const std::string& hlo_text,
const std::string& root_expression) {
AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
false);
}
void AssertArrayWithConstantsForRootExpressionIs(
const std::string& hlo_text, const std::string& root_expression) {
AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,
true);
}
private:
std::string CanonicalizeWhitespace(const std::string& text) {
std::string result;
for (char c : text) {
if (!absl::ascii_isspace(c)) {
result.push_back(c);
} else if (!result.empty() && result.back() != ' ') {
result.push_back(' ');
}
}
while (!result.empty() && result.back() == ' ') {
result.pop_back();
}
return result;
}
void AssertArrayForRootExpressionIsImpl(const std::string& hlo_text,
const std::string& root_expression,
bool print_constants) {
IndexedArrayAnalysis indexed_tensor_analysis;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(IndexedArrayAnalysis::Array* const array_result,
indexed_tensor_analysis.GetArrayFor(
m->entry_computation()->root_instruction()));
std::string string_result = CanonicalizeWhitespace(
indexed_tensor_analysis.ToString(array_result, print_constants));
LOG(INFO) << string_result;
ASSERT_EQ(string_result, CanonicalizeWhitespace(root_expression));
}
};
// A gather that picks whole rows with scalar indices lowers to a single
// scalar-indexed node over the (non-constant) operand.
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneGather) {
  std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
  operand = s32[3,3] parameter(0)
  indices = s32[5] parameter(1)
  ROOT gather = s32[5,3] gather(operand, indices),
      offset_dims={1},
      collapsed_slice_dims={0},
      start_index_map={0},
      index_vector_dim=1,
      slice_sizes={1,3}
}
)";
  AssertArrayForRootExpressionIs(hlo_text,
                                 "(scalar-indexed %operand %indices 0->[0])");
}
// Same pattern with a constant operand lowers to the constant-source
// variant, scalar-indexed-const.
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneConstantGather) {
  std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
  operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
  indices = s32[5] parameter(0)
  ROOT gather = s32[5,3] gather(operand, indices),
      offset_dims={1},
      collapsed_slice_dims={0},
      start_index_map={0},
      index_vector_dim=1,
      slice_sizes={1,3}
}
)";
  AssertArrayForRootExpressionIs(
      hlo_text, "(scalar-indexed-const (constant s32[3,3]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed0) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices = s32[5,2] parameter(0)
ROOT gather = s32[5] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed1) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed2) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3,1] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2,3] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={0},
index_vector_dim=1,
slice_sizes={2,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed3) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[5] parameter(1)
ROOT gather = s32[5,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,2}
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%gather");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})
indices_a = s32[5] parameter(0)
indices_b = s32[2] parameter(1)
gather_a = s32[5,3] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT gather_b = s32[2,3] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3]) (scalar-indexed %indices_a "
"%indices_b 0->[0]) 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithOneToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[2] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[5,3,2] gather(gather_a, indices_b),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 1->[1]) 1->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,6] parameter(0)
indices_a = s32[2] parameter(1)
indices_b = s32[5,7] parameter(2)
gather_a = s32[2,6] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT gather_b = s32[5,6,7] gather(gather_a, indices_b),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
}
)";
AssertArrayForRootExpressionIs(hlo_text,
"(scalar-indexed %operand (scalar-indexed "
"%indices_a %indices_b 0->[0,1]) 0->[0,2])");
}
TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithManyToOne) {
std::string hlo_text = R"(
HloModule SimpleGather
ENTRY main {
operand = s32[3,2] parameter(0)
indices_a = s32[5,7] parameter(1)
indices_b = s32[4,8] parameter(2)
gather_a = s32[5,3,7] gather(operand, indices_a),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3,1}
ROOT gather_b = s32[4,5,3,8] gather(gather_a, indices_b),
offset_dims={1,2},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=2,
slice_sizes={5,3,1}
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed %operand (scalar-indexed %indices_a %indices_b "
"1->[0,2]) 1->[0,1,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5] parameter(0)
gather = s32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text, "(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,7] parameter(0)
gather = s32[5,4,7] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,2,6] constant({
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}},
{{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[5,7] parameter(0)
gather = s32[5,2,6,7] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,2,6}
ROOT reshape = s32[5,3,4,7] reshape(gather)
}
)";
AssertArrayForRootExpressionIs(
hlo_text,
"(scalar-indexed-const (constant s32[3,3,4]) %indices 0->[0,3])");
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather3) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather4) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 1, 2, 3 } })
i.0 = s64[1,3]{1,0} parameter(0)
g.0 = s32[1,3,3]{2,1,0} gather(operand, i.0), offset_dims={2},
collapsed_slice_dims={0}, start_index_map={0},
index_vector_dim=2, slice_sizes={1,3}
i.1 = s64[1] parameter(1)
g.1 = s32[1,1,3]{2,1,0} gather(g.0, i.1), offset_dims={0,2},
collapsed_slice_dims={1}, start_index_map={1},
index_vector_dim=1, slice_sizes={1,1,3}
ROOT reshape = s32[1,3]{1,0} reshape(g.1)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,3])
(reshape
(scalar-indexed %i.0 %i.1 1->[1])
to s64[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather5) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,6] constant({{1,2,3,4,5,6}})
indices = s32[1] parameter(0)
gather = s32[1,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,6}
ROOT reshape = s32[1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[1,1,1,6])
(reshape %indices to s32[])
0->[])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather6) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[1,2,6] constant({{
{1,2,3,4,5,6},{1,2,3,4,5,6}}})
indices = s32[1] parameter(0)
gather = s32[1,1,6] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={1,1,6}
ROOT reshape = s32[1,1,1,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,1,6] s32[2,1,1,1,6] {
{ { { { 1, 2, 3, 4, 5, 6 } } } },
{ { { { 1, 2, 3, 4, 5, 6 } } } } })
(reshape %indices to s32[])
0->[])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather7) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[2,6] constant({
{1,2,3,4,5,6},{1,2,3,4,5,6}})
indices = s32[1,5] parameter(0)
gather = s32[1,5,6] gather(operand, indices),
offset_dims={2},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,6}
ROOT reshape = s32[1,1,5,6] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(scalar-indexed-const
(constant s32[2,1,1,6] s32[2,1,1,6] {
{ { { 1, 2, 3, 4, 5, 6 } } },
{ { { 1, 2, 3, 4, 5, 6 } } } })
(reshape %indices to s32[5])
0->[2])
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text,
expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold0) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4}
ROOT reshape = s32[5,2,2,2,3] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4])
%indices
0->[0,2])
to s32[5,2,2,2,3])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold1) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,5,2] constant({
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}},
{{1,2},{3,4},{5,6},{7,8},{9,10}}})
indices = s32[7] parameter(0)
gather = s32[3,2,7] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1,2}
ROOT reshape = s32[6,7] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,5,2])
%indices
1->[2])
to s32[6,7])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold2) {
std::string hlo_text = R"(
HloModule ReshapeOfGather
ENTRY main {
operand = s32[3,4,1] constant({
{{1},{2},{3},{4}},
{{1},{2},{3},{4}},
{{1},{2},{3},{4}}})
indices = s32[5,6] parameter(0)
gather = s32[5,4,6,1] gather(operand, indices),
offset_dims={1,3},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,4,1}
ROOT reshape = s32[5,2,2,2,3,1] reshape(gather)
}
)";
const char* expected_root_expression = R"(
(reshape
(scalar-indexed-const
(constant s32[3,4,1])
%indices
0->[0,2])
to s32[5,2,2,2,3,1])
)";
AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}
TEST_F(IndexedArrayAnalysisTest, UnaryOpOfGather) {
std::string hlo_text = R"(
HloModule UnaryOpOfGather
ENTRY main {
operand = f32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
indices = s32[5] parameter(0)
gather = f32[5,4] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT tanh = f32[5,4] tanh(gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant f32[3,4] f32[3,4] {
{ 0.761594176, 0.964027584, 0.995054781, 0.999329329 },
{ 0.761594176, 0.995054781, 0.964027584, 0.999329329 },
{ 0.999329329, 0.995054781, 0.964027584, 0.761594176 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedScalarWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 6, 7, 8, 9 },
{ 6, 8, 7, 9 },
{ 9, 8, 7, 6 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsLhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ -4, -3, -2, -1 },
{ -4, -2, -3, -1 },
{ -1, -2, -3, -4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest,
SubtractBroadcastedScalarWithGather_GatherIsRhs) {
std::string hlo_text = R"(
HloModule SubtractBroadcastedScalarWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant = s32[] constant(5)
constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT sub = s32[5,4] subtract(constant_broadcasted, gather)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 4, 3, 2, 1 },
{ 4, 2, 3, 1 },
{ 1, 2, 3, 4 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[4] constant({10,11,12,13})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={1}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const (constant s32[3,4] s32[3,4] {
{ 11, 13, 15, 17 },
{ 11, 14, 14, 17 },
{ 14, 14, 14, 14 }
}) %indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather_Negative) {
std::string hlo_text = R"(
HloModule AddBroadcastedVectorWithGather
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})
constant_vect = s32[5] constant({10,11,12,13,14})
constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={0}
indices = s32[5] parameter(0)
gather = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT add = s32[5,4] add(gather, constant_broadcasted)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, RegularUnaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input = f32[100] parameter(0)
ROOT tanh = f32[100] tanh(input)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%tanh");
}
TEST_F(IndexedArrayAnalysisTest, RegularBinaryOp) {
std::string hlo_text = R"(
HloModule RegularUnaryOp
ENTRY main {
input0 = f32[100] parameter(0)
input1 = f32[100] parameter(1)
ROOT add = f32[100] add(input0, input1)
}
)";
AssertArrayForRootExpressionIs(hlo_text, "%add");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_lhs = s32[5,4] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,4}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[3,3] s32[3,3] {
{ 70, 80, 90 },
{ 158, 184, 210 },
{ 246, 288, 330 } })
%indices 0->[0]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[3,3] constant({{1,2,3},{4,5,6},{7,8,9}})
indices = s32[5] parameter(0)
dot_lhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,3] s32[4,3] {
{ 84, 99, 114 },
{ 96, 114, 132 },
{ 108, 129, 150 },
{ 120, 144, 168 } })
%indices 0->[1]))");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[3,5] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 38, 44, 50, 56 },
{ 83, 98, 113, 128 },
{ 128, 152, 176, 200 },
{ 173, 206, 239, 272 } })
%indices 1->[1])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})
indices = s32[5] parameter(0)
dot_rhs = s32[5,3] gather(gather_operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,3}
ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[4,4] s32[4,4] {
{ 14, 32, 50, 68 },
{ 32, 77, 122, 167 },
{ 50, 122, 194, 266 },
{ 68, 167, 266, 365 } })
%indices 1->[0])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[2,3,2] constant({{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}})
dot_lhs_constant = s32[2,2,3] constant({{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}})
indices = s32[4] parameter(0)
dot_rhs = s32[2,3,4] gather(gather_operand, indices),
offset_dims={0,1},
collapsed_slice_dims={2},
start_index_map={2},
index_vector_dim=1,
slice_sizes={2,3,1}
ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs),
lhs_contracting_dims={2}, rhs_contracting_dims={1},
lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"(
(scalar-indexed-const
(constant s32[2,2,2] s32[2,2,2] {
{ { 22, 28 },
{ 49, 64 } },
{ { 220, 244 },
{ 301, 334 } } })
%indices 3->[2])
)");
}
TEST_F(IndexedArrayAnalysisTest, DotOpNegative) {
std::string hlo_text = R"(
HloModule DotOp
ENTRY main {
gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})
dot_rhs_constant = s32[2,3] constant({{1,2,3},{4,5,6}})
indices = s32[2] parameter(0)
dot_lhs = s32[3,2] gather(gather_operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3,1}
ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
AssertArrayWithConstantsForRootExpressionIs(hlo_text, "%dot");
}
}
} |
1,961 | cpp | tensorflow/tensorflow | hlo_pass_pipeline | third_party/xla/xla/service/hlo_pass_pipeline.cc | third_party/xla/xla/service/hlo_pass_pipeline_test.cc | #ifndef XLA_SERVICE_HLO_PASS_PIPELINE_H_
#define XLA_SERVICE_HLO_PASS_PIPELINE_H_
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/compilation_stats.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/types.h"
namespace xla {
class PhaseOrderPipeline;
// An ordered sequence of HLO passes (possibly including nested pipelines)
// that is run over an HloModule or HloModuleGroup. Between passes it runs
// registered invariant checkers, records per-pass metadata on the module, and
// optionally dumps HLO according to debug options.
class HloPassPipeline : public HloPassInterface {
 public:
  // `compilation_stats` may be null; in that case a no-op stats collector is
  // created so the rest of the code can call into it unconditionally.
  explicit HloPassPipeline(const std::string& name,
                           CompilationStats* compilation_stats = nullptr)
      : name_(name), compilation_stats_(compilation_stats) {
    if (compilation_stats == nullptr) {
      empty_compilation_stats_ = CompilationStats::MakeNoopStats();
      compilation_stats_ = empty_compilation_stats_.get();
    }
  }
  absl::string_view name() const override { return name_; }

  // Constructs a pass of type T in place and appends it to the pipeline;
  // returns a reference so callers can configure the new pass further.
  // Must not be called after Run/RunOnModuleGroup has been invoked.
  template <typename T, typename... Args>
  T& AddPass(Args&&... args) {
    CHECK(!run_called_) << "AddPass cannot be called after Run";
    auto pass = new T(std::forward<Args>(args)...);
    passes_.push_back(std::unique_ptr<T>(pass));
    return *pass;
  }

  // Registers an invariant checker. Checkers run before the first pass and
  // after every pass that changed the module, and must not mutate the HLO.
  template <typename T, typename... Args>
  T& AddInvariantChecker(Args&&... args) {
    CHECK(!run_called_) << "AddInvariantChecker cannot be called after Run";
    auto pass = new T(std::forward<Args>(args)...);
    invariant_checkers_.push_back(std::unique_ptr<T>(pass));
    return *pass;
  }

  // Same as AddInvariantChecker, but compiled out in NDEBUG builds.
  template <typename T, typename... Args>
  void AddInvariantCheckerDebug(Args&&... args) {
#ifndef NDEBUG
    AddInvariantChecker<T>(std::forward<Args>(args)...);
#endif
  }

  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  using HloPassInterface::RunOnModuleGroup;
  absl::StatusOr<bool> RunOnModuleGroup(
      HloModuleGroup* module_group,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  bool IsPassPipeline() override { return true; }

  // Number of passes added so far (invariant checkers not counted).
  int PassesSize() { return passes_.size(); }
  // Direct access to the pass at `index`, in insertion order.
  HloPassInterface& GetPass(int index) { return *passes_[index]; }

 private:
  // Returns the subset of passes_ to run, honoring the debug options
  // --xla_disable_all_hlo_passes / --xla_disable_hlo_passes /
  // --xla_enable_hlo_passes_only.
  std::vector<HloPassInterface*> GetEnabledPasses(
      const DebugOptions& debug_options);
  // Dumps HLO between passes (if enabled) and records the produced dump
  // filenames in the module metadata.
  void MaybeDumpHloAndSaveFilenames(HloModuleGroup& module_group,
                                    absl::string_view after_pass_name,
                                    absl::string_view before_pass_name);
  void MaybeDumpHloAndSaveFilenames(HloModule& module,
                                    absl::string_view after_pass_name,
                                    absl::string_view before_pass_name);

  // Runs all registered invariant checkers; on failure the returned status is
  // annotated with `after_pass_name` to identify the offending pass.
  template <typename HloT>
  absl::Status RunInvariantCheckers(HloT* hlo,
                                    absl::string_view after_pass_name) {
    return RunInvariantCheckers(hlo, after_pass_name, {});
  }
  template <typename HloT>
  absl::Status RunInvariantCheckers(
      HloT* hlo, absl::string_view after_pass_name,
      const absl::flat_hash_set<absl::string_view>& execution_threads);

  // Shared driver for Run/RunOnModuleGroup; HloT is HloModule or
  // HloModuleGroup.
  template <typename HloT>
  absl::StatusOr<bool> RunPassesInternal(
      HloT* hlo, const DebugOptions& debug_options,
      const absl::flat_hash_set<absl::string_view>& execution_threads);

  // Dispatches a pass to the right entry point for the HLO container type and
  // calls Cleanup() on it afterwards.
  static absl::StatusOr<bool> RunHelper(
      HloPassInterface* pass, HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) {
    TF_ASSIGN_OR_RETURN(bool changed, pass->Run(module, execution_threads));
    module->Cleanup();
    return changed;
  }
  static absl::StatusOr<bool> RunHelper(
      HloPassInterface* pass, HloModuleGroup* module_group,
      const absl::flat_hash_set<absl::string_view>& execution_threads) {
    TF_ASSIGN_OR_RETURN(
        bool changed, pass->RunOnModuleGroup(module_group, execution_threads));
    module_group->Cleanup();
    return changed;
  }

  const std::string name_;
  std::vector<std::unique_ptr<HloPassInterface>> passes_;
  std::vector<std::unique_ptr<HloPassInterface>> invariant_checkers_;
  // Set on first Run/RunOnModuleGroup; guards against late AddPass calls.
  bool run_called_ = false;
  CompilationStats* compilation_stats_;
  // Owns the no-op stats object when the caller supplied none.
  std::unique_ptr<CompilationStats> empty_compilation_stats_;
  // NOTE(review): friend presumably needs private access to passes_ for
  // phase-ordering experiments/tests -- confirm.
  friend class ::xla::PhaseOrderPipeline;
};
}
#endif
#include "xla/service/hlo_pass_pipeline.h"
#include <functional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace {
// Opens a new pass-metadata record on `module`, tagging it with the pass and
// pipeline names. Metadata recording is not expected to fail, hence
// TF_CHECK_OK.
void RecordPassStartMetadata(HloModule& module, const std::string& pass_name,
                             const std::string& pipeline_name) {
  module.metadata()->RecordPassStart();
  TF_CHECK_OK(module.metadata()->set_current_pass_name(pass_name));
  TF_CHECK_OK(module.metadata()->set_current_pass_pipeline_name(pipeline_name));
}

// Group overload: records start metadata on every module of the group.
void RecordPassStartMetadata(HloModuleGroup& module_group,
                             const std::string& pass_name,
                             const std::string& pipeline_name) {
  for (HloModule* module : module_group.modules()) {
    RecordPassStartMetadata(*module, pass_name, pipeline_name);
  }
}

// Closes the current pass-metadata record on `module`. `pass_name` is unused
// here (the name was already set at pass start); kept for symmetry with
// RecordPassStartMetadata.
absl::Status AttemptRecordPassEndMetadata(HloModule& module,
                                          const std::string& pass_name,
                                          bool module_changed) {
  TF_RETURN_IF_ERROR(
      module.metadata()->set_current_pass_module_id(module.unique_id()));
  TF_RETURN_IF_ERROR(
      module.metadata()->set_current_pass_module_changed(module_changed));
  TF_RETURN_IF_ERROR(module.metadata()->RecordPassEnd());
  return absl::OkStatus();
}

// Crash-on-failure wrapper around AttemptRecordPassEndMetadata.
void RecordPassEndMetadata(HloModule& module, const std::string& pass_name,
                           bool module_changed) {
  absl::Status status =
      AttemptRecordPassEndMetadata(module, pass_name, module_changed);
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
}

// Group overload: before closing its own record, each module records the
// unique ids of every module in the group (including itself).
absl::Status AttemptRecordPassEndMetadata(HloModuleGroup& module_group,
                                          const std::string& pass_name,
                                          bool module_changed) {
  for (HloModule* module : module_group.modules()) {
    for (HloModule* other_module : module_group.modules()) {
      TF_RETURN_IF_ERROR(
          module->metadata()->add_current_pass_module_group_module_id(
              other_module->unique_id()));
    }
    TF_RETURN_IF_ERROR(
        AttemptRecordPassEndMetadata(*module, pass_name, module_changed));
  }
  return absl::OkStatus();
}

// Crash-on-failure wrapper for the group overload.
void RecordPassEndMetadata(HloModuleGroup& module_group,
                           const std::string& pass_name, bool module_changed) {
  absl::Status status =
      AttemptRecordPassEndMetadata(module_group, pass_name, module_changed);
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
}
}
// Runs every registered invariant checker over `hlo`. A failing checker's
// status is returned with "\n\nFailed after <after_pass_name>" appended so
// logs identify which pass broke the invariant; the offending HLO is dumped
// at VLOG(2). Checkers must report changed=false.
template <typename HloT>
absl::Status HloPassPipeline::RunInvariantCheckers(
    HloT* hlo, absl::string_view after_pass_name,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  for (auto& invariant_checker : invariant_checkers_) {
    VLOG(1) << "    Invariant checker " << invariant_checker->name();
    absl::StatusOr<bool> changed_status =
        RunHelper(invariant_checker.get(), hlo, execution_threads);
    VLOG(1) << "    Invariant checker done " << invariant_checker->name();
    if (!changed_status.ok()) {
      VLOG(2) << "Failed invariant check:";
      XLA_VLOG_LINES(2, hlo->ToString());
      return tsl::errors::CreateWithUpdatedMessage(
          changed_status.status(),
          absl::StrCat(changed_status.status().message(), "\n\nFailed after ",
                       after_pass_name));
    }
    TF_RET_CHECK(!changed_status.value())
        << "invariant checkers must not change the graph";
  }
  return absl::OkStatus();
}
namespace {
std::string UniqueId(const HloModule& mod) {
return std::to_string(mod.unique_id());
}
std::string UniqueId(const HloModuleGroup& group) {
return absl::StrJoin(group.modules(), "-",
[](std::string* out, const HloModule* mod) {
out->append(std::to_string(mod->unique_id()));
});
}
}
// Core pipeline driver shared by Run and RunOnModuleGroup. HloT is either
// HloModule or HloModuleGroup; the RunHelper / metadata / dump helpers are
// overloaded on both. Returns true iff any pass changed the HLO.
template <typename HloT>
absl::StatusOr<bool> HloPassPipeline::RunPassesInternal(
    HloT* hlo, const DebugOptions& debug_options,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  auto passes = GetEnabledPasses(debug_options);
  // Regex controlling which passes trigger an HLO dump (see loop below).
  std::string dump_regex = debug_options.xla_dump_hlo_pass_re();
  static constexpr absl::string_view kPipelineStart = "pipeline-start";
  static constexpr absl::string_view kPipelineEnd = "pipeline-end";
  std::string pipeline_name = std::string(name());
  tsl::profiler::ScopedAnnotation annotation{[&] {
    return absl::StrFormat("XlaPassPipeline:#name=%s,module=%s,program_id=%s#",
                           pipeline_name, hlo->name(), UniqueId(*hlo));
  }};
  // Invariants must already hold before the first pass runs.
  TF_RETURN_IF_ERROR(
      RunInvariantCheckers(hlo, kPipelineStart, execution_threads));
  // Record a synthetic "pipeline-start" pass so metadata/dumps capture the
  // pre-pipeline state of the module.
  RecordPassStartMetadata(*hlo, std::string(kPipelineStart), pipeline_name);
  MaybeDumpHloAndSaveFilenames(*hlo,
                               kPipelineStart,
                               passes.empty()
                                   ? kPipelineEnd
                                   : passes.front()->name());
  RecordPassEndMetadata(*hlo, std::string(kPipelineStart),
                        false);
  bool changed = false;
  for (int i = 0; i < passes.size(); i++) {
    HloPassInterface* pass = passes[i];
    std::string pass_name = std::string(pass->name());
    XLA_SCOPED_LOGGING_TIMER(absl::StrCat("HLO pass: ", pass_name));
    tsl::profiler::ScopedAnnotation annotation{[&] {
      return absl::StrFormat("XlaPass:#name=%s,module=%s,program_id=%s#",
                             pass_name, hlo->name(), UniqueId(*hlo));
    }};
    VLOG(1) << "  HLO pass " << pass_name;
    VLOG(2) << "  Module hash " << absl::HashOf(*hlo);
    // Nested pipelines keep their own stats; only leaf passes are timed here.
    if (!pass->IsPassPipeline()) {
      compilation_stats_->StartPass(pass_name);
    }
    RecordPassStartMetadata(*hlo, pass_name, pipeline_name);
    auto status_or_changed = RunHelper(pass, hlo, execution_threads);
    if (auto status = status_or_changed.status(); !status.ok()) {
      compilation_stats_->RecordPassError(
          pass_name, absl::StatusCodeToString(status.code()));
    }
    TF_ASSIGN_OR_RETURN(bool pass_changed, status_or_changed);
    // With the catch-all ".*" regex, only dump after passes that actually
    // changed the HLO; a more specific regex dumps unconditionally.
    if (!dump_regex.empty() && (pass_changed || dump_regex != ".*")) {
      MaybeDumpHloAndSaveFilenames(*hlo,
                                   pass_name,
                                   i + 1 >= passes.size()
                                       ? kPipelineEnd
                                       : passes[i + 1]->name());
    }
    RecordPassEndMetadata(*hlo, pass_name, pass_changed);
    changed |= pass_changed;
    // Invariant checkers are re-run only after passes that changed the HLO.
    if (pass_changed) {
      VLOG(3) << "  Pass caused changes " << pass_name;
      auto status = RunInvariantCheckers(hlo, pass_name, execution_threads);
      if (!status.ok()) {
        compilation_stats_->RecordPassError(
            pass_name, absl::StatusCodeToString(status.code()));
      }
      TF_RETURN_IF_ERROR(status);
    }
    if (!pass->IsPassPipeline()) {
      compilation_stats_->EndPass(pass_name);
    }
  }
  return changed;
}
// Selects which of passes_ actually run, based on debug options:
//   --xla_disable_all_hlo_passes disables everything;
//   --xla_disable_hlo_passes blacklists passes (or this whole pipeline) by
//     name; --xla_enable_hlo_passes_only whitelists them. The two lists are
//     mutually exclusive (CHECK below).
std::vector<HloPassInterface*> HloPassPipeline::GetEnabledPasses(
    const DebugOptions& debug_options) {
  if (debug_options.xla_disable_all_hlo_passes()) {
    VLOG(1) << "*All* passes disabled by --xla_disable_all_hlo_passes.";
    return {};
  }
  absl::flat_hash_set<std::string> disabled_pass_names(
      debug_options.xla_disable_hlo_passes().begin(),
      debug_options.xla_disable_hlo_passes().end());
  absl::flat_hash_set<std::string> enabled_pass_names(
      debug_options.xla_enable_hlo_passes_only().begin(),
      debug_options.xla_enable_hlo_passes_only().end());
  if (!disabled_pass_names.empty()) {
    VLOG(1) << "Passes disabled by --xla_disable_hlo_passes: "
            << absl::StrJoin(disabled_pass_names, ", ");
  }
  if (!enabled_pass_names.empty()) {
    VLOG(1) << "Passes enabled by --xla_enable_hlo_passes_only: "
            << absl::StrJoin(enabled_pass_names, ", ");
  }
  CHECK(disabled_pass_names.empty() || enabled_pass_names.empty());
  // Disabling the pipeline by its own name disables every contained pass.
  if (disabled_pass_names.contains(name())) {
    VLOG(1) << "Disable the full pass: " << name();
    return {};
  }
  // Enabling the pipeline by name runs all contained passes, so the
  // per-pass whitelist filter below is cleared.
  if (enabled_pass_names.contains(name())) {
    VLOG(1) << "Enable the full pass: " << name();
    enabled_pass_names.clear();
  }
  std::vector<HloPassInterface*> enabled_passes;
  if (!enabled_pass_names.empty()) {
    for (auto& pass : passes_) {
      if (enabled_pass_names.contains(pass->name())) {
        enabled_passes.push_back(pass.get());
      }
    }
  } else {
    for (auto& pass : passes_) {
      if (!disabled_pass_names.contains(pass->name())) {
        enabled_passes.push_back(pass.get());
      }
    }
  }
  return enabled_passes;
}
// Dumps `module` between two passes (when dumping is enabled by debug
// options) and records every produced dump filename in the module's current
// pass metadata. Metadata-recording failures are fatal, matching the rest of
// the metadata plumbing.
void HloPassPipeline::MaybeDumpHloAndSaveFilenames(
    HloModule& module, absl::string_view after_pass_name,
    absl::string_view before_pass_name) {
  const auto dump_filenames = DumpHloModuleBetweenPassesIfEnabled(
      name(), before_pass_name, after_pass_name, module);
  for (const std::string& dump_filename : dump_filenames) {
    const absl::Status record_status =
        module.metadata()->add_current_pass_dump_filename(dump_filename);
    if (!record_status.ok()) {
      LOG(FATAL) << record_status;
    }
  }
}
// Group overload: simply forwards to the single-module overload for each
// member of the group.
void HloPassPipeline::MaybeDumpHloAndSaveFilenames(
    HloModuleGroup& module_group, absl::string_view after_pass_name,
    absl::string_view before_pass_name) {
  for (HloModule* member : module_group.modules()) {
    MaybeDumpHloAndSaveFilenames(*member, after_pass_name, before_pass_name);
  }
}
// Single-module entry point: marks the pipeline as started (freezing
// AddPass) and forwards to RunPassesInternal with the module's own debug
// options.
absl::StatusOr<bool> HloPassPipeline::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  run_called_ = true;
  VLOG(1) << "Running HLO pass pipeline on module " << module->name() << ": "
          << name();
  return RunPassesInternal(module, module->config().debug_options(),
                           execution_threads);
}
// Module-group entry point. An empty group trivially reports no change.
// Debug options are taken from the group's first module.
absl::StatusOr<bool> HloPassPipeline::RunOnModuleGroup(
    HloModuleGroup* module_group,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  run_called_ = true;
  VLOG(1) << "Running HLO pass pipeline on module group "
          << module_group->name() << ": " << name();
  if (module_group->modules().empty()) {
    VLOG(1) << "Module group is empty. Nothing to do.";
    return false;
  }
  return RunPassesInternal(module_group,
                           module_group->module(0).config().debug_options(),
                           execution_threads);
}
} | #include "xla/service/hlo_pass_pipeline.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using ::testing::StrEq;
// Test fixture adding a helper that parses several HLO strings into one
// verified HloModuleGroup named after the current test.
class HloPassPipelineTest : public HloTestBase {
 protected:
  absl::StatusOr<HloModuleGroup> ParseModuleGroup(
      absl::Span<const std::string> hlo_strings) {
    HloModuleGroup group(TestName());
    for (const std::string& hlo_string : hlo_strings) {
      TF_ASSIGN_OR_RETURN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
      group.push_back(std::move(module));
    }
    return std::move(group);
  }
};
// Module pass that renames every instruction called "foo" to "bar";
// reports changed=true iff a rename happened. Used to observe which
// computations a pipeline visits.
class FooToBarModulePass : public HloModulePass {
  absl::string_view name() const override { return "foo2bar"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(HloModule* module,
                           const absl::flat_hash_set<absl::string_view>&
                               execution_threads) override {
    bool changed = false;
    for (HloComputation* computation :
         module->computations(execution_threads)) {
      for (HloInstruction* instruction : computation->instructions()) {
        if (instruction->name() == "foo") {
          instruction->SetAndSanitizeName("bar");
          changed = true;
        }
      }
    }
    return changed;
  }
};

// Module pass that reverses the root instruction's name in every visited
// computation; always reports changed=true when any computation is visited.
// Lets tests detect exactly which execution threads were processed.
class ReverseStringModulePass : public HloModulePass {
  absl::string_view name() const override { return "reverse"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(HloModule* module,
                           const absl::flat_hash_set<absl::string_view>&
                               execution_threads) override {
    bool changed = false;
    for (HloComputation* computation :
         module->computations(execution_threads)) {
      HloInstruction* root = computation->root_instruction();
      std::string name(root->name());
      std::reverse(name.begin(), name.end());
      root->SetAndSanitizeName(name);
      changed = true;
    }
    return changed;
  }
};

// Module-group pass renaming "baz" instructions to "qux" across every
// module of the group.
class BazToQuxModuleGroupPass : public HloModuleGroupPass {
  absl::string_view name() const override { return "baz2qux"; }
  using HloPassInterface::RunOnModuleGroup;
  absl::StatusOr<bool> RunOnModuleGroup(
      HloModuleGroup* module_group,
      const absl::flat_hash_set<absl::string_view>& execution_threads)
      override {
    bool changed = false;
    for (HloModule* module : module_group->modules()) {
      for (HloComputation* computation :
           module->computations(execution_threads)) {
        for (HloInstruction* instruction : computation->instructions()) {
          if (instruction->name() == "baz") {
            instruction->SetAndSanitizeName("qux");
            changed = true;
          }
        }
      }
    }
    return changed;
  }
};

// Invariant-checker stand-in: fails with an Internal error whenever the
// module contains an instruction named "bar"; never changes the module.
class BarBlowerUpper : public HloModulePass {
  absl::string_view name() const override { return "bar-blower-upper"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(HloModule* module,
                           const absl::flat_hash_set<absl::string_view>&
                               execution_threads) override {
    for (HloComputation* computation :
         module->computations(execution_threads)) {
      for (HloInstruction* instruction : computation->instructions()) {
        if (instruction->name() == "bar") {
          return Internal("Module has instruction named bar");
        }
      }
    }
    return false;
  }
};
// A pass that performs a rename must propagate changed=true through the
// pipeline, and the rename must be visible on the module.
TEST_F(HloPassPipelineTest, ModulePassChanged) {
  const std::string module_str = R"(
HloModule ModulePassChanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<FooToBarModulePass>();
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->name(), "foo");
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_EQ(root->name(), "bar");
}

// When no instruction matches, the pipeline must report changed=false.
TEST_F(HloPassPipelineTest, ModulePassUnchanged) {
  const std::string module_str = R"(
HloModule ModulePassUnchanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT blahblah = f32[] multiply(a, b)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<FooToBarModulePass>();
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
  EXPECT_FALSE(changed);
}
// Running the pipeline restricted to {"parallel_thread"} must only touch the
// async computation on that thread ("foo" -> "oof"); the main-thread root
// ("baz") stays untouched.
TEST_F(HloPassPipelineTest, ModulePassChangedForParallelThread) {
  const std::string module_str = R"(
HloModule ModulePassChanged
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10], p1: f32[10]) -> f32[10] {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
ROOT %baz = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<ReverseStringModulePass>();
  HloInstruction* main_root = module->entry_computation()->root_instruction();
  HloInstruction* parallel_thread_root =
      main_root->async_wrapped_computation()->root_instruction();
  EXPECT_EQ(main_root->name(), "baz");
  EXPECT_EQ(parallel_thread_root->name(), "foo");
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          pipeline.Run(module.get(), {"parallel_thread"}));
  EXPECT_TRUE(changed);
  EXPECT_EQ(main_root->name(), "baz");
  EXPECT_EQ(parallel_thread_root->name(), "oof");
}

// With no thread restriction, computations on every execution thread are
// rewritten ("baz" -> "zab" and "foo" -> "oof").
TEST_F(HloPassPipelineTest, ModulePassChangedForAllexecution_threads) {
  const std::string module_str = R"(
HloModule ModulePassChanged
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10], p1: f32[10]) -> f32[10] {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
ROOT %baz = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<ReverseStringModulePass>();
  HloInstruction* main_root = module->entry_computation()->root_instruction();
  HloInstruction* parallel_thread_root =
      main_root->async_wrapped_computation()->root_instruction();
  EXPECT_EQ(main_root->name(), "baz");
  EXPECT_EQ(parallel_thread_root->name(), "foo");
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_EQ(main_root->name(), "zab");
  EXPECT_EQ(parallel_thread_root->name(), "oof");
}
// A pipeline mixing a module-group pass and a plain module pass must apply
// both across the whole group.
TEST_F(HloPassPipelineTest, MixedPipeline) {
  const std::string module_0_str = R"(
HloModule MixedPipeline.1
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT baz = f32[] multiply(a, b)
}
)";
  const std::string module_1_str = R"(
HloModule MixedPipeline.0
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup module_group,
                          ParseModuleGroup({module_0_str, module_1_str}));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<BazToQuxModuleGroupPass>();
  pipeline.AddPass<FooToBarModulePass>();
  HloInstruction* root0 =
      module_group.module(0).entry_computation()->root_instruction();
  HloInstruction* root1 =
      module_group.module(1).entry_computation()->root_instruction();
  EXPECT_EQ(root0->name(), "baz");
  EXPECT_EQ(root1->name(), "foo");
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          pipeline.RunOnModuleGroup(&module_group));
  EXPECT_TRUE(changed);
  EXPECT_EQ(root0->name(), "qux");
  EXPECT_EQ(root1->name(), "bar");
}
// Invariant checkers run before the first pass and after any changing pass:
// a clean module passes; a pass that introduces "bar" fails after that pass;
// a module already containing "bar" fails at pipeline-start.
TEST_F(HloPassPipelineTest, InvariantChecker) {
  const std::string module_str = R"(
HloModule InvariantChecker
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  {
    // No "bar" anywhere: checker passes, nothing changes.
    HloPassPipeline pipeline(TestName());
    pipeline.AddInvariantChecker<BarBlowerUpper>();
    TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
    EXPECT_FALSE(changed);
  }
  {
    // foo2bar introduces "bar"; the post-pass check must fail and the error
    // must name the offending pass.
    HloPassPipeline pipeline(TestName());
    pipeline.AddInvariantChecker<BarBlowerUpper>();
    pipeline.AddPass<FooToBarModulePass>();
    absl::Status status = pipeline.Run(module.get()).status();
    ASSERT_IS_NOT_OK(status);
    EXPECT_THAT(status.message(),
                ::testing::HasSubstr("Module has instruction named bar"));
    EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed after foo2bar"));
  }
  {
    // The module still contains "bar" from the previous run, so the check at
    // pipeline start must fail.
    HloPassPipeline pipeline(TestName());
    pipeline.AddInvariantChecker<BarBlowerUpper>();
    absl::Status status = pipeline.Run(module.get()).status();
    ASSERT_IS_NOT_OK(status);
    EXPECT_THAT(status.message(),
                ::testing::HasSubstr("Module has instruction named bar"));
    EXPECT_THAT(status.message(),
                ::testing::HasSubstr("Failed after pipeline-start"));
  }
}
// Running a module-group pass through the single-module entry point must fail
// with a descriptive error rather than silently doing nothing.
TEST_F(HloPassPipelineTest, ModuleGroupPassOnModule) {
  const std::string module_str = R"(
HloModule ModuleGroupPassOnModule
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<BazToQuxModuleGroupPass>();
  absl::Status status = pipeline.Run(module.get()).status();
  ASSERT_IS_NOT_OK(status);
  EXPECT_THAT(
      status.message(),
      ::testing::HasSubstr("Module group pass cannot be run on a module"));
}
// Verifies the per-pass metadata recorded on every module of a group:
// pass ids/names ("pipeline-start" plus the two passes), pipeline name,
// changed flag, module id, group member ids, and ordered timestamps.
TEST_F(HloPassPipelineTest, SetHloModuleMetadata) {
  HloModuleGroup module_group(TestName());
  module_group.push_back(CreateNewVerifiedModule());
  module_group.push_back(CreateNewVerifiedModule());
  HloPassPipeline pipeline(TestName());
  pipeline.AddPass<BazToQuxModuleGroupPass>();
  pipeline.AddPass<FooToBarModulePass>();
  TF_ASSERT_OK(pipeline.RunOnModuleGroup(&module_group).status());
  ASSERT_THAT(module_group.modules(), SizeIs(2));
  std::vector<std::string> pass_names = {"pipeline-start", "baz2qux",
                                         "foo2bar"};
  std::string pipeline_name = std::string(pipeline.name());
  for (const HloModule* module : module_group.modules()) {
    const HloModuleMetadataProto& metadata = module->metadata().proto();
    EXPECT_EQ(metadata.canonical_module_id(), module->unique_id());
    EXPECT_EQ(metadata.module_group_name(), module_group.name());
    ASSERT_THAT(metadata.pass_metadata(), SizeIs(3));
    for (int pass = 0; pass < metadata.pass_metadata().size(); pass++) {
      const HloPassMetadata& pass_metadata = metadata.pass_metadata(pass);
      EXPECT_NE(pass_metadata.pass_id(), 0);
      EXPECT_THAT(pass_metadata.pass_name(), StrEq(pass_names[pass]));
      EXPECT_THAT(pass_metadata.pipeline_name(), StrEq(pipeline_name));
      // Neither test module contains "baz"/"foo", so no pass reports a change.
      EXPECT_FALSE(pass_metadata.module_changed());
      EXPECT_EQ(pass_metadata.module_id(), module->unique_id());
      EXPECT_THAT(pass_metadata.module_group_module_ids(),
                  ElementsAre(module_group.module(0).unique_id(),
                              module_group.module(1).unique_id()));
      EXPECT_GT(pass_metadata.start_timestamp_usec(), 0);
      EXPECT_LE(pass_metadata.start_timestamp_usec(),
                pass_metadata.end_timestamp_usec());
    }
  }
}
}
} |
1,962 | cpp | tensorflow/tensorflow | select_and_scatter_expander | third_party/xla/xla/service/select_and_scatter_expander.cc | third_party/xla/xla/service/select_and_scatter_expander_test.cc | #ifndef XLA_SERVICE_SELECT_AND_SCATTER_EXPANDER_H_
#define XLA_SERVICE_SELECT_AND_SCATTER_EXPANDER_H_
#include "xla/service/op_expander_pass.h"
namespace xla {
// Rewrites kSelectAndScatter instructions into a composition of a variadic
// reduce-window (that tracks the selected element's coordinates) followed by
// a scatter, for backends without a native select-and-scatter lowering.
class SelectAndScatterExpander : public OpExpanderPass {
 public:
  absl::string_view name() const override {
    return "select_and_scatter_expander";
  }

 protected:
  // Matches every select-and-scatter instruction.
  bool InstructionMatchesPattern(HloInstruction* inst) override;
  // Builds the replacement graph; returns nullptr when the select
  // computation cannot be inlined into the new reduction.
  absl::StatusOr<HloInstruction*> ExpandInstruction(
      HloInstruction* inst) override;
};
}
#endif
#include "xla/service/select_and_scatter_expander.h"
#include <numeric>
#include <vector>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/call_inliner.h"
namespace xla {
// Lowers select-and-scatter into:
//   1) a variadic reduce-window over (operand, iota_0, ..., iota_{r-1}) that
//      selects, per window, the winning value together with the winning
//      element's coordinates in the operand, and
//   2) a scatter of `source` into broadcast(init_value) at those coordinates.
// Returns nullptr if the user's select computation cannot be inlined.
absl::StatusOr<HloInstruction*> SelectAndScatterExpander::ExpandInstruction(
    HloInstruction* instruction) {
  auto* computation = instruction->parent();
  auto* sas = Cast<HloSelectAndScatterInstruction>(instruction);
  auto* operand = sas->mutable_operand(0);
  auto operand_shape = operand->shape();
  auto* source = sas->mutable_operand(1);
  auto* select = sas->select();
  auto* init_value = sas->mutable_operand(2);
  // S32 companions of the operand/source shapes carry element coordinates.
  const auto iota_shape = ShapeUtil::ChangeElementType(operand_shape, S32);
  const auto scalar_operand =
      ShapeUtil::MakeScalarShape(operand->shape().element_type());
  const auto scalar_iota =
      ShapeUtil::MakeScalarShape(iota_shape.element_type());
  const auto source_shape = source->shape();
  const Shape iota_shape_reduced =
      ShapeUtil::ChangeElementType(source_shape, S32);
  // One iota per operand dimension; together they encode the coordinates of
  // every operand element.
  std::vector<HloInstruction*> iotas;
  iotas.reserve(operand_shape.rank());
  for (int i = 0; i < operand_shape.rank(); ++i) {
    iotas.push_back(
        computation->AddInstruction(HloInstruction::CreateIota(iota_shape, i)));
  }
  // Builds the reduction computation over (value, coord_0, ..., coord_{r-1})
  // tuples. Coordinate -1 (the init value of the iota operands below) marks
  // "no element selected yet" / padding.
  HloComputation* new_comp = [&]() -> HloComputation* {
    HloComputation::Builder builder(
        absl::StrCat(select->name(), ".reduce_window"));
    auto rhs_begin = static_cast<int64_t>(iotas.size() + 1);
    auto first_iota_index = 1;
    auto* neg_one = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
    auto* first_lhs_iota =
        builder.AddInstruction(HloInstruction::CreateParameter(
            first_iota_index, scalar_iota, "iota_lhs"));
    // NOTE(review): the debug name "iota_lhs" below is attached to the *rhs*
    // iota parameter -- looks like a copy/paste slip for "iota_rhs". It only
    // affects the parameter's printed name; confirm before changing.
    auto* first_rhs_iota =
        builder.AddInstruction(HloInstruction::CreateParameter(
            first_iota_index + rhs_begin, scalar_iota, "iota_lhs"));
    // iota != -1 <=> the corresponding side holds a real element.
    auto* lhs_first_in_window =
        builder.AddInstruction(HloInstruction::CreateCompare(
            sas->select()->root_instruction()->shape(), first_lhs_iota, neg_one,
            Comparison::Direction::kNe, {}));
    auto* rhs_first_in_window =
        builder.AddInstruction(HloInstruction::CreateCompare(
            sas->select()->root_instruction()->shape(), first_rhs_iota, neg_one,
            Comparison::Direction::kNe, {}));
    auto rhs_not_first_in_window = builder.AddInstruction(
        HloInstruction::CreateUnary(sas->select()->root_instruction()->shape(),
                                    HloOpcode::kNot, rhs_first_in_window));
    auto* operand_lhs = builder.AddInstruction(
        HloInstruction::CreateParameter(0, scalar_operand, "operand_lhs"));
    auto* operand_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
        rhs_begin, scalar_operand, "operand_rhs"));
    // The user's select computation is invoked via a call that is inlined
    // below, so the new reduction computation is self-contained.
    auto* call = builder.AddInstruction(
        HloInstruction::CreateCall(sas->select()->root_instruction()->shape(),
                                   {operand_lhs, operand_rhs}, sas->select()));
    // Keep lhs iff (select(lhs, rhs) AND lhs is real) OR rhs is not real.
    auto* pred = builder.AddInstruction(HloInstruction::CreateBinary(
        call->shape(), HloOpcode::kAnd, call, lhs_first_in_window));
    pred = builder.AddInstruction(HloInstruction::CreateBinary(
        call->shape(), HloOpcode::kOr, pred, rhs_not_first_in_window));
    // Select the winning value and, with the same predicate, each coordinate.
    std::vector<HloInstruction*> result_tuple;
    result_tuple.push_back(builder.AddInstruction(HloInstruction::CreateTernary(
        scalar_operand, HloOpcode::kSelect, pred, operand_lhs, operand_rhs)));
    for (auto i = first_iota_index; i < rhs_begin; ++i) {
      xla::HloInstruction *iota_lhs, *iota_rhs;
      if (i == first_iota_index) {
        iota_lhs = first_lhs_iota;
        iota_rhs = first_rhs_iota;
      } else {
        iota_lhs = builder.AddInstruction(
            HloInstruction::CreateParameter(i, scalar_iota, "iota_lhs"));
        iota_rhs = builder.AddInstruction(HloInstruction::CreateParameter(
            i + rhs_begin, scalar_iota, "iota_rhs"));
      }
      result_tuple.push_back(
          builder.AddInstruction(HloInstruction::CreateTernary(
              scalar_iota, HloOpcode::kSelect, pred, iota_lhs, iota_rhs)));
    }
    builder.AddInstruction(HloInstruction::CreateTuple(result_tuple));
    auto* result = select->parent()->AddEmbeddedComputation(builder.Build());
    if (!CallInliner::Inline(call).ok()) {
      return nullptr;
    }
    return result;
  }();
  if (!new_comp) {
    return nullptr;
  }
  // Reduce-window over the operand plus all coordinate iotas; coordinate
  // init values are -1 ("nothing selected").
  auto num_reduce_values = iotas.size() + 1;
  std::vector<HloInstruction*> ops;
  ops.reserve(num_reduce_values);
  ops.push_back(operand);
  ops.insert(ops.end(), iotas.begin(), iotas.end());
  auto* neg_one = computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(-1)));
  std::vector<HloInstruction*> reduce_init_values;
  reduce_init_values.reserve(num_reduce_values);
  reduce_init_values.push_back(init_value);
  for (auto i = 0; i < iotas.size(); ++i) {
    reduce_init_values.push_back(neg_one);
  }
  std::vector<xla::Shape> shapes;
  shapes.reserve(num_reduce_values);
  shapes.push_back(source->shape());
  for (auto i = 0; i < iotas.size(); ++i) {
    shapes.push_back(iota_shape_reduced);
  }
  auto* reduce_window =
      computation->AddInstruction(HloInstruction::CreateReduceWindow(
          ShapeUtil::MakeTupleShape(shapes), ops, reduce_init_values,
          sas->window(), new_comp));
  // Reshape each per-window winning coordinate tensor to [..., 1] so they can
  // be concatenated into one index tensor along the trailing dimension.
  std::vector<HloInstruction*> iota_indices;
  std::vector<int64_t> broadcasted_iota_dims;
  broadcasted_iota_dims.reserve(iota_shape_reduced.rank() + 1);
  broadcasted_iota_dims.insert(broadcasted_iota_dims.end(),
                               iota_shape_reduced.dimensions().begin(),
                               iota_shape_reduced.dimensions().end());
  broadcasted_iota_dims.push_back(1);
  auto broadcasted_iota_shape = ShapeUtil::MakeShape(
      iota_shape_reduced.element_type(), broadcasted_iota_dims);
  for (int i = 1; i < num_reduce_values; ++i) {
    auto* element = computation->AddInstruction(
        HloInstruction::CreateGetTupleElement(reduce_window, i));
    iota_indices.push_back(computation->AddInstruction(
        HloInstruction::CreateReshape(broadcasted_iota_shape, element)));
  }
  // Scatter `source` into broadcast(init_value) at the winning coordinates:
  // every operand dimension is scattered, there is no update window.
  std::vector<int64_t> scatter_dims(operand->shape().rank());
  std::iota(scatter_dims.begin(), scatter_dims.end(), 0);
  auto* broadcasted_init_value = computation->AddInstruction(
      HloInstruction::CreateBroadcast(instruction->shape(), init_value, {}));
  std::vector<int64_t> concatenated_iotas_dims;
  concatenated_iotas_dims.reserve(iota_indices.front()->shape().rank());
  concatenated_iotas_dims.insert(concatenated_iotas_dims.end(),
                                 broadcasted_iota_dims.begin(),
                                 broadcasted_iota_dims.end());
  concatenated_iotas_dims.back() = static_cast<int64_t>(iota_indices.size());
  auto* indices = computation->AddInstruction(HloInstruction::CreateConcatenate(
      ShapeUtil::MakeShape(iota_shape.element_type(), concatenated_iotas_dims),
      iota_indices, iota_shape.rank()));
  // (update_window_dims={}, inserted_window_dims=scatter_dims,
  //  scatter_dims_to_operand_dims=scatter_dims,
  //  index_vector_dim = source rank -- the trailing dim of `indices`).
  ScatterDimensionNumbers dim_nums =
      HloScatterInstruction::MakeScatterDimNumbers(
          {},
          scatter_dims,
          scatter_dims,
          source->shape().rank());
  return computation->AddInstruction(HloInstruction::CreateScatter(
      sas->shape(), broadcasted_init_value,
      indices, source,
      sas->scatter(), dim_nums,
      false, false));
}
// The expander fires on every select-and-scatter instruction.
bool SelectAndScatterExpander::InstructionMatchesPattern(
    HloInstruction* instruction) {
  const bool is_select_and_scatter =
      instruction->opcode() == HloOpcode::kSelectAndScatter;
  return is_select_and_scatter;
}
} | #include "xla/service/select_and_scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
// Shared HLO for the tests below: a 4x5x1x1 f32 select-and-scatter with a
// 2x3x1x1 window, GE-select and add-scatter computations.
constexpr absl::string_view kModuleStr =
    R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
  %lhs.1 = f32[] parameter(0)
  %rhs.1 = f32[] parameter(1)
  ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
  %constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
  %constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
  %constant.2 = f32[] constant(0)
  ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
})";
// Test fixture for SelectAndScatterExpander rewrites.
class SelectAndScatterExpanderTest : public HloTestBase {
 protected:
  // Drops the layout of the named instruction so layout does not constrain
  // the rewrite under test.
  void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
    HloInstruction* inst = FindInstruction(module, inst_name);
    inst->mutable_shape()->clear_layout();
  }
};
// The expander must remove every select-and-scatter instruction.
TEST_F(SelectAndScatterExpanderTest, ReplacesSelectAndScatter) {
  // Parse first to confirm the module text is valid on its own.
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
  CHECK-NOT: select-and-scatter
  )");
}
// The expansion is expected to be expressed via reduce + scatter ops.
TEST_F(SelectAndScatterExpanderTest, CreatesReduceAndScatter) {
  // Parse first to confirm the module text is valid on its own.
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  RunAndFilecheckHloRewrite(kModuleStr, SelectAndScatterExpander(), R"(
  CHECK: reduce
  CHECK: scatter
  )");
}
}
} |
1,963 | cpp | tensorflow/tensorflow | value_range | third_party/xla/xla/service/value_range.cc | third_party/xla/xla/service/value_range_test.cc | #ifndef XLA_SERVICE_VALUE_RANGE_H_
#define XLA_SERVICE_VALUE_RANGE_H_
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/service/constant_value.h"
namespace xla {
// A (possibly empty) closed interval [min, max] over ConstantValue, used to
// track the value range of scalar HLO instructions.
class Range {
 public:
  // Default-constructed ranges are empty and not linear.
  Range()
      : min_(ConstantValue::GetZero(64, false)),
        max_(ConstantValue::GetZero(64, false)),
        empty_(true),
        is_linear_(false) {}
  // Non-empty interval [min, max]. `is_linear` records whether the tracked
  // value is known to change linearly (e.g. a loop induction variable).
  Range(const ConstantValue& min, const ConstantValue& max, bool is_linear)
      : min_(min), max_(max), empty_(false), is_linear_(is_linear) {}
  // Lower bound; only meaningful when !IsEmpty().
  const ConstantValue& min() const { return min_; }
  // Upper bound; only meaningful when !IsEmpty().
  const ConstantValue& max() const { return max_; }
  // True when the range holds no values.
  bool IsEmpty() const { return empty_; }
  // True when the range contains exactly one value (min == max).
  bool IsSingleValue() const { return !IsEmpty() && min_ == max_; }
  // True when the value is known to progress linearly.
  bool IsLinear() const { return is_linear_; }
  // The single value as signed/unsigned; nullopt unless IsSingleValue().
  std::optional<int64_t> GetSingleSignedValue() const;
  std::optional<int64_t> GetSingleUnsignedValue() const;
  // Debug string: "Empty" or "min: <min> max: <max>".
  std::string ToString() const;
 private:
  ConstantValue min_;
  ConstantValue max_;
  bool empty_;
  bool is_linear_;
};
Range RecursivelyIdentifyRange(
const HloInstruction* instr,
const absl::flat_hash_map<const HloInstruction*, Range>& predefined_ranges);
}
#endif
#include "xla/service/value_range.h"
#include <optional>
#include <string>
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
// Returns the signed point value when the range collapses to a single
// element; std::nullopt otherwise.
std::optional<int64_t> Range::GetSingleSignedValue() const {
  if (IsSingleValue()) {
    return min_.GetSignedValue();
  }
  return std::nullopt;
}
// Returns the unsigned point value when the range collapses to a single
// element; std::nullopt otherwise.
std::optional<int64_t> Range::GetSingleUnsignedValue() const {
  if (IsSingleValue()) {
    return min_.GetUnsignedValue();
  }
  return std::nullopt;
}
// Renders the range for logging: "Empty", or "min: <min> max: <max>".
std::string Range::ToString() const {
  return IsEmpty() ? std::string("Empty")
                   : absl::StrCat("min: ", min_.ToString(),
                                  " max: ", max_.ToString());
}
// Recursively infers the value range of `instr` by pattern-matching a small
// set of scalar operations (compare-LT, constant, add, select, subtract).
// `predefined_ranges` seeds the recursion with externally known ranges.
// Returns an empty Range whenever nothing can be proven.
Range RecursivelyIdentifyRange(
    const HloInstruction* instr,
    const absl::flat_hash_map<const HloInstruction*, Range>&
        predefined_ranges) {
  // Only scalar integer or PRED values are tracked.
  if ((!instr->shape().IsInteger() && instr->shape().element_type() != PRED) ||
      instr->shape().dimensions_size() != 0) {
    return Range{};
  }
  VLOG(5) << "Computing Range for " << instr->ToString();
  // A predefined range short-circuits the recursion.
  auto it = predefined_ranges.find(instr);
  if (it != predefined_ranges.end()) {
    VLOG(5) << "Found range! " << it->second.max().GetSignedValue() << " "
            << it->second.min().GetSignedValue();
    return it->second;
  }
  switch (instr->opcode()) {
    case HloOpcode::kCompare: {
      VLOG(5) << "Handling Compare";
      Range lhs =
          RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
      Range rhs =
          RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
      VLOG(5) << "Returned Rhs: " << rhs.ToString()
              << " Lhs: " << lhs.ToString();
      // Only less-than comparisons are supported.
      if (instr->comparison_direction() != ComparisonDirection::kLt) {
        return Range{};
      }
      // lhs is always strictly below rhs: statically true (1-bit "1").
      if (lhs.max().lt(rhs.min())) {
        return Range{ConstantValue::GetOne(1, false),
                     ConstantValue::GetOne(1, false),
                     true};
      }
      // lhs never falls below rhs: statically false (1-bit "0").
      if (!lhs.min().lt(rhs.max())) {
        return Range{
            ConstantValue::GetZero(1, false),
            ConstantValue::GetZero(1, false),
            true};
      }
      VLOG(5) << "Compare failed";
      VLOG(5) << "rhs max " << rhs.max().GetSignedValue() << " rhs min "
              << rhs.min().GetSignedValue() << " lhs max "
              << lhs.max().GetSignedValue() << " lhs min "
              << lhs.min().GetSignedValue();
      return Range{};
    }
    case HloOpcode::kConstant: {
      // PRED constants are not handled, only integers.
      if (!instr->shape().IsInteger()) {
        return Range{};
      }
      VLOG(5) << "Handling Constant";
      const int64_t bitwidth =
          primitive_util::BitWidth(instr->shape().element_type());
      const bool is_signed =
          primitive_util::IsSignedIntegralType(instr->shape().element_type());
      // A constant is the degenerate single-value (and linear) range.
      if (is_signed) {
        const int64_t value = *instr->literal().GetFirstInteger();
        return Range{ConstantValue::GetSigned(value, bitwidth),
                     ConstantValue::GetSigned(value, bitwidth),
                     true};
      }
      const uint64_t value = *instr->literal().GetFirstInteger();
      return Range{ConstantValue::GetUnsigned(value, bitwidth),
                   ConstantValue::GetUnsigned(value, bitwidth),
                   true};
    }
    case HloOpcode::kAdd: {
      if (!instr->shape().IsInteger()) {
        return Range{};
      }
      VLOG(5) << "Handling Add";
      Range lhs =
          RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
      Range rhs =
          RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
      VLOG(5) << "Returned Rhs: " << rhs.ToString()
              << " Lhs: " << lhs.ToString();
      if (lhs.IsEmpty() || rhs.IsEmpty()) {
        return Range{};
      }
      // Interval addition: [a,b] + [c,d] = [a+c, b+d].
      ConstantValue min = lhs.min().add(rhs.min());
      ConstantValue max = lhs.max().add(rhs.max());
      // max < min means the fixed-width addition overflowed; give up.
      if (max.lt(min)) {
        VLOG(5) << "Add wrapped";
        return Range{};
      }
      return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
    }
    case HloOpcode::kSelect: {
      VLOG(5) << "Handling Select";
      const HloInstruction* cmp = instr->operand(0);
      Range cmp_range = RecursivelyIdentifyRange(cmp, predefined_ranges);
      // Only a statically decided predicate lets us pick a branch.
      if (cmp_range.IsEmpty() || !cmp_range.IsSingleValue()) {
        VLOG(5) << "Select failed";
        return Range{};
      }
      if (cmp_range.GetSingleSignedValue() == 0) {
        // Predicate is false: range comes from the on-false operand.
        return RecursivelyIdentifyRange(instr->operand(2), predefined_ranges);
      }
      return RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
    }
    case HloOpcode::kSubtract: {
      if (!instr->shape().IsInteger()) {
        return Range{};
      }
      VLOG(5) << "Handling Subtract";
      Range lhs =
          RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
      Range rhs =
          RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
      VLOG(5) << "Returned Rhs: " << rhs.ToString()
              << " Lhs: " << lhs.ToString();
      if (lhs.IsEmpty() || rhs.IsEmpty()) {
        return Range{};
      }
      // Interval subtraction: [a,b] - [c,d] = [a-d, b-c].
      ConstantValue min = lhs.min().sub(rhs.max());
      ConstantValue max = lhs.max().sub(rhs.min());
      // max < min means the fixed-width subtraction wrapped; give up.
      if (max.lt(min)) {
        VLOG(5) << "Subtract wrapped";
        return Range{};
      }
      return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
    }
    default:
      break;
  }
  VLOG(5) << "Unsupported instruction: " << instr->ToString();
  return Range{};
}
} | #include "xla/service/value_range.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
// Fixture for Range/RecursivelyIdentifyRange tests.
class ValueRangeTest : public HloTestBase {};
// p0 in [0, 5] plus constant 124 yields [124, 129], staying linear.
TEST_F(ValueRangeTest, AddedValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s32[] constant(124)
    p0 = s32[] parameter(0)
    ROOT %a = s32[] add(p0, c0)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* p0 = root->operand(0);
  // Seed p0 with the known signed range [0, 5].
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, true),
                ConstantValue::GetSigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.min().GetSignedValue(), 124);
  EXPECT_EQ(range.max().GetSignedValue(), 129);
}
// Unsigned u16 add near the s16 limit: [0, 5] + 32768 -> [32768, 32773]
// (no wrap for unsigned arithmetic).
TEST_F(ValueRangeTest, AddedValueUnsigned) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = u16[] constant(32768)
    p0 = u16[] parameter(0)
    ROOT %a = u16[] add(p0, c0)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* p0 = root->operand(0);
  // Seed p0 with the known unsigned range [0, 5].
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, false),
                ConstantValue::GetUnsigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.min().GetUnsignedValue(), 32768);
  EXPECT_EQ(range.max().GetUnsignedValue(), 32773);
}
// p0 in [0, 5] minus constant 124 yields [-124, -119].
TEST_F(ValueRangeTest, SubtractValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s32[] constant(124)
    p0 = s32[] parameter(0)
    ROOT %a = s32[] subtract(p0, c0)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* p0 = root->operand(0);
  // Seed p0 with the known signed range [0, 5].
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, true),
                ConstantValue::GetSigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.min().GetSignedValue(), -124);
  EXPECT_EQ(range.max().GetSignedValue(), -119);
}
// p0 in [0, 5] makes `p0 < 124` statically true, so the select resolves to
// the subtract branch: [-124, -119].
TEST_F(ValueRangeTest, SelectValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s32[] constant(124)
    p0 = s32[] parameter(0)
    c = pred[] compare(p0, c0), direction=LT
    %s = s32[] subtract(p0, c0)
    %a = s32[] add(c0, p0)
    ROOT slct = s32[] select(c, s, a)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  // root->operand(0) is the compare; its operand(0) is p0.
  const HloInstruction* p0 = root->operand(0)->operand(0);
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, true),
                ConstantValue::GetSigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.max().GetSignedValue(), -119);
  EXPECT_EQ(range.min().GetSignedValue(), -124);
}
// With operands swapped, `124 < p0` is statically false for p0 in [0, 5],
// so the select resolves to the add branch: [124, 129].
TEST_F(ValueRangeTest, SelectValue2) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s32[] constant(124)
    p0 = s32[] parameter(0)
    c = pred[] compare(c0, p0), direction=LT
    %s = s32[] subtract(p0, c0)
    %a = s32[] add(c0, p0)
    ROOT slct = s32[] select(c, s, a)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  // root->operand(0) is the compare; its operand(1) is p0.
  const HloInstruction* p0 = root->operand(0)->operand(1);
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, true),
                ConstantValue::GetSigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.max().GetSignedValue(), 129);
  EXPECT_EQ(range.min().GetSignedValue(), 124);
}
// Chained arithmetic: p0 in [0, 5]; sub = p0-124 in [-124, -119];
// a = sub+12 in [-112, -107]; sub2 = 5-a in [112, 117].
TEST_F(ValueRangeTest, AddSubtractValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s32[] constant(124)
    c1 = s32[] constant(12)
    c2 = s32[] constant(5)
    p0 = s32[] parameter(0)
    sub = s32[] subtract(p0, c0)
    a = s32[] add(sub, c1)
    sub2 = s32[] subtract(c2, a)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  // root is sub2; walk sub2 -> a -> sub -> p0.
  const HloInstruction* p0 = root->operand(1)->operand(0)->operand(0);
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(std::make_pair(
      p0, Range{ConstantValue::GetZero(32, true),
                ConstantValue::GetSigned(5, 32), /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_FALSE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_TRUE(range.IsLinear());
  EXPECT_EQ(range.min().GetSignedValue(), 112);
  EXPECT_EQ(range.max().GetSignedValue(), 117);
}
// s16 subtraction that wraps (min would underflow -32768) must yield an
// empty, non-linear range.
TEST_F(ValueRangeTest, SubtractWrapAroundValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s16[] constant(124)
    p0 = s16[] parameter(0)
    ROOT %a = s16[] subtract(p0, c0)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* p0 = root->operand(0);
  // p0 spans [-32768, 0]; subtracting 124 wraps at the low end.
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(
      std::make_pair(p0, Range{ConstantValue::GetSigned(-32768, 16),
                               ConstantValue::GetZero(16, true),
                               /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_TRUE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_FALSE(range.IsLinear());
}
// s16 addition that wraps (max would exceed 32767) must yield an empty,
// non-linear range.
TEST_F(ValueRangeTest, AddWrapAroundValue) {
  constexpr absl::string_view hlo_string = R"(
  HloModule module
  ENTRY entry {
    c0 = s16[] constant(124)
    p0 = s16[] parameter(0)
    ROOT %a = s16[] add(p0, c0)
  }
  )";
  auto module =
      ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
  const HloInstruction* root = module->entry_computation()->root_instruction();
  const HloInstruction* p0 = root->operand(0);
  // p0 spans [0, 32760]; adding 124 wraps at the high end.
  absl::flat_hash_map<const HloInstruction*, Range> fs;
  fs.insert(
      std::make_pair(p0, Range{ConstantValue::GetZero(16, true),
                               ConstantValue::GetSigned(32760, 16),
                               /*is_linear=*/true}));
  auto range = RecursivelyIdentifyRange(root, fs);
  EXPECT_TRUE(range.IsEmpty());
  EXPECT_FALSE(range.IsSingleValue());
  EXPECT_FALSE(range.IsLinear());
}
}
} |
1,964 | cpp | tensorflow/tensorflow | collective_permute_decomposer | third_party/xla/xla/service/collective_permute_decomposer.cc | third_party/xla/xla/service/collective_permute_decomposer_test.cc | #ifndef XLA_SERVICE_COLLECTIVE_PERMUTE_DECOMPOSER_H_
#define XLA_SERVICE_COLLECTIVE_PERMUTE_DECOMPOSER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that decomposes qualifying collective-permute instructions into
// Send/Recv(+done) pairs (see the implementation in the .cc). Only permutes
// with a channel id, an array result of at least `threshold_in_bytes`, and a
// cycle-free source-target graph are rewritten.
class CollectivePermuteDecomposer : public HloModulePass {
 public:
  // `threshold_in_bytes`: minimum result size for a permute to be decomposed.
  explicit CollectivePermuteDecomposer(int64_t threshold_in_bytes)
      : threshold_in_bytes_(threshold_in_bytes) {}
  absl::string_view name() const override {
    return "collective-permute-decomposer";
  }
  using HloPassInterface::Run;
  // Returns true iff at least one collective-permute was decomposed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  int64_t threshold_in_bytes_;
};
}
#endif
#include "xla/service/collective_permute_decomposer.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
bool HasCycles(const SourceTargetPairs& pairs) {
tensorflow::GraphCycles graph;
absl::flat_hash_map<int64_t, int32_t> replica_to_node_id;
auto get_node_id = [&](int64_t replica) {
auto it_and_inserted = replica_to_node_id.emplace(replica, -1);
auto it = it_and_inserted.first;
auto inserted = it_and_inserted.second;
if (inserted) {
it->second = graph.NewNode();
}
return it->second;
};
for (auto pair : pairs) {
auto source = get_node_id(pair.first);
auto target = get_node_id(pair.second);
VLOG(3) << "See source " << source << " -> target " << target;
if (!graph.InsertEdge(source, target)) {
VLOG(3) << "Detected cycles";
return true;
}
}
return false;
}
// Decides whether `collective_permute` is decomposed into Send/Recv: it must
// carry a channel id, produce an array (no tuple/context data) of at least
// `threshold_in_bytes`, and its source-target graph must be acyclic.
bool ShouldDecompose(const HloCollectivePermuteInstruction& collective_permute,
                     int64_t threshold_in_bytes) {
  if (!collective_permute.channel_id().has_value()) {
    return false;
  }
  const Shape& result_shape = collective_permute.shape();
  if (!result_shape.IsArray() ||
      ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) {
    return false;
  }
  return !HasCycles(collective_permute.source_target_pairs());
}
// A permute is a pipelining candidate only when its input is read directly
// from a parameter via get-tuple-element (i.e. straight out of the loop
// carry).
bool MayPipeline(const HloCollectivePermuteInstruction& collective_permute) {
  const HloInstruction* data = collective_permute.operand(0);
  if (data->opcode() != HloOpcode::kGetTupleElement) {
    return false;
  }
  return data->operand(0)->opcode() == HloOpcode::kParameter;
}
// Rewrites `collective_permute` into
//   after-all -> {recv, send} -> {recv-done, send-done} -> get-tuple-element
// reusing the permute's channel id, and records the source-target pairs as a
// frontend attribute on the new ops. When `pipeline_decision` is non-empty
// it is attached to all four send/recv ops as kSendRecvPipelineAttr.
absl::Status DecomposeCollectivePermute(
    HloCollectivePermuteInstruction* collective_permute,
    HloComputation* computation, const std::string& pipeline_decision) {
  // The Send/Recv pair inherits the permute's channel id and metadata.
  int64_t channel_id = collective_permute->channel_id().value();
  HloInstruction* data = collective_permute->mutable_operand(0);
  const Shape& data_shape = data->shape();
  const OpMetadata& metadata = collective_permute->metadata();
  const xla::FrontendAttributes& old_attributes =
      collective_permute->frontend_attributes();
  xla::FrontendAttributes attributes;
  // Serializes the pairs as "{{s0,t0},{s1,t1},...}".
  std::string source_target_pairs_string =
      "{" +
      absl::StrJoin(collective_permute->source_target_pairs(), ",",
                    absl::PairFormatter(
                        [](std::string* out, int64_t value) {
                          absl::StrAppend(out, "{", value);
                        },
                        ",",
                        [](std::string* out, int64_t value) {
                          absl::StrAppend(out, value, "}");
                        })) +
      "}";
  // Carry over any pre-existing frontend attributes to the new ops.
  attributes.mutable_map()->insert(old_attributes.map().begin(),
                                   old_attributes.map().end());
  (*attributes.mutable_map())[kSendRecvSourceTargetPairsAttr] =
      source_target_pairs_string;
  HloInstruction* after_all =
      computation->AddInstruction(HloInstruction::CreateToken());
  HloInstruction* recv = computation->AddInstruction(
      HloInstruction::CreateRecv(data_shape, after_all, channel_id));
  recv->add_frontend_attributes(attributes);
  recv->set_metadata(metadata);
  HloInstruction* send = computation->AddInstruction(
      HloInstruction::CreateSend(data, after_all, channel_id));
  send->add_frontend_attributes(attributes);
  send->set_metadata(metadata);
  HloInstruction* recv_done =
      computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
  HloInstruction* send_done =
      computation->AddInstruction(HloInstruction::CreateSendDone(send));
  // Control edge: send must be scheduled before recv-done (NOTE(review):
  // presumably to keep the pair from being split by the scheduler — confirm).
  TF_RETURN_IF_ERROR(send->AddControlDependencyTo(recv_done));
  // The received data (tuple element 0 of recv-done) replaces the permute.
  HloInstruction* recv_data = computation->AddInstruction(
      HloInstruction::CreateGetTupleElement(recv_done, 0));
  TF_RETURN_IF_ERROR(collective_permute->ReplaceAllUsesWith(recv_data));
  TF_RETURN_IF_ERROR(
      computation->RemoveInstructionAndUnusedOperands(collective_permute));
  if (!pipeline_decision.empty()) {
    xla::FrontendAttributes attributes;
    (*attributes.mutable_map())[kSendRecvPipelineAttr] = pipeline_decision;
    send->add_frontend_attributes(attributes);
    send_done->add_frontend_attributes(attributes);
    recv->add_frontend_attributes(attributes);
    recv_done->add_frontend_attributes(attributes);
  }
  return absl::OkStatus();
}
// Identical redeclarations of the file-local aliases (legal in C++) so this
// block is self-contained.
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;

// Returns true iff `backedge` plus `others` form the forward cycle
// 0 -> 1 -> ... -> n-1 -> 0, where `backedge` is the (n-1, 0) pair and
// `others` holds the (i, i+1) pairs in order.
bool IsForwardCycle(const SourceTargetPair& backedge,
                    const SourceTargetPairs& others) {
  const int64_t num_pairs = others.size() + 1;
  if (backedge != std::make_pair(num_pairs - 1, int64_t{0})) {
    return false;
  }
  for (int64_t i = 0; i + 1 < num_pairs; ++i) {
    if (others[i] != std::make_pair(i, i + 1)) {
      return false;
    }
  }
  return true;
}
// Identical redeclarations of the file-local aliases (legal in C++) so this
// block is self-contained.
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;

// Returns true iff `backedge` plus `others` form the backward cycle
// 0 -> n-1 -> ... -> 1 -> 0, where `backedge` is the (0, n-1) pair and
// `others` holds the (i+1, i) pairs in order.
bool IsBackwardCycle(const SourceTargetPair& backedge,
                     const SourceTargetPairs& others) {
  const int64_t num_pairs = others.size() + 1;
  if (backedge != std::make_pair(int64_t{0}, num_pairs - 1)) {
    return false;
  }
  for (int64_t i = 0; i + 1 < num_pairs; ++i) {
    if (others[i] != std::make_pair(i + 1, i)) {
      return false;
    }
  }
  return true;
}
// If one of {cp0, cp1} is a single-pair backedge that closes a forward or
// backward cycle formed by the other, returns (backedge, chain); otherwise
// std::nullopt. cp0 is tried as the backedge first.
std::optional<std::pair<HloCollectivePermuteInstruction*,
                        HloCollectivePermuteInstruction*>>
CheckCyclePatterns(HloCollectivePermuteInstruction* cp0,
                   HloCollectivePermuteInstruction* cp1) {
  // Returns true when `backedge_pairs` is a single pair completing the
  // cycle described by `chain_pairs`.
  auto closes_cycle = [](const SourceTargetPairs& backedge_pairs,
                         const SourceTargetPairs& chain_pairs) {
    return backedge_pairs.size() == 1 &&
           (IsForwardCycle(backedge_pairs.front(), chain_pairs) ||
            IsBackwardCycle(backedge_pairs.front(), chain_pairs));
  };
  const SourceTargetPairs& cp0_pairs = cp0->source_target_pairs();
  const SourceTargetPairs& cp1_pairs = cp1->source_target_pairs();
  if (closes_cycle(cp0_pairs, cp1_pairs)) {
    return std::make_pair(cp0, cp1);
  }
  if (closes_cycle(cp1_pairs, cp0_pairs)) {
    return std::make_pair(cp1, cp0);
  }
  return std::nullopt;
}
}
// For every computation, collects the collective-permutes that qualify for
// decomposition, picks at most two of them (a cycle pair) to tag for
// pipelining when inside a while body, and then rewrites them all into
// Send/Recv sequences. Returns true iff anything was rewritten.
absl::StatusOr<bool> CollectivePermuteDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  std::vector<HloComputation*> all_computations =
      module->MakeComputationPostOrder(execution_threads);
  // Computations known to be while-loop bodies; only those may pipeline.
  absl::flat_hash_set<HloComputation*> while_bodies;
  // Reverse post order: callers first, so a while body is visited after the
  // computation containing its kWhile op has registered it below.
  for (auto iter = all_computations.rbegin(); iter != all_computations.rend();
       ++iter) {
    HloComputation* computation = *iter;
    bool may_pipeline = while_bodies.contains(computation);
    std::vector<HloCollectivePermuteInstruction*> cps_to_decompose;
    // Pipelining candidates: cp0 (single-pair backedge) and cp1 (the chain).
    HloCollectivePermuteInstruction* cp0_to_pipeline = nullptr;
    HloCollectivePermuteInstruction* cp1_to_pipeline = nullptr;
    for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
      if (hlo->opcode() == HloOpcode::kWhile) {
        // Record the body so it can pipeline when visited later.
        while_bodies.insert(hlo->while_body());
        continue;
      }
      if (hlo->opcode() != HloOpcode::kCollectivePermute) {
        continue;
      }
      HloCollectivePermuteInstruction* cp =
          Cast<HloCollectivePermuteInstruction>(hlo);
      if (!ShouldDecompose(*cp, threshold_in_bytes_)) {
        continue;
      }
      cps_to_decompose.push_back(cp);
      if (!while_bodies.contains(computation) || !may_pipeline) {
        continue;
      }
      if (cp0_to_pipeline != nullptr && cp1_to_pipeline != nullptr) {
        // A pipelineable cycle pair was already found.
        continue;
      }
      if (!MayPipeline(*cp)) {
        continue;
      }
      if (cp0_to_pipeline == nullptr) {
        // First candidate; a later permute may pair with it as a cycle.
        cp0_to_pipeline = cp;
        continue;
      }
      auto optional_pair = CheckCyclePatterns(cp0_to_pipeline, cp);
      if (optional_pair.has_value()) {
        // First of the pair is the single-pair backedge ("0"), second is
        // the chain ("1").
        cp0_to_pipeline = optional_pair.value().first;
        cp1_to_pipeline = optional_pair.value().second;
      }
    }
    // Rewrite after the scan so the post order being iterated stays valid.
    for (HloCollectivePermuteInstruction* cp : cps_to_decompose) {
      std::string pipeline_decision;
      if (cp0_to_pipeline == cp) {
        pipeline_decision = "0";
      } else if (cp1_to_pipeline == cp) {
        pipeline_decision = "1";
      }
      TF_RETURN_IF_ERROR(
          DecomposeCollectivePermute(cp, computation, pipeline_decision));
    }
    if (!cps_to_decompose.empty()) {
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/collective_permute_decomposer.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
namespace op = xla::testing::opcode_matchers;
using CollectivePermuteDecomposerTest = HloTestBase;
// A cyclic source-target graph ({0,1},{1,0}) must not be decomposed.
TEST_F(CollectivePermuteDecomposerTest, WithCycleNotTransformed) {
  const absl::string_view kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    p = u32[] replica-id()
    ROOT cp = u32[] collective-permute(p), channel_id=1,
      source_target_pairs={{0,1}, {1,0}}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_FALSE(changed);
}
// A tuple-shaped (context-carrying) result is not an array, so no rewrite.
TEST_F(CollectivePermuteDecomposerTest, WithContextDataNotTransformed) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    p = u32[] replica-id()
    ROOT cp = (u32[], u32[], u32[], u32[]) collective-permute(p), channel_id=1,
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_FALSE(changed);
}
// An acyclic permute with a channel id is decomposed; metadata and
// source-target pairs must be propagated, and no pipeline attribute is set
// outside a while body.
TEST_F(CollectivePermuteDecomposerTest, TransformedExplicitChannelId) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    p = u32[] replica-id()
    ROOT cp = u32[] collective-permute(p), channel_id=1,
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
      metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  // The original permute's metadata must survive on the new ops.
  auto check_metadata = [](const HloInstruction* inst) {
    EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add");
    EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py");
    EXPECT_EQ(inst->metadata().source_line(), 35);
  };
  // No pipeline attribute expected outside a while body.
  auto check_not_pipelined = [](const HloInstruction* instr) {
    const FrontendAttributes& attributes = instr->frontend_attributes();
    EXPECT_EQ(attributes.map().end(),
              attributes.map().find(kSendRecvPipelineAttr));
  };
  HloInstruction* after_all = FindInstruction(module.get(), "after-all");
  HloInstruction* recv = FindInstruction(module.get(), "recv");
  EXPECT_EQ(recv->operand(0), after_all);
  EXPECT_EQ(recv->channel_id().value(), 1);
  EXPECT_THAT(
      recv->ToString(),
      HasSubstr(
          "_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3},{3,4}}\""));
  check_metadata(recv);
  check_not_pipelined(recv);
  HloInstruction* recv_done = FindInstruction(module.get(), "recv-done");
  EXPECT_EQ(recv_done->operand(0), recv);
  HloInstruction* send = FindInstruction(module.get(), "send");
  EXPECT_EQ(send->operand(1), after_all);
  EXPECT_EQ(send->channel_id().value(), 1);
  EXPECT_THAT(
      send->ToString(),
      HasSubstr(
          "_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3},{3,4}}\""));
  check_metadata(send);
  check_not_pipelined(send);
  HloInstruction* send_done = FindInstruction(module.get(), "send-done");
  EXPECT_EQ(send_done->operand(0), send);
  // The permute's users are rerouted to tuple element 0 of recv-done.
  HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::GetTupleElement(recv_done, 0));
}
// Without a channel id the permute must be left untouched.
TEST_F(CollectivePermuteDecomposerTest, NotTransformedDefaultChannelId) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    p = u32[] replica-id()
    ROOT cp = u32[] collective-permute(p),
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_FALSE(changed);
}
// A u32[] result (4 bytes) is below the 8-byte threshold, so no rewrite.
TEST_F(CollectivePermuteDecomposerTest, ThresholdNotTransformed) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    p = u32[] replica-id()
    ROOT cp = u32[] collective-permute(p), channel_id=1,
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
      metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(8);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_FALSE(changed);
}
// A single pipelineable permute inside a while body is tagged with
// _xla_send_recv_pipeline="0"; other frontend attributes are preserved and
// send must control-precede recv-done.
TEST_F(CollectivePermuteDecomposerTest, Pipeline1) {
  const char* const kModuleStr = R"(
  HloModule module
  cond {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(2)
    ROOT result = pred[] compare(count, ub), direction=LT
  }

  body {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    send-data = get-tuple-element(param), index=1

    recv-data = u32[2] collective-permute(send-data), channel_id=1,
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},
      frontend_attributes={_xla_other_attribute="xyz"}

    c1 = u32[] constant(1)
    new_count = u32[] add(count, c1)

    r = u32[2] broadcast(c1), dimensions={}
    s = u32[2] add(r, recv-data)

    ROOT result = (u32[], u32[2]) tuple(new_count, s)
  }

  ENTRY test_computation {
    c0 = u32[] constant(0)
    c1 = u32[] constant(1)
    r = u32[] replica-id()
    a = u32[] add(c1, r)
    init = u32[2] broadcast(a), dimensions={}
    while_init = (u32[], u32[2]) tuple(c0, init)
    while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
    ROOT result = u32[2] get-tuple-element(while_result), index=1
  })";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* recv = FindInstruction(module.get(), "recv");
  EXPECT_EQ(recv->channel_id().value(), 1);
  EXPECT_THAT(
      recv->ToString(),
      HasSubstr(
          "_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3},{3,4}}\""));
  EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
  EXPECT_THAT(recv->ToString(), HasSubstr("_xla_other_attribute=\"xyz\""));
  HloInstruction* recv_done = FindInstruction(module.get(), "recv-done");
  EXPECT_THAT(recv_done->ToString(),
              HasSubstr("_xla_send_recv_pipeline=\"0\""));
  HloInstruction* send = FindInstruction(module.get(), "send");
  EXPECT_EQ(send->channel_id().value(), 1);
  EXPECT_THAT(
      send->ToString(),
      HasSubstr(
          "_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3},{3,4}}\""));
  EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
  EXPECT_THAT(send->ToString(), HasSubstr("_xla_other_attribute=\"xyz\""));
  HloInstruction* send_done = FindInstruction(module.get(), "send-done");
  EXPECT_THAT(send_done->ToString(),
              HasSubstr("_xla_send_recv_pipeline=\"0\""));
  // The decomposer adds a send -> recv-done control edge.
  EXPECT_FALSE(recv_done->control_predecessors().empty());
  EXPECT_EQ(recv_done->control_predecessors()[0], send);
}
// Forward cycle: the single-pair backedge {3,0} gets pipeline "0" and the
// chain {0,1},{1,2},{2,3} gets pipeline "1".
TEST_F(CollectivePermuteDecomposerTest, ForwardPipeline2) {
  const char* const kModuleStr = R"(
  HloModule module
  cond {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(2)
    ROOT result = pred[] compare(count, ub), direction=LT
  }

  body {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    send-data = get-tuple-element(param), index=1

    recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,
      source_target_pairs={{3,0}}

    recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,
      source_target_pairs={{0,1}, {1,2}, {2,3}}

    replica = u32[] replica-id()
    constant0 = u32[] constant(0)
    compare0 = pred[] compare(replica, constant0), direction=EQ
    compare = pred[2] broadcast(compare0), dimensions={}
    recv-data = u32[2] select(compare, recv-data.0, recv-data.1)

    c1 = u32[] constant(1)
    new_count = u32[] add(count, c1)

    r = u32[2] broadcast(c1), dimensions={}
    s = u32[2] add(r, recv-data)

    ROOT result = (u32[], u32[2]) tuple(new_count, s)
  }

  ENTRY test_computation {
    c0 = u32[] constant(0)
    c1 = u32[] constant(1)
    r = u32[] replica-id()
    a = u32[] add(c1, r)
    init = u32[2] broadcast(a), dimensions={}
    while_init = (u32[], u32[2]) tuple(c0, init)
    while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
    ROOT result = u32[2] get-tuple-element(while_result), index=1
  })";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  // Backedge {3,0} -> pipeline "0".
  HloInstruction* recv = FindInstruction(module.get(), "recv");
  EXPECT_EQ(recv->channel_id().value(), 1);
  EXPECT_THAT(recv->ToString(),
              HasSubstr("_xla_send_recv_source_target_pairs=\"{{3,0}}\""));
  EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
  HloInstruction* send = FindInstruction(module.get(), "send");
  EXPECT_THAT(send->ToString(),
              HasSubstr("_xla_send_recv_source_target_pairs=\"{{3,0}}\""));
  EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
  // Chain {0,1},{1,2},{2,3} -> pipeline "1".
  HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
  EXPECT_EQ(recv1->channel_id().value(), 2);
  EXPECT_THAT(
      recv1->ToString(),
      HasSubstr("_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3}}\""));
  EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
  HloInstruction* recv_done1 = FindInstruction(module.get(), "recv-done.1");
  EXPECT_THAT(recv_done1->ToString(),
              HasSubstr("_xla_send_recv_pipeline=\"1\""));
  HloInstruction* send1 = FindInstruction(module.get(), "send.1");
  EXPECT_THAT(
      send1->ToString(),
      HasSubstr("_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3}}\""));
  EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
  HloInstruction* send_done1 = FindInstruction(module.get(), "send-done.1");
  EXPECT_THAT(send_done1->ToString(),
              HasSubstr("_xla_send_recv_pipeline=\"1\""));
}
// Two collective-permutes inside a while loop form a backward rotation:
// channel 1 shifts data "down" ({{1,0},{2,1},{3,2}}) and channel 2 carries the
// wrap-around edge ({{0,3}}). Verifies the decomposer lowers both into
// send/recv pairs and assigns the multi-pair permute to pipeline stream "1"
// and the wrap-around pair to stream "0".
TEST_F(CollectivePermuteDecomposerTest, BackwardPipeline2) {
  const char* const kModuleStr = R"(
  HloModule module
  cond {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(2)
    ROOT result = pred[] compare(count, ub), direction=LT
  }

  body {
    param = (u32[], u32[2]) parameter(0)
    count = get-tuple-element(param), index=0
    send-data = get-tuple-element(param), index=1

    recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,
      source_target_pairs={{1,0},{2,1},{3,2}}

    recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,
      source_target_pairs={{0,3}}

    replica = u32[] replica-id()
    constant0 = u32[] constant(0)
    compare0 = pred[] compare(replica, constant0), direction=NE
    compare = pred[2] broadcast(compare0), dimensions={}
    recv-data = u32[2] select(compare, recv-data.0, recv-data.1)

    c1 = u32[] constant(1)
    new_count = u32[] add(count, c1)

    r = u32[2] broadcast(c1), dimensions={}
    s = u32[2] add(r, recv-data)

    ROOT result = (u32[], u32[2]) tuple(new_count, s)
  }

  ENTRY test_computation {
    c0 = u32[] constant(0)
    c1 = u32[] constant(1)
    r = u32[] replica-id()
    a = u32[] add(c1, r)
    init = u32[2] broadcast(a), dimensions={}
    while_init = (u32[], u32[2]) tuple(c0, init)
    while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
    ROOT result = u32[2] get-tuple-element(while_result), index=1
  })";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  // Threshold of 0 — presumably decomposes regardless of data size; confirm
  // against CollectivePermuteDecomposer's constructor contract.
  CollectivePermuteDecomposer decomposer(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  // The multi-pair rotation (channel 1) should land on pipeline stream "1".
  HloInstruction* recv = FindInstruction(module.get(), "recv");
  EXPECT_EQ(recv->channel_id().value(), 1);
  EXPECT_THAT(
      recv->ToString(),
      HasSubstr("_xla_send_recv_source_target_pairs=\"{{1,0},{2,1},{3,2}}\""));
  EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));
  HloInstruction* send = FindInstruction(module.get(), "send");
  EXPECT_THAT(
      send->ToString(),
      HasSubstr("_xla_send_recv_source_target_pairs=\"{{1,0},{2,1},{3,2}}\""));
  EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\""));

  // The wrap-around edge (channel 2) should land on pipeline stream "0".
  HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
  EXPECT_EQ(recv1->channel_id().value(), 2);
  EXPECT_THAT(recv1->ToString(),
              HasSubstr("_xla_send_recv_source_target_pairs=\"{{0,3}}\""));
  EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
  HloInstruction* send1 = FindInstruction(module.get(), "send.1");
  EXPECT_THAT(send1->ToString(),
              HasSubstr("_xla_send_recv_source_target_pairs=\"{{0,3}}\""));
  EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\""));
}
}
} |
1,965 | cpp | tensorflow/tensorflow | convert_memory_placement_to_internal_annotations | third_party/xla/xla/service/convert_memory_placement_to_internal_annotations.cc | third_party/xla/xla/service/convert_memory_placement_to_internal_annotations_test.cc | #ifndef XLA_SERVICE_CONVERT_MEMORY_PLACEMENT_TO_INTERNAL_ANNOTATIONS_H_
#define XLA_SERVICE_CONVERT_MEMORY_PLACEMENT_TO_INTERNAL_ANNOTATIONS_H_
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that converts "annotate_device_placement" custom calls carrying a
// frontend "_xla_buffer_placement" attribute ("pinned_host", "unpinned_host"
// or "device") into XLA-internal MoveToHost / MoveToDevice custom-call
// annotations used by host memory offloading.
class ConvertMemoryPlacementToInternalAnnotations : public HloModulePass {
public:
  ConvertMemoryPlacementToInternalAnnotations() = default;
  absl::string_view name() const override {
    return "convert-memory-placement-to-internal-annotations";
  }
  using HloPassInterface::Run;
  // Rewrites all placement annotations in `module`; returns true if any
  // instruction was replaced. The current implementation does not restrict
  // itself to `execution_threads` (see the .cc).
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
// Rewrites every "annotate_device_placement" custom call into the internal
// MoveToHost / MoveToDevice annotation on its single operand, based on the
// "_xla_buffer_placement" frontend attribute. Returns true if the module
// changed. Note: `execution_threads` is currently not consulted.
absl::StatusOr<bool> ConvertMemoryPlacementToInternalAnnotations::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* c : module->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : c->MakeInstructionPostOrder()) {
      // Only placement-annotation custom calls are rewritten.
      if (!instruction->IsCustomCall(
              host_memory_offload_annotations::kDevicePlacement)) {
        continue;
      }
      const auto& frontend_attributes = instruction->frontend_attributes();
      const auto it = frontend_attributes.map().find(kXlaBufferPlacementAttr);
      if (it == frontend_attributes.map().end()) {
        continue;
      }
      // "pinned_host"/"unpinned_host" mark a device->host transfer ("forward"
      // case); "device" marks a host->device transfer ("backward" case). Any
      // other placement value is left untouched.
      const bool is_to_host_case =
          (it->second ==
               host_memory_offload_annotations::kMemoryTargetPinnedHost ||
           it->second ==
               host_memory_offload_annotations::kMemoryTargetUnpinnedHost);
      const bool is_to_device_case =
          (it->second ==
           host_memory_offload_annotations::kMemoryTargetDevice);
      if (!is_to_host_case && !is_to_device_case) {
        continue;
      }
      // Both rewrites operate on a single operand; validate this uniformly.
      // (Previously only the to-host branch checked, and a malformed
      // multi-operand annotation was silently accepted in the to-device case.)
      if (instruction->operand_count() != 1) {
        return Internal(
            "Custom calls with target %s must have exactly one operand. %s "
            "has %d.",
            host_memory_offload_annotations::kDevicePlacement,
            instruction->name(), instruction->operand_count());
      }
      VLOG(1) << (is_to_host_case ? "Process forward case: "
                                  : "Process backward case: ")
              << instruction->ToString();
      HloInstruction* data = instruction->mutable_operand(0);
      const absl::string_view internal_target =
          is_to_host_case
              ? host_memory_offload_annotations::kMoveToHostCustomCallTarget
              : host_memory_offload_annotations::kMoveToDeviceCustomCallTarget;
      // Replace the annotation with the equivalent internal custom call and
      // drop the (now dead) original instruction.
      TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(
          c->AddInstruction(HloInstruction::CreateCustomCall(
              data->shape(), {data}, internal_target))));
      TF_RETURN_IF_ERROR(c->RemoveInstructionAndUnusedOperands(instruction));
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/convert_memory_placement_to_internal_annotations.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
// Test fixture for ConvertMemoryPlacementToInternalAnnotations; it adds no
// state of its own and inherits all behavior from HloTestBase.
class ConvertMemoryPlacementToInternalAnnotationsTest : public HloTestBase {};
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest, ConvertPinnedHostTest) {
const char* hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
region_0.9 {
arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0
constant.15 = s32[] constant(1)
add.33 = s32[] add(get-tuple-element.11, constant.15)
get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1
sine.18 = f32[16]{0} sine(get-tuple-element.12)
sine.19 = f32[16]{0} sine(sine.18)
sine.20 = f32[16]{0} sine(sine.19)
get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2
custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
reshape.23 = f32[1,16]{1,0} reshape(custom-call.21)
constant.17 = s32[] constant(0)
compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
constant.16 = s32[] constant(16)
add.25 = s32[] add(get-tuple-element.11, constant.16)
select.26 = s32[] select(compare.24, add.25, get-tuple-element.11)
dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17)
get-tuple-element.14 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=3
custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
reshape.28 = f32[1,16]{1,0} reshape(custom-call.22)
compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
add.30 = s32[] add(get-tuple-element.11, constant.16)
select.31 = s32[] select(compare.29, add.30, get-tuple-element.11)
dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17)
ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32)
}
region_1.35 {
arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1
get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2
get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3
get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0
constant.41 = s32[] constant(16)
ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT
}
core_closed_call.43 {
constant.47 = s32[] constant(0)
Arg_0.44 = f32[16]{0} parameter(0)
constant.45 = f32[] constant(0)
broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={}
tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46)
while.49 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9
get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0
get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1
get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2
get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3
ROOT tuple.54 = (f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53)
}
region_2.65 {
arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0
constant.74 = s32[] constant(1)
add.108 = s32[] add(get-tuple-element.67, constant.74)
get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6
constant.76 = s32[] constant(0)
compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
constant.75 = s32[] constant(16)
add.83 = s32[] add(get-tuple-element.67, constant.75)
select.84 = s32[] select(compare.82, add.83, get-tuple-element.67)
dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16}
reshape.86 = f32[16]{0} reshape(dynamic-slice.85)
custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2
get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1
cosine.88 = f32[16]{0} cosine(get-tuple-element.68)
reshape.93 = f32[1,16]{1,0} reshape(cosine.88)
compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.95 = s32[] add(get-tuple-element.67, constant.75)
select.96 = s32[] select(compare.94, add.95, get-tuple-element.67)
dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, reshape.93, select.96, constant.76)
get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3
sine.89 = f32[16]{0} sine(get-tuple-element.68)
cosine.90 = f32[16]{0} cosine(sine.89)
reshape.98 = f32[1,16]{1,0} reshape(cosine.90)
compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.100 = s32[] add(get-tuple-element.67, constant.75)
select.101 = s32[] select(compare.99, add.100, get-tuple-element.67)
dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76)
get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4
get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5
compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.78 = s32[] add(get-tuple-element.67, constant.75)
select.79 = s32[] select(compare.77, add.78, get-tuple-element.67)
dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16}
reshape.81 = f32[16]{0} reshape(dynamic-slice.80)
custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
cosine.92 = f32[16]{0} cosine(custom-call.91)
reshape.103 = f32[1,16]{1,0} reshape(cosine.92)
compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.105 = s32[] add(get-tuple-element.67, constant.75)
select.106 = s32[] select(compare.104, add.105, get-tuple-element.67)
dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76)
ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73)
}
region_3.110 {
arg_tuple.111 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1
get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2
get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3
get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=4
get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5
get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6
get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0
constant.119 = s32[] constant(16)
ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT
}
region_4.130 {
arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0
constant.140 = s32[] constant(1)
add.164 = s32[] add(get-tuple-element.132, constant.140)
get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1
get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2
broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={}
add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159)
get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5
constant.141 = s32[] constant(16)
subtract.142 = s32[] subtract(constant.141, get-tuple-element.132)
subtract.143 = s32[] subtract(subtract.142, constant.140)
constant.139 = s32[] constant(0)
compare.154 = pred[] compare(subtract.143, constant.139), direction=LT
add.155 = s32[] add(subtract.143, constant.141)
select.156 = s32[] select(compare.154, add.155, subtract.143)
dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16}
reshape.158 = f32[16]{0} reshape(dynamic-slice.157)
multiply.161 = f32[16]{0} multiply(add.160, reshape.158)
get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4
compare.149 = pred[] compare(subtract.143, constant.139), direction=LT
add.150 = s32[] add(subtract.143, constant.141)
select.151 = s32[] select(compare.149, add.150, subtract.143)
dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), dynamic_slice_sizes={1,16}
reshape.153 = f32[16]{0} reshape(dynamic-slice.152)
multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153)
get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3
compare.144 = pred[] compare(subtract.143, constant.139), direction=LT
add.145 = s32[] add(subtract.143, constant.141)
select.146 = s32[] select(compare.144, add.145, subtract.143)
dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16}
reshape.148 = f32[16]{0} reshape(dynamic-slice.147)
multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148)
constant.138 = f32[] constant(0)
ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137)
}
region_5.166 {
arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1
get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2
get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3
get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4
get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5
get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0
constant.174 = s32[] constant(16)
ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), direction=LT
}
ENTRY main.183 {
constant.6 = s32[] constant(0)
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43
get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0
get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1
constant.7 = f32[] constant(1)
tuple.58 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7)
opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58)
get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2
constant.4 = f32[] constant(0)
broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={}
get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0
get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1
tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61)
while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65
get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0
get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1
get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5
get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6
constant.2 = f32[] constant(0)
broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={}
get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3
get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2
get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3
get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4
tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126)
while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), condition=region_5.166, body=region_4.130
get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0
ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1
get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2
get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3
get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4
get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
bool changed =
ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
int64_t custom_calls_count = 0;
for (auto* c : module->computations()) {
for (auto* instr : c->instructions()) {
if (instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
++custom_calls_count;
}
}
}
EXPECT_EQ(custom_calls_count, 4);
}
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest,
ConvertUnpinnedHostTest) {
const char* hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}}
region_0.9 {
arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0
constant.15 = s32[] constant(1)
add.33 = s32[] add(get-tuple-element.11, constant.15)
get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1
sine.18 = f32[16]{0} sine(get-tuple-element.12)
sine.19 = f32[16]{0} sine(sine.18)
sine.20 = f32[16]{0} sine(sine.19)
get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2
custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"}
reshape.23 = f32[1,16]{1,0} reshape(custom-call.21)
constant.17 = s32[] constant(0)
compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
constant.16 = s32[] constant(16)
add.25 = s32[] add(get-tuple-element.11, constant.16)
select.26 = s32[] select(compare.24, add.25, get-tuple-element.11)
dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17)
get-tuple-element.14 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=3
custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"}
reshape.28 = f32[1,16]{1,0} reshape(custom-call.22)
compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT
add.30 = s32[] add(get-tuple-element.11, constant.16)
select.31 = s32[] select(compare.29, add.30, get-tuple-element.11)
dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17)
ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32)
}
region_1.35 {
arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1
get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2
get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3
get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0
constant.41 = s32[] constant(16)
ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT
}
core_closed_call.43 {
constant.47 = s32[] constant(0)
Arg_0.44 = f32[16]{0} parameter(0)
constant.45 = f32[] constant(0)
broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={}
tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46)
while.49 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9
get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0
get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1
get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2
get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3
ROOT tuple.54 = (f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53)
}
region_2.65 {
arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0
constant.74 = s32[] constant(1)
add.108 = s32[] add(get-tuple-element.67, constant.74)
get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6
constant.76 = s32[] constant(0)
compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
constant.75 = s32[] constant(16)
add.83 = s32[] add(get-tuple-element.67, constant.75)
select.84 = s32[] select(compare.82, add.83, get-tuple-element.67)
dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16}
reshape.86 = f32[16]{0} reshape(dynamic-slice.85)
custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2
get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1
cosine.88 = f32[16]{0} cosine(get-tuple-element.68)
reshape.93 = f32[1,16]{1,0} reshape(cosine.88)
compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.95 = s32[] add(get-tuple-element.67, constant.75)
select.96 = s32[] select(compare.94, add.95, get-tuple-element.67)
dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, reshape.93, select.96, constant.76)
get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3
sine.89 = f32[16]{0} sine(get-tuple-element.68)
cosine.90 = f32[16]{0} cosine(sine.89)
reshape.98 = f32[1,16]{1,0} reshape(cosine.90)
compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.100 = s32[] add(get-tuple-element.67, constant.75)
select.101 = s32[] select(compare.99, add.100, get-tuple-element.67)
dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76)
get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4
get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5
compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.78 = s32[] add(get-tuple-element.67, constant.75)
select.79 = s32[] select(compare.77, add.78, get-tuple-element.67)
dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16}
reshape.81 = f32[16]{0} reshape(dynamic-slice.80)
custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"}
cosine.92 = f32[16]{0} cosine(custom-call.91)
reshape.103 = f32[1,16]{1,0} reshape(cosine.92)
compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT
add.105 = s32[] add(get-tuple-element.67, constant.75)
select.106 = s32[] select(compare.104, add.105, get-tuple-element.67)
dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76)
ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73)
}
region_3.110 {
arg_tuple.111 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1
get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2
get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3
get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=4
get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5
get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6
get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0
constant.119 = s32[] constant(16)
ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT
}
region_4.130 {
arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0
constant.140 = s32[] constant(1)
add.164 = s32[] add(get-tuple-element.132, constant.140)
get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1
get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2
broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={}
add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159)
get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5
constant.141 = s32[] constant(16)
subtract.142 = s32[] subtract(constant.141, get-tuple-element.132)
subtract.143 = s32[] subtract(subtract.142, constant.140)
constant.139 = s32[] constant(0)
compare.154 = pred[] compare(subtract.143, constant.139), direction=LT
add.155 = s32[] add(subtract.143, constant.141)
select.156 = s32[] select(compare.154, add.155, subtract.143)
dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16}
reshape.158 = f32[16]{0} reshape(dynamic-slice.157)
multiply.161 = f32[16]{0} multiply(add.160, reshape.158)
get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4
compare.149 = pred[] compare(subtract.143, constant.139), direction=LT
add.150 = s32[] add(subtract.143, constant.141)
select.151 = s32[] select(compare.149, add.150, subtract.143)
dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), dynamic_slice_sizes={1,16}
reshape.153 = f32[16]{0} reshape(dynamic-slice.152)
multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153)
get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3
compare.144 = pred[] compare(subtract.143, constant.139), direction=LT
add.145 = s32[] add(subtract.143, constant.141)
select.146 = s32[] select(compare.144, add.145, subtract.143)
dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16}
reshape.148 = f32[16]{0} reshape(dynamic-slice.147)
multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148)
constant.138 = f32[] constant(0)
ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137)
}
region_5.166 {
arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0)
get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1
get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2
get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3
get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4
get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5
get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0
constant.174 = s32[] constant(16)
ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), direction=LT
}
ENTRY main.183 {
constant.6 = s32[] constant(0)
Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]}
call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43
get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0
get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1
constant.7 = f32[] constant(1)
tuple.58 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7)
opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58)
get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2
constant.4 = f32[] constant(0)
broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={}
get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0
get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1
tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61)
while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65
get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0
get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1
get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5
get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6
constant.2 = f32[] constant(0)
broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={}
get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3
get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2
get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3
get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4
tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126)
while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), condition=region_5.166, body=region_4.130
get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0
ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1
get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2
get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3
get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4
get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
bool changed =
ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
int64_t custom_calls_count = 0;
for (auto* c : module->computations()) {
for (auto* instr : c->instructions()) {
if (instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
++custom_calls_count;
}
}
}
EXPECT_EQ(custom_calls_count, 4);
}
TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest,
       ConvertOutputPinnedHostTest) {
  // The entry result layout pins the output to host memory space (S(5));
  // the annotate_device_placement custom call on the root should be
  // rewritten into exactly one MoveToHost annotation.
  constexpr std::string_view hlo_string = R"(
  HloModule m, entry_computation_layout={(f32[2,2]{1,0:T(2,128)},f32[2,2]{1,0:T(2,128)})->f32[2,2]{1,0:T(2,128)S(5)}}
  ENTRY m {
    x = f32[2,2] parameter(0)
    y = f32[2,2] parameter(1)
    crs = f32[2,2] add(x, y)
    ROOT transfer = f32[2,2] custom-call(crs), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"}
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  bool changed =
      ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value();
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  // Count MoveToHost annotation custom calls across all computations.
  int64_t move_to_host_count = 0;
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->IsCustomCall(
              host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
        ++move_to_host_count;
      }
    }
  }
  EXPECT_EQ(move_to_host_count, 1);
}
}
} |
1,966 | cpp | tensorflow/tensorflow | layout_assignment | third_party/xla/xla/service/gpu/transforms/layout_assignment.cc | third_party/xla/xla/service/gpu/transforms/layout_assignment_test.cc | #ifndef XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
#define XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
#include <cstdint>
#include <iosfwd>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
class LayoutAssignment;
// Abstract base class for layout constraints tracked during layout
// assignment. A constraint records how strongly it binds (mandatory),
// whether propagation from it is processed depth-first (dfs), and a
// priority used to decide whether a later constraint may override an
// earlier one.
class LayoutConstraint {
 public:
  LayoutConstraint(bool mandatory, bool dfs, int64_t priority)
      : mandatory_(mandatory), dfs_(dfs), priority_(priority) {}
  virtual ~LayoutConstraint() = default;
  virtual std::string ToString() const = 0;
  // True if the constraint may not be overwritten by another layout.
  bool mandatory() const { return mandatory_; }
  // When true, this constraint is propagated depth-first.
  bool dfs() const { return dfs_; }
  // Priority of the constraint; used to arbitrate between conflicting
  // constraints (higher wins).
  int64_t priority() const { return priority_; }
  // True when the constraint still carries the pre-propagation default
  // priority (kDefaultPriority).
  bool IsDefaultLayout() const { return priority_ == kDefaultPriority; }
  // Priority assigned to default layouts before any propagation runs.
  static constexpr int64_t kDefaultPriority = -2;
  // Priority of constraints seeded at the beginning of a pass round.
  static constexpr int64_t kBeginningPriority = 0;
  // Priority of constraints explicitly given by the caller.
  static constexpr int64_t kGivenPriority = 3;
 protected:
  bool mandatory_;
  bool dfs_;
  int64_t priority_;
};
std::ostream& operator<<(std::ostream& out, const LayoutConstraint& constraint);
// Constraint pinning the layout of a single LogicalBuffer.
class BufferLayoutConstraint : public LayoutConstraint {
 public:
  BufferLayoutConstraint(const Layout& layout, const LogicalBuffer& buffer,
                         bool mandatory, bool dfs, int64_t priority);
  const LogicalBuffer& buffer() const { return *buffer_; }
  // The currently active layout (front of layout_).
  const Layout& layout() const { return layout_[0]; }
  // Attempts to replace the active layout with `layout` at the given
  // priority. Returns true when the update was accepted; see the
  // definition in layout_assignment.cc for the arbitration rules.
  bool UpdateLayout(int64_t priority, const Layout& layout, bool mandatory,
                    bool dfs, LayoutAssignment* assignment,
                    const HloInstruction* from_user = nullptr);
  std::string ToString() const override;
 private:
  // layout_[0] is the active layout; presumably later entries retain
  // previously accepted layouts for negotiation — confirm in the .cc.
  absl::InlinedVector<Layout, 2> layout_;
  const LogicalBuffer* buffer_;
  // The user instruction that most recently drove an update, if any.
  const HloInstruction* from_user_ = nullptr;
};
// Constraint on the layout of the shape an instruction sees for one of
// its operands (operand `operand_no` of `instruction`).
class OperandLayoutConstraint : public LayoutConstraint {
 public:
  OperandLayoutConstraint(const ShapeLayout& shape_layout,
                          const HloInstruction* instruction, int64_t operand_no,
                          bool mandatory, bool dfs, int64_t priority);
  // The currently active shape layout (front of shape_layout_).
  const ShapeLayout& shape_layout() const { return shape_layout_[0]; }
  const HloInstruction* instruction() const { return instruction_; }
  int64_t operand_no() const { return operand_no_; }
  // Convenience accessor for the constrained operand itself.
  const HloInstruction* operand() const {
    return instruction_->operand(operand_no_);
  }
  // Attempts to replace the active layout with the layout of `new_shape`
  // at the given priority; returns true when the update was accepted.
  bool UpdateLayout(int64_t priority, const Shape& new_shape, bool mandatory,
                    bool dfs, LayoutAssignment* assignment);
  std::string ToString() const override;
 private:
  // shape_layout_[0] is the active layout; presumably later entries keep
  // prior choices — confirm in the .cc.
  absl::InlinedVector<ShapeLayout, 2> shape_layout_;
  const HloInstruction* instruction_;
  int64_t operand_no_;
};
// Constraint on the ComputationLayout (parameter + result layouts) of a
// whole computation. layout_state_ is a bitmask tracking which parts of
// the layout have been explicitly set.
class ComputationLayoutConstraint : public LayoutConstraint {
 public:
  // layout_state_ values: no bits set means the default layout is in use;
  // kResultLayoutIsSet / kParameterLayoutIsSet are individual bits, and
  // kComputationLayoutIsSet is both bits (result | parameter).
  static constexpr int64_t kDefaultLayoutIsUsed = 0;
  static constexpr int64_t kResultLayoutIsSet = 1;
  static constexpr int64_t kParameterLayoutIsSet = 2;
  static constexpr int64_t kComputationLayoutIsSet = 3;
  // If `computation_layout` is null, a layout is derived from the
  // computation's program shape and the state is kDefaultLayoutIsUsed;
  // otherwise the given layout is copied and the state is
  // kComputationLayoutIsSet. The constraint is always mandatory and dfs.
  explicit ComputationLayoutConstraint(const HloComputation* computation,
                                       ComputationLayout* computation_layout,
                                       int64_t priority)
      : LayoutConstraint(true, true, priority),
        layout_state_((computation_layout == nullptr)
                          ? kDefaultLayoutIsUsed
                          : kComputationLayoutIsSet),
        computation_layout_(
            (computation_layout == nullptr)
                // false: presumably "do not ignore existing layouts" —
                // confirm against the ComputationLayout constructor.
                ? ComputationLayout(computation->ComputeProgramShape(),
                                    false)
                : *computation_layout) {}
  const ComputationLayout& computation_layout() const {
    return computation_layout_;
  }
  // Replaces the whole computation layout and optionally marks the result
  // and/or parameter layouts as explicitly set.
  void ResetComputationLayout(const ComputationLayout& layout, int64_t priority,
                              bool prop_result_layout,
                              bool prop_parameter_layout) {
    computation_layout_ = layout;
    priority_ = priority;
    if (prop_result_layout) {
      layout_state_ |= kResultLayoutIsSet;
    }
    if (prop_parameter_layout) {
      layout_state_ |= kParameterLayoutIsSet;
    }
  }
  // Replaces only the result layout and marks it as explicitly set.
  void ResetResultLayout(const ShapeLayout& shape_layout, int64_t priority) {
    *computation_layout_.mutable_result_layout() = shape_layout;
    layout_state_ |= kResultLayoutIsSet;
    priority_ = priority;
  }
  bool parameter_layout_is_set() const {
    return layout_state_ & kParameterLayoutIsSet;
  }
  bool result_layout_is_set() const {
    return layout_state_ & kResultLayoutIsSet;
  }
  bool default_layout_is_used() const {
    return layout_state_ == kDefaultLayoutIsUsed;
  }
  std::string ToString() const override;
 private:
  int64_t layout_state_;
  ComputationLayout computation_layout_;
};
// Tracks the layout chosen per channel id so that both endpoints of a
// channel (e.g. send/recv pairs) agree on the layout of the transferred
// data.
class ChannelLayoutConstraints {
 public:
  // Creates an empty constraint set.
  ChannelLayoutConstraints() = default;

  // Returns true if `channel_id` already has a layout constraint.
  bool IsChannelConstrained(int64_t channel_id) const {
    return constraints_.contains(channel_id);
  }

  // Returns `shape` with the constrained layout for `channel_id` applied.
  // The channel must already be constrained (CHECK-fails otherwise).
  Shape LayoutShapeForChannel(Shape shape, int64_t channel_id) const {
    auto it = constraints_.find(channel_id);
    CHECK(it != constraints_.end()) << "Channel " << channel_id;
    *shape.mutable_layout() = it->second;
    return shape;
  }

  // Returns the layout constraint for `channel_id`, which must already be
  // constrained (CHECK-fails otherwise).
  const Layout& LayoutForChannel(int64_t channel_id) const {
    auto it = constraints_.find(channel_id);
    CHECK(it != constraints_.end()) << "Channel " << channel_id;
    return it->second;
  }

  // Adds a layout constraint for `channel_id`. Returns nullptr when the
  // constraint was inserted or an identical one already existed; otherwise
  // returns a pointer to the existing, conflicting layout (which is left
  // in place).
  const Layout* ConstrainChannel(int64_t channel_id, const Layout& layout) {
    // try_emplace avoids building a pair (and copying `layout`) when the
    // channel is already constrained.
    auto it = constraints_.try_emplace(channel_id, layout);
    if (it.second) {
      return nullptr;
    }
    return LayoutUtil::Equal(layout, it.first->second) ? nullptr
                                                       : &it.first->second;
  }

 private:
  absl::flat_hash_map<int64_t, Layout> constraints_;
};
class LayoutAssignment : public HloModulePass {
public:
explicit LayoutAssignment(
ComputationLayout* entry_computation_layout,
ChannelLayoutConstraints* channel_constraints = nullptr,
bool reverse_computation_order = false);
~LayoutAssignment() override {}
const TuplePointsToAnalysis& points_to_analysis() const {
return *points_to_analysis_;
}
absl::string_view name() const override { return "layout-assignment"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
class LayoutConstraints {
public:
explicit LayoutConstraints(HloComputation* computation,
ComputationLayout* computation_layout,
int64_t priority)
: computation_(computation),
computation_constraint_(computation, computation_layout, priority) {}
~LayoutConstraints() = default;
const HloComputation* computation() const { return computation_; }
HloComputation* computation() { return computation_; }
void ResetOperandConstraints() { operand_constraints_.clear(); }
const ShapeLayout* OperandLayout(const HloInstruction* instruction,
int64_t operand_no) const;
const OperandLayoutConstraint* GetOperandLayoutConstraint(
const HloInstruction* instruction, int64_t operand_no) const;
OperandLayoutConstraint* MutableOperandLayoutConstraint(
const HloInstruction* instruction, int64_t operand_no);
const ShapeLayout* ResultLayout() const;
OperandLayoutConstraint* InsertOperandLayoutConstraint(
const HloInstruction* instruction, int64_t operand_no,
const OperandLayoutConstraint& constraint);
absl::Status SetResultLayout(LayoutAssignment* assignment,
const Shape& shape_with_layout,
int64_t priority);
const ComputationLayout& computation_layout() const {
return computation_constraint_.computation_layout();
}
const ComputationLayoutConstraint& computation_constraint() const {
return computation_constraint_;
}
ComputationLayoutConstraint* mutable_computation_constraint() {
return &computation_constraint_;
}
private:
using OperandConstraintKey = std::pair<const HloInstruction*, int64_t>;
std::map<OperandConstraintKey, OperandLayoutConstraint>
operand_constraints_;
HloComputation* computation_;
ComputationLayoutConstraint computation_constraint_;
};
static bool InstructionCanChangeLayout(const HloInstruction* instruction);
const LayoutConstraints& computation_constraints(
const HloComputation* computation) const {
return *FindOrDie(computation_layouts_, computation);
}
LayoutConstraints& mutable_computation_constraints(
const HloComputation* computation) {
return *FindOrDie(computation_layouts_, computation);
}
LayoutConstraints* mutable_computation_constraints(
HloComputation* computation) {
auto it = computation_layouts_.find(computation);
LayoutConstraints* constraints = nullptr;
if (it == computation_layouts_.end()) {
computation_layouts_.emplace(
computation,
constraints = new LayoutConstraints(
computation, nullptr, LayoutConstraint::kDefaultPriority));
} else {
constraints = (*it).second.get();
}
return constraints;
}
void PushAddedConstraints(const LayoutConstraint* constraint);
static bool IsAtMostRank1(const Shape& shape);
absl::Status SetArrayOperandLayout(const Layout& layout,
const HloInstruction* instruction,
int64_t operand_no, bool mandatory = true,
bool dfs = true) {
return SetArrayOperandLayout(layout, instruction, operand_no, mandatory,
dfs, current_priority_);
}
absl::Status SetArrayOperandLayout(const Layout& layout,
const HloInstruction* instruction,
int64_t operand_no, bool mandatory,
bool dfs, int64_t priority);
absl::Status SetInstructionLayout(const Shape& shape_with_layout,
const HloInstruction* instruction,
bool mandatory = true, bool dfs = true,
bool allow_alias = false) {
return SetInstructionLayout(shape_with_layout, instruction, mandatory, dfs,
allow_alias, current_priority_);
}
absl::Status SetInstructionLayout(const Shape& shape_with_layout,
const HloInstruction* instruction,
bool mandatory, bool dfs, bool allow_alias,
int64_t priority);
absl::Status SetInstructionLayout(const Layout& layout,
const HloInstruction* instruction,
bool mandatory = true, bool dfs = true,
bool allow_alias = false,
int64_t priority = -1);
absl::Status SetBufferLayout(const Layout& layout,
const LogicalBuffer& buffer,
bool mandatory = true, bool dfs = true) {
return SetBufferLayout(layout, buffer, mandatory, dfs, current_priority_);
}
absl::Status SetBufferLayout(const Layout& layout,
const LogicalBuffer& buffer, bool mandatory,
bool dfs, int64_t priority,
const HloInstruction* from_user = nullptr);
absl::Status SetOperandLayout(const Shape& shape_with_layout,
const HloInstruction* instruction,
int64_t operand_no, bool mandatory = true,
bool dfs = true) {
return SetOperandLayout(shape_with_layout, instruction, operand_no,
mandatory, dfs, current_priority_);
}
absl::Status SetOperandLayout(const Shape& shape_with_layout,
const HloInstruction* instruction,
int64_t operand_no, bool mandatory, bool dfs,
int64_t priority);
bool reverse_computation_order() const { return reverse_computation_order_; }
ComputationLayout& saved_entry_computation_layout() {
return saved_entry_computation_layout_;
}
virtual bool NegotiateLayout(const HloInstruction* instruction,
const Layout& new_layout,
const Layout& existing_layout,
const HloInstruction* from_user,
const HloInstruction* orig_user) {
return false;
}
virtual bool NegotiateOperandLayout(const HloInstruction* instruction,
int64_t operand_no,
const Layout& new_layout,
const Layout& existing_layout) {
return false;
}
virtual bool OperandLayoutAlwaysPropagateForward(const HloInstruction* user);
virtual bool OperandLayoutAlwaysPropagateToSiblings(
const HloInstruction* user);
virtual bool OutputLayoutAlwaysPropagateToOperands(
const HloInstruction* user);
virtual bool PropagateReductionLayoutToOperand(const HloInstruction* user) {
return false;
}
protected:
virtual absl::Status PropagateBufferConstraint(
const BufferLayoutConstraint& buffer_constraint,
LayoutConstraints* constraints);
virtual absl::Status PropagateOperandConstraint(
const OperandLayoutConstraint& operand_constraint,
LayoutConstraints* constraints);
virtual absl::Status PropagateResultConstraint(
const ComputationLayoutConstraint& layout_constraint,
LayoutConstraints* constraints);
virtual Layout GetUnconstrainedLayout(const LogicalBuffer& buffer) {
return LayoutUtil::GetDefaultLayoutForShape(buffer.shape());
}
virtual absl::Status Verify(const HloInstruction* instruction) {
return absl::OkStatus();
}
absl::Status PropagateUnconstraintedBuffers(LayoutConstraints* constraints);
const BufferLayoutConstraint* GetBufferLayoutConstraint(
const LogicalBuffer& buffer) const;
absl::StatusOr<const BufferLayoutConstraint*>
GetInstructionBufferLayoutConstraint(const HloInstruction* instruction) const;
PointsToSet::BufferSet* GetBufferSet(const HloInstruction* instruction) const;
bool AllOperandBuffersForwarded(const HloInstruction* instruction,
int64_t operand_no) const;
bool AnyOperandBufferForwarded(const HloInstruction* instruction,
int64_t operand_no) const;
absl::StatusOr<Layout> InferArrayLayout(const HloInstruction* instruction,
const ShapeIndex& index);
absl::Status PropagateBufferConstraintToUses(
const BufferLayoutConstraint& buffer_constraint,
LayoutConstraints* constraints);
absl::Status PropagateUseConstraintToDefs(
const ShapeLayout& shape_layout, const HloInstruction* instruction,
LayoutConstraints* constraints, int64_t priority,
const HloInstruction* user = nullptr);
virtual std::unique_ptr<Layout> ChooseOperandLayoutFromOutputLayout(
const Layout& output_layout, const HloInstruction* instruction,
int64_t operand_no);
virtual std::unique_ptr<Layout> ChooseOutputLayoutFromOperandLayout(
const Layout& operand_layout, const HloInstruction* user,
int64_t operand_no);
virtual bool InstructionCanChangeLayoutInstance(
const HloInstruction* instruction);
virtual Shape ShardedShape(const HloInstruction* call, const Shape& shape,
int param_id) {
return shape;
}
virtual Shape UnShardedShape(const HloInstruction* call, const Shape& shape,
int param_id) {
return shape;
}
absl::Status CheckCallLayout(HloInstruction* call,
const ComputationLayout& computation_layout);
private:
absl::Status Init(HloModule* module);
absl::Status AddMandatoryConstraints(
ChannelLayoutConstraints* channel_constraints,
LayoutConstraints* constraints);
  // Moves out and returns the constraints accumulated since the last call,
  // leaving the internal list empty for the next propagation round.
  std::vector<const LayoutConstraint*> ConsumeAddedConstraints() {
    std::vector<const LayoutConstraint*> ret_vec(std::move(added_constraints_));
    // A moved-from vector is valid but unspecified; clear() makes it
    // deterministically empty before reuse.
    added_constraints_.clear();
    return ret_vec;
  }
void ClearAddedConstraints() { added_constraints_.clear(); }
virtual absl::Status AddBackendConstraints(LayoutConstraints* constraints) {
return absl::OkStatus();
}
absl::Status RunOnComputation(LayoutConstraints* constraints,
ChannelLayoutConstraints* channel_constraints);
absl::Status AssignLayouts(LayoutConstraints& constraints);
absl::Status PropagateConstraints(LayoutConstraints* constraints);
absl::Status PropagateBufferConstraintToOperands(
const BufferLayoutConstraint& buffer_constraint,
LayoutConstraints* constraints);
absl::Status CheckLayouts(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
absl::Status CalculateComputationLayout(LayoutConstraints* constraints);
absl::Status ClearComputationLayouts(HloComputation* computation);
absl::Status ClearPreviousPassSideEffects(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
absl::Status PropagateComputationLayouts(
HloComputation* computation, ComputationLayout* computation_layout);
ComputationLayout* entry_computation_layout_;
ComputationLayout saved_entry_computation_layout_;
bool reverse_computation_order_;
protected:
static constexpr int64_t kNumberOfPropagationRounds = 2;
static void SetupCopiedInstruction(const HloInstruction& instruction,
HloInstruction* copy,
const ShapeIndex& index);
absl::StatusOr<HloInstruction*> CreateCopyWithNewLayout(
const Shape& shape_with_layout, HloInstruction* instruction);
virtual absl::Status CopyOperandIfLayoutsDiffer(
const ShapeLayout& operand_layout, HloInstruction* instruction,
int64_t operand_no);
void RegisterAddedCopy(HloInstruction* copy) {
CHECK_EQ(copy->opcode(), HloOpcode::kCopy);
added_copies_.insert(copy);
}
absl::Status AddCopyForOperand(HloInstruction* instruction,
int64_t operand_number);
absl::Status ConstrainChannelLayouts(
HloComputation* computation,
ChannelLayoutConstraints* channel_constraints);
  // Overwrites the caller-provided channel constraints with the member
  // copy channel_constraints_ — presumably a snapshot taken at pass start,
  // undoing channel constraints added during this run; confirm where
  // channel_constraints_ is populated.
  void ResetChannelConstraints() {
    if (channel_layout_constraints_ != nullptr) {
      *channel_layout_constraints_ = channel_constraints_;
    }
  }
absl::Status BuildHostChannelConstraints(HloComputation* computation);
std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
absl::flat_hash_set<const HloInstruction*> unconstrained_layout_instructions_;
HloPredicate instruction_can_change_layout_func_;
std::unique_ptr<CallGraph> call_graph_;
std::string ToString(const LayoutConstraints& constraints) const;
int64_t current_priority() const { return current_priority_; }
private: | #include "xla/service/layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace m = xla::match;
using ::testing::ElementsAre;
class LayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* m, ComputationLayout* entry_computation_layout,
ChannelLayoutConstraints* channel_constraints = nullptr) {
LayoutAssignment layout_assignment(
entry_computation_layout,
channel_constraints);
EXPECT_IS_OK(layout_assignment.Run(m).status());
}
std::vector<int64_t> LayoutOf(HloModule* m, absl::string_view name) {
HloInstruction* instr = FindInstruction(m, name);
CHECK(instr != nullptr) << name;
auto minor_to_major = instr->shape().layout().minor_to_major();
return std::vector<int64_t>(minor_to_major.begin(), minor_to_major.end());
}
void ExpectLayoutIs(const Shape& shape,
absl::Span<const int64_t> minor_to_major) {
const Layout expected = LayoutUtil::MakeLayout(minor_to_major);
EXPECT_TRUE(LayoutUtil::Equal(shape.layout(), expected))
<< "Expected layout " << expected << ", actual " << shape.layout();
}
void ExpectTupleLayoutIs(
const Shape& shape,
std::initializer_list<absl::Span<const int64_t>> minor_to_majors) {
int i = 0;
for (const absl::Span<const int64_t> minor_to_major : minor_to_majors) {
const Layout expected = LayoutUtil::MakeLayout(minor_to_major);
const Layout& actual = ShapeUtil::GetTupleElementShape(shape, i).layout();
EXPECT_TRUE(LayoutUtil::Equal(actual, expected))
<< "Expected tuple element " << i << " layout " << expected
<< ", actual " << actual;
++i;
}
}
};
// Verifies that when the computation layout constrains both parameters
// and the result to the same layout, that layout is applied to the
// parameters and the root — for both row-major and column-major orders.
TEST_F(LayoutAssignmentTest, ComputationLayout) {
  std::vector<std::vector<int64_t>> minor_to_majors = {{0, 1}, {1, 0}};
  for (auto& minor_to_major : minor_to_majors) {
    // Build add(param0, param1) with layout-free shapes.
    auto builder = HloComputation::Builder(TestName());
    Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
    auto param0 = builder.AddInstruction(
        HloInstruction::CreateParameter(0, ashape, "param0"));
    auto param1 = builder.AddInstruction(
        HloInstruction::CreateParameter(1, ashape, "param1"));
    auto add = builder.AddInstruction(
        HloInstruction::CreateBinary(ashape, HloOpcode::kAdd, param0, param1));
    auto m = CreateNewVerifiedModule();
    HloComputation* computation = m->AddEntryComputation(builder.Build());
    // Constrain both parameters and the result to the same layout.
    Layout layout = LayoutUtil::MakeLayout(minor_to_major);
    Shape shape(ashape);
    *shape.mutable_layout() = layout;
    const ShapeLayout shape_layout(shape);
    ComputationLayout computation_layout(computation->ComputeProgramShape());
    *computation_layout.mutable_parameter_layout(0) = shape_layout;
    *computation_layout.mutable_parameter_layout(1) = shape_layout;
    *computation_layout.mutable_result_layout() = shape_layout;
    AssignLayouts(m.get(), &computation_layout);
    // All three instructions must carry the constrained layout.
    EXPECT_TRUE(LayoutUtil::Equal(layout, param0->shape().layout()));
    EXPECT_TRUE(LayoutUtil::Equal(layout, param1->shape().layout()));
    EXPECT_TRUE(LayoutUtil::Equal(layout, add->shape().layout()));
  }
}
// add(param0, param1) where param0 and the result are column-major while
// param1 is row-major: each operand must keep its own constrained layout.
TEST_F(LayoutAssignmentTest, ComputationLayoutMixedLayout) {
  auto builder = HloComputation::Builder(TestName());
  Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ashape, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, ashape, "param1"));
  builder.AddInstruction(
      HloInstruction::CreateBinary(ashape, HloOpcode::kAdd, param0, param1));
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = m->AddEntryComputation(builder.Build());
  // Helper: stamp `ashape` with an explicit minor-to-major order.
  auto shape_with_order = [&ashape](absl::Span<const int64_t> minor_to_major) {
    Shape with_layout(ashape);
    *with_layout.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
    return with_layout;
  };
  const Shape col_major_shape = shape_with_order({1, 0});
  const Shape row_major_shape = shape_with_order({0, 1});
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(col_major_shape);
  *computation_layout.mutable_parameter_layout(1) =
      ShapeLayout(row_major_shape);
  *computation_layout.mutable_result_layout() = ShapeLayout(col_major_shape);
  AssignLayouts(m.get(), &computation_layout);
  // Each parameter keeps its own layout; the root follows the result
  // layout.
  EXPECT_TRUE(
      LayoutUtil::Equal(col_major_shape.layout(), param0->shape().layout()));
  EXPECT_TRUE(
      LayoutUtil::Equal(row_major_shape.layout(), param1->shape().layout()));
  EXPECT_TRUE(
      LayoutUtil::Equal(col_major_shape.layout(),
                        computation->root_instruction()->shape().layout()));
}
// Verifies that layouts propagate into a fusion: the fused parameters and
// the fused root get the constrained layout, while interior fused
// instructions are left without layouts.
TEST_F(LayoutAssignmentTest, FusionInstruction) {
  std::vector<std::vector<int64_t>> minor_to_majors = {{0, 1}, {1, 0}};
  for (auto& minor_to_major : minor_to_majors) {
    // Build negate(negate(add(c1, c2))) and fuse the three ops into a
    // loop fusion.
    auto builder = HloComputation::Builder(TestName());
    auto constant_literal1 = LiteralUtil::CreateR2WithLayout<float>(
        {{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout(minor_to_major));
    auto constant_literal2 = LiteralUtil::CreateR2WithLayout<float>(
        {{5.0, 6.0}, {7.0, 8.0}}, LayoutUtil::MakeLayout(minor_to_major));
    Shape ashape = constant_literal1.shape();
    auto constant1 = builder.AddInstruction(
        HloInstruction::CreateConstant(std::move(constant_literal1)));
    auto constant2 = builder.AddInstruction(
        HloInstruction::CreateConstant(std::move(constant_literal2)));
    auto add = builder.AddInstruction(HloInstruction::CreateBinary(
        ashape, HloOpcode::kAdd, constant1, constant2));
    auto negate1 = builder.AddInstruction(
        HloInstruction::CreateUnary(ashape, HloOpcode::kNegate, add));
    auto negate2 = builder.AddInstruction(
        HloInstruction::CreateUnary(ashape, HloOpcode::kNegate, negate1));
    auto m = CreateNewVerifiedModule();
    HloComputation* computation = m->AddEntryComputation(builder.Build());
    auto fusion = computation->CreateFusionInstruction(
        {negate2, negate1, add}, HloInstruction::FusionKind::kLoop);
    // Only the entry result layout is constrained.
    Layout layout = LayoutUtil::MakeLayout(minor_to_major);
    Shape shape(ashape);
    *shape.mutable_layout() = layout;
    const ShapeLayout shape_layout(shape);
    ComputationLayout computation_layout(computation->ComputeProgramShape());
    *computation_layout.mutable_result_layout() = shape_layout;
    AssignLayouts(m.get(), &computation_layout);
    // Fused parameters and the fused root receive the layout ...
    EXPECT_TRUE(LayoutUtil::Equal(
        layout, fusion->fused_parameter(0)->shape().layout()));
    EXPECT_TRUE(LayoutUtil::Equal(
        layout, fusion->fused_parameter(1)->shape().layout()));
    EXPECT_TRUE(LayoutUtil::Equal(
        layout, fusion->fused_expression_root()->shape().layout()));
    // ... but the interior fused instruction stays layout-free.
    EXPECT_FALSE(LayoutUtil::HasLayout(
        fusion->fused_expression_root()->operand(0)->shape()));
  }
}
// Verifies layout assignment on a tuple whose elements start with
// conflicting layouts: after the pass the tuple has a consistent layout
// and the GTE/negate chain matches the computed result layout.
TEST_F(LayoutAssignmentTest, TupleLayout) {
  auto builder = HloComputation::Builder(TestName());
  // Two constants with deliberately different layouts.
  auto constant0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
          {{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
          {{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant0, constant1}));
  auto get_element0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(constant0->shape(), tuple, 0));
  auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
      constant0->shape(), HloOpcode::kNegate, get_element0));
  auto m = CreateNewVerifiedModule();
  m->AddEntryComputation(builder.Build());
  // No explicit constraints beyond the default entry layout.
  ComputationLayout computation_layout(
      m->entry_computation()->ComputeProgramShape());
  AssignLayouts(m.get(), &computation_layout);
  // Both constants end up with matching layouts and the tuple is fully
  // laid out.
  EXPECT_TRUE(
      LayoutUtil::LayoutsInShapesEqual(constant0->shape(), constant1->shape()));
  EXPECT_TRUE(LayoutUtil::HasLayout(tuple->shape()));
  EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(
      negate->shape(), computation_layout.result_layout().shape()));
  EXPECT_TRUE(LayoutUtil::LayoutsInShapesEqual(
      ShapeUtil::GetTupleElementShape(tuple->shape(), 1), constant1->shape()));
}
// A nested tuple that references the same inner tuple twice, while the
// entry result layout demands a different layout for each occurrence.
// Layout assignment must insert a copy so both requested layouts can be
// satisfied.
TEST_F(LayoutAssignmentTest, ConflictingLayoutTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
  auto inner_tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({constant}));
  auto nested_tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({inner_tuple, inner_tuple}));
  auto m = CreateNewVerifiedModule();
  m->AddEntryComputation(builder.Build());
  ComputationLayout computation_layout(
      m->entry_computation()->ComputeProgramShape());
  // Demand {1,0} for the first occurrence and {0,1} for the second.
  Shape result_shape = nested_tuple->shape();
  *ShapeUtil::GetMutableSubshape(&result_shape, {0, 0}) =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0});
  *ShapeUtil::GetMutableSubshape(&result_shape, {1, 0}) =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1});
  TF_CHECK_OK(computation_layout.mutable_result_layout()->CopyLayoutFromShape(
      result_shape));
  // (A stray, unused `LayoutAssignment` local was removed here;
  // AssignLayouts constructs its own pass instance.)
  AssignLayouts(m.get(), &computation_layout);
  // Run a layout-sensitive simplification so only the copy required by the
  // conflicting layouts remains.
  AlgebraicSimplifierOptions options(
      [](const Shape&, const Shape&) { return false; });
  options.set_is_layout_sensitive(true);
  EXPECT_TRUE(AlgebraicSimplifier(options).Run(m.get()).value());
  HloInstruction* root = m->entry_computation()->root_instruction();
  // The result matches the requested layouts exactly, with the second
  // occurrence fed through a copy of the constant.
  EXPECT_TRUE(ShapeUtil::Equal(result_shape, root->shape()));
  EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::GetSubshape(result_shape, {0}),
                               root->operand(0)->shape()));
  EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::GetSubshape(result_shape, {1}),
                               root->operand(1)->shape()));
  EXPECT_THAT(root,
              GmockMatch(m::Tuple(m::Tuple(m::Op().Is(constant)),
                                  m::Tuple(m::Copy(m::Op().Is(constant))))));
}
// param -> log -> reshape -> tanh with non-standard parameter and result
// layouts. Checks that the relative minor-to-major order of dimensions is
// preserved across the elementwise ops and the reshape.
TEST_F(LayoutAssignmentTest, ElementwiseAndReshape) {
  auto builder = HloComputation::Builder(TestName());
  Shape ashape = ShapeUtil::MakeShape(F32, {1, 2, 3, 1});
  Shape bshape = ShapeUtil::MakeShape(F32, {3, 1, 2});
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ashape, "param"));
  auto log = builder.AddInstruction(
      HloInstruction::CreateUnary(ashape, HloOpcode::kLog, param));
  auto reshape =
      builder.AddInstruction(HloInstruction::CreateReshape(bshape, log));
  auto tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(bshape, HloOpcode::kTanh, reshape));
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = m->AddEntryComputation(builder.Build(tanh));
  // Constrain the parameter and the result to explicit layouts.
  Shape ashape_with_layout(ashape);
  Shape bshape_with_layout(bshape);
  *ashape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({0, 2, 1, 3});
  *bshape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({2, 1, 0});
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(ashape_with_layout);
  *computation_layout.mutable_result_layout() = ShapeLayout(bshape_with_layout);
  AssignLayouts(m.get(), &computation_layout);
  // `log` must keep dim 2 more minor than dim 1 (as in the parameter
  // layout {0,2,1,3}).
  auto log_minor_to_major = log->shape().layout().minor_to_major();
  EXPECT_GT(PositionInContainer(log_minor_to_major, 1),
            PositionInContainer(log_minor_to_major, 2));
  // `reshape` must keep dim 2 more minor than dim 0 (as in the result
  // layout {2,1,0}).
  auto reshape_minor_to_major = reshape->shape().layout().minor_to_major();
  EXPECT_GT(PositionInContainer(reshape_minor_to_major, 0),
            PositionInContainer(reshape_minor_to_major, 2));
}
// param -> log -> transpose -> tanh with the parameter constrained to
// {1,0} and the result to {0,1}: elementwise ops must follow the layout
// constraint on their side of the transpose.
TEST_F(LayoutAssignmentTest, ElementwiseAndTranspose) {
  auto builder = HloComputation::Builder(TestName());
  const Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
  const Shape bshape = ShapeUtil::MakeShape(F32, {12, 42});
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ashape, "param"));
  auto log = builder.AddInstruction(
      HloInstruction::CreateUnary(ashape, HloOpcode::kLog, param));
  auto transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(bshape, log, {1, 0}));
  auto tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(bshape, HloOpcode::kTanh, transpose));
  auto m = CreateNewVerifiedModule();
  auto computation = m->AddEntryComputation(builder.Build(tanh));
  // Same dims as ashape/bshape, but with explicit dense layouts.
  const Shape ashape_with_layout =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {42, 12}, {1, 0});
  const Shape bshape_with_layout =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 42}, {0, 1});
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(ashape_with_layout);
  *computation_layout.mutable_result_layout() = ShapeLayout(bshape_with_layout);
  AssignLayouts(m.get(), &computation_layout);
  // log inherits the parameter layout; transpose and tanh carry the
  // result layout.
  EXPECT_TRUE(
      LayoutUtil::Equal(ashape_with_layout.layout(), log->shape().layout()));
  EXPECT_TRUE(LayoutUtil::Equal(bshape_with_layout.layout(),
                                transpose->shape().layout()));
  EXPECT_TRUE(
      LayoutUtil::Equal(bshape_with_layout.layout(), tanh->shape().layout()));
}
// param -> broadcast -> transpose, with the parameter and the (transposed)
// result constrained. Checks the layout chosen for the intermediate
// broadcast.
TEST_F(LayoutAssignmentTest, BroadcastAndTranspose) {
  auto builder = HloComputation::Builder(TestName());
  Shape ashape = ShapeUtil::MakeShape(F32, {3, 4});
  Shape bshape = ShapeUtil::MakeShape(F32, {2, 3, 4});
  Shape cshape = ShapeUtil::MakeShape(F32, {4, 3, 2});
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ashape, "param"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(bshape, param, {1, 2}));
  auto transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(cshape, broadcast, {2, 1, 0}));
  auto m = CreateNewVerifiedModule();
  HloComputation* computation =
      m->AddEntryComputation(builder.Build(transpose));
  // Constrain the input to {1,0} and the output to {2,1,0}.
  Shape input_shape_with_layout(ashape);
  Shape output_shape_with_layout(cshape);
  *input_shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({1, 0});
  *output_shape_with_layout.mutable_layout() =
      LayoutUtil::MakeLayout({2, 1, 0});
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(input_shape_with_layout);
  *computation_layout.mutable_result_layout() =
      ShapeLayout(output_shape_with_layout);
  AssignLayouts(m.get(), &computation_layout);
  // The broadcast picks up the result layout as seen through the
  // transpose's permutation.
  EXPECT_THAT(broadcast->shape().layout().minor_to_major(),
              ElementsAre(0, 1, 2));
}
// Verifies layout assignment when a broadcast has two users (a transpose and
// an elementwise tanh): each user's layout constraint is satisfied without
// conflicting with the shared operand's layout.
TEST_F(LayoutAssignmentTest, ReshapeOperandHasMultipleUsers) {
  Shape f32_4 = ShapeUtil::MakeShape(F32, {4});
  Shape f32_34 = ShapeUtil::MakeShape(F32, {3, 4});
  Shape f32_43 = ShapeUtil::MakeShape(F32, {4, 3});
  Shape f32_234 = ShapeUtil::MakeShape(F32, {2, 3, 4});
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32_4, "param"));
  // `broadcast` is used both by `transpose` and by `tanh` below.
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32_34, param, {1}));
  auto transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(f32_43, broadcast, {1, 0}));
  auto tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(f32_34, HloOpcode::kTanh, broadcast));
  auto broadcast2 = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32_234, tanh, {1, 2}));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({transpose, broadcast2}));
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = m->AddEntryComputation(builder.Build(tuple));
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  // Pin parameter layout and the layouts of both tuple result elements.
  Shape param_shape_with_layout(f32_4);
  Shape transpose_shape_with_layout(f32_43);
  Shape broadcast2_shape_with_layout(f32_234);
  *param_shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({0});
  *transpose_shape_with_layout.mutable_layout() =
      LayoutUtil::MakeLayout({1, 0});
  *broadcast2_shape_with_layout.mutable_layout() =
      LayoutUtil::MakeLayout({2, 1, 0});
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(param_shape_with_layout);
  *computation_layout.mutable_result_layout() =
      ShapeLayout(ShapeUtil::MakeTupleShape(
          {transpose_shape_with_layout, broadcast2_shape_with_layout}));
  AssignLayouts(m.get(), &computation_layout);
  EXPECT_THAT(broadcast->shape().layout().minor_to_major(), ElementsAre(0, 1));
  EXPECT_THAT(transpose->shape().layout().minor_to_major(), ElementsAre(1, 0));
  EXPECT_THAT(tanh->shape().layout().minor_to_major(), ElementsAre(0, 1));
}
// Test-only LayoutAssignment subclass that forces every same-rank operand of
// a constrained instruction to adopt the instruction's buffer layout. Used to
// exercise operand-constraint propagation paths in the base pass.
class OperandsMustBeTheSameLayoutAssignment : public LayoutAssignment {
 public:
  explicit OperandsMustBeTheSameLayoutAssignment(
      ComputationLayout* entry_computation_layout)
      : LayoutAssignment(entry_computation_layout) {}

 protected:
  absl::Status PropagateBufferConstraint(
      const BufferLayoutConstraint& buffer_constraint,
      LayoutConstraints* constraints) override {
    const LogicalBuffer& buffer = buffer_constraint.buffer();
    const HloInstruction* instruction = buffer.instruction();
    // Force the same layout on all operands whose rank matches the output;
    // operands of other ranks cannot share the layout and are skipped.
    for (int64_t operand_no = 0; operand_no < instruction->operand_count();
         ++operand_no) {
      const HloInstruction* operand = instruction->operand(operand_no);
      if (instruction->shape().rank() != operand->shape().rank()) {
        continue;
      }
      TF_RETURN_IF_ERROR(SetArrayOperandLayout(buffer_constraint.layout(),
                                               instruction, operand_no,
                                               true));
    }
    // Continue with the default propagation to the buffer's uses.
    return PropagateBufferConstraintToUses(buffer_constraint, constraints);
  }
};
// Verifies that the forced-same-operand-layout subclass above makes both
// concatenate operands (pinned to conflicting {0,1} and {1,0} parameter
// layouts) agree with each other and with the concatenate's own layout.
TEST_F(LayoutAssignmentTest, MakeOperandsTheSame) {
  auto builder = HloComputation::Builder(TestName());
  Shape ashape = ShapeUtil::MakeShape(F32, {50, 1});
  Shape bshape = ShapeUtil::MakeShape(F32, {50, 2});
  Shape cshape = ShapeUtil::MakeShape(F32, {100});
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ashape, "param"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, ashape, "param"));
  auto concatenate = builder.AddInstruction(
      HloInstruction::CreateConcatenate(bshape, {param0, param1}, 1));
  auto reshape = builder.AddInstruction(
      HloInstruction::CreateReshape(cshape, concatenate));
  auto m = CreateNewVerifiedModule();
  HloComputation* computation = m->AddEntryComputation(builder.Build(reshape));
  // Deliberately give the two parameters conflicting layouts.
  Shape param0_shape_with_layout(ashape);
  Shape param1_shape_with_layout(ashape);
  *param0_shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
  *param1_shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout({1, 0});
  ComputationLayout computation_layout(computation->ComputeProgramShape());
  *computation_layout.mutable_parameter_layout(0) =
      ShapeLayout(param0_shape_with_layout);
  *computation_layout.mutable_parameter_layout(1) =
      ShapeLayout(param1_shape_with_layout);
  OperandsMustBeTheSameLayoutAssignment layout_assignment(&computation_layout);
  EXPECT_IS_OK(layout_assignment.Run(m.get()).status());
  EXPECT_EQ(concatenate->operand(0)->shape().layout().minor_to_major(),
            concatenate->operand(1)->shape().layout().minor_to_major());
  EXPECT_EQ(concatenate->shape().layout().minor_to_major(),
            concatenate->operand(1)->shape().layout().minor_to_major());
}
// With the operand's layout fixed up front, layout assignment should pick a
// transpose output layout that turns the transpose into a pure bitcast.
TEST_F(LayoutAssignmentTest, TransposeToBitcastFromOperand) {
  HloComputation::Builder b(TestName());
  const Shape operand_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 5, 6, 7}, {2, 0, 3, 1});
  HloInstruction* operand = b.AddInstruction(
      HloInstruction::CreateParameter(0, operand_shape, "param"));
  HloInstruction* transpose = b.AddInstruction(HloInstruction::CreateTranspose(
      ShapeUtil::MakeShape(F32, {6, 7, 3, 5}), operand, {2, 3, 0, 1}));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry = module->AddEntryComputation(b.Build(transpose));
  ComputationLayout entry_layout(entry->ComputeProgramShape());
  AssignLayouts(module.get(), &entry_layout);
  EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(transpose->operand(0)->shape(),
                                            transpose->shape(), {2, 3, 0, 1}));
}
// Mirror of the previous test: here the constraint flows from the user side
// (the transpose result), and the chosen layouts should still make the
// transpose a bitcast.
TEST_F(LayoutAssignmentTest, TransposeToBitcastToUser) {
  HloComputation::Builder b(TestName());
  const Shape broadcast_shape = ShapeUtil::MakeShape(F32, {3, 5, 6, 7});
  HloInstruction* scalar = b.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  HloInstruction* bcast = b.AddInstruction(
      HloInstruction::CreateBroadcast(broadcast_shape, scalar, {}));
  HloInstruction* transpose = b.AddInstruction(HloInstruction::CreateTranspose(
      ShapeUtil::MakeShape(F32, {6, 7, 3, 5}), bcast, {2, 3, 0, 1}));
  auto module = CreateNewVerifiedModule();
  HloComputation* entry = module->AddEntryComputation(b.Build(transpose));
  ComputationLayout entry_layout(entry->ComputeProgramShape());
  AssignLayouts(module.get(), &entry_layout);
  EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(transpose->operand(0)->shape(),
                                            transpose->shape(), {2, 3, 0, 1}));
}
// TransposeIsBitcast requires both shapes to carry layouts; calling it with a
// layout-less result shape must trip the "has_layout" check.
TEST_F(LayoutAssignmentTest, TransposeIsBitcastFail) {
  HloComputation::Builder b(TestName());
  const Shape base_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
  Shape laid_out_shape(base_shape);
  *laid_out_shape.mutable_layout() = LayoutUtil::MakeLayout({2, 1, 0});
  HloInstruction* arg = b.AddInstruction(
      HloInstruction::CreateParameter(0, laid_out_shape, "param"));
  HloInstruction* transpose = b.AddInstruction(
      HloInstruction::CreateTranspose(base_shape, arg, {0, 2, 1}));
  // Strip the layout so the precondition check fires.
  LayoutUtil::ClearLayout(transpose->mutable_shape());
  EXPECT_DEATH(
      ShapeUtil::TransposeIsBitcast(transpose->operand(0)->shape(),
                                    transpose->shape(),
                                    transpose->dimensions()),
      "has_layout");
}
// ReshapeIsBitcast likewise requires layouts on both shapes; a layout-less
// result shape must trip the "has_layout" check.
TEST_F(LayoutAssignmentTest, ReshapeIsBitcastFail) {
  HloComputation::Builder b(TestName());
  const Shape base_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
  Shape laid_out_shape(base_shape);
  *laid_out_shape.mutable_layout() = LayoutUtil::MakeLayout({2, 1, 0});
  HloInstruction* arg = b.AddInstruction(
      HloInstruction::CreateParameter(0, laid_out_shape, "param"));
  HloInstruction* reshape =
      b.AddInstruction(HloInstruction::CreateReshape(base_shape, arg));
  // Strip the layout so the precondition check fires.
  LayoutUtil::ClearLayout(reshape->mutable_shape());
  EXPECT_DEATH(
      ShapeUtil::ReshapeIsBitcast(reshape->operand(0)->shape(),
                                  reshape->shape()),
      "has_layout");
}
// Regression test: compiling a module whose fusion body contains a transpose
// must run through the full backend pipeline without crashing. Only the
// statuses are checked, not the produced layouts.
TEST_F(LayoutAssignmentTest, TransposeWithinFusionDoesNotCrash) {
  const char* module_str = R"(
    HloModule test_module
    fused_computation {
      param_1 = f32[2,2,2]{2,1,0} parameter(1)
      transpose = f32[2,2,2]{2,1,0} transpose(param_1), dimensions={0,2,1}
      reduce_1 = f32[] parameter(0)
      broadcast_1 = f32[2,2,2]{2,1,0} broadcast(reduce_1), dimensions={}
      ROOT divide_1 = f32[2,2,2]{2,1,0} divide(transpose, broadcast_1)
    }
    ENTRY entry_computation {
      fusion.1 = f32[2,2,2]{2,1,0} parameter(1)
      reduce.1 = f32[] parameter(0)
      fusion.2 = f32[2,2,2]{2,1,0} fusion(reduce.1, fusion.1), kind=kLoop, calls=fused_computation
      ROOT tuple.1 = (f32[2,2,2]{2,1,0}) tuple(fusion.2)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
                          ParseAndReturnVerifiedModule(module_str));
  // Run the whole HLO pass pipeline followed by backend code generation; the
  // test passes as long as both stages return OK.
  std::unique_ptr<HloModule> compiled_module =
      backend()
          .compiler()
          ->RunHloPasses(m->Clone(), backend().default_stream_executor(),
                         nullptr)
          .value();
  EXPECT_EQ(absl::OkStatus(),
            backend()
                .compiler()
                ->RunBackend(std::move(compiled_module),
                             backend().default_stream_executor(),
                             nullptr)
                .status());
}
// Verifies that get-tuple-element instructions inside a fusion inherit the
// layouts of the corresponding elements of their (nested) tuple operand, as
// pinned by the entry parameter layout.
TEST_F(LayoutAssignmentTest, GTEInheritsLayoutFromOperand) {
  const char* module_str = R"(
    HloModule test_module
    fused_computation {
      fparam = (f32[2,2,2], (f32[2,2,2], f32[2,2,2])) parameter(0)
      gte0 = f32[2,2,2] get-tuple-element(fparam), index=0
      gte1 = (f32[2,2,2], f32[2,2,2]) get-tuple-element(fparam), index=1
      gte1a = f32[2,2,2] get-tuple-element(gte1), index=0
      gte1b = f32[2,2,2] get-tuple-element(gte1), index=1
      add = f32[2,2,2] add(gte1a, gte1b)
      ROOT fresult = f32[2,2,2] add(gte0, add)
    }
    ENTRY entry_computation {
      param = (f32[2,2,2], (f32[2,2,2], f32[2,2,2])) parameter(0)
      ROOT fusion =
        f32[2,2,2] fusion(param), kind=kLoop, calls=fused_computation
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
                          ParseAndReturnVerifiedModule(module_str));
  ComputationLayout computation_layout(
      m->entry_computation()->ComputeProgramShape());
  // Give each leaf of the nested tuple parameter a distinct layout so
  // inheritance through the GTEs is observable.
  Shape param_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2, 2}, {0, 1, 2}),
       ShapeUtil::MakeTupleShape({
           ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2, 2}, {1, 2, 0}),
           ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2, 2}, {2, 0, 1}),
       })});
  TF_ASSERT_OK(
      computation_layout.mutable_parameter_layout(0)->CopyLayoutFromShape(
          param_shape));
  computation_layout.mutable_result_layout()->ResetLayout(
      LayoutUtil::MakeLayout({2, 1, 0}));
  AssignLayouts(m.get(), &computation_layout);
  // Each GTE carries the layout of the tuple element it extracts.
  EXPECT_THAT(LayoutOf(m.get(), "gte0"), ElementsAre(0, 1, 2));
  EXPECT_THAT(LayoutOf(m.get(), "gte1a"), ElementsAre(1, 2, 0));
  EXPECT_THAT(LayoutOf(m.get(), "gte1b"), ElementsAre(2, 0, 1));
  EXPECT_THAT(LayoutOf(m.get(), "fresult"), ElementsAre(2, 1, 0));
  // The intermediate tuple-typed GTE keeps per-element layouts as well.
  EXPECT_THAT(FindInstruction(m.get(), "gte1")
                  ->shape()
                  .tuple_shapes(0)
                  .layout()
                  .minor_to_major(),
              ElementsAre(1, 2, 0));
  EXPECT_THAT(FindInstruction(m.get(), "gte1")
                  ->shape()
                  .tuple_shapes(1)
                  .layout()
                  .minor_to_major(),
              ElementsAre(2, 0, 1));
}
TEST_F(LayoutAssignmentTest, ConditionalAsymmetricLayout) {
auto builder = HloComputation::Builder(TestName());
auto m = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(F32, {128, 8});
Shape tshape = ShapeUtil::MakeTupleShape({shape, shape});
Shape result_tshape = ShapeUtil::MakeTupleShape({shape});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
auto pred = builder.AddInstruction(HloInstruction::Create |
1,967 | cpp | tensorflow/tensorflow | memory_space_propagation | third_party/xla/xla/service/memory_space_propagation.cc | third_party/xla/xla/service/memory_space_propagation_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_PROPAGATION_H_
#define XLA_SERVICE_MEMORY_SPACE_PROPAGATION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that propagates memory-space annotations (stored in layouts) across
// fusion boundaries: each fusion operand/output pushes its memory space onto
// the matching fusion parameter / fused root value, transitively through
// aliased positions and uses.
class MemorySpacePropagation : public HloModulePass {
 public:
  ~MemorySpacePropagation() override = default;
  absl::string_view name() const override { return "memory-space-propagation"; }
  using HloPassInterface::Run;
  // Returns true iff any memory space annotation was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  // Sets the memory space of every position and use aliased with the value at
  // `index` within `callee_instruction` to `memory_space`. Returns true iff
  // any layout was modified.
  bool Propagate(ShapeIndexView index, const HloInstruction* callee_instruction,
                 int64_t memory_space) const;
  std::unique_ptr<HloDataflowAnalysis> dataflow_analysis_;
};
}
#endif
#include "xla/service/memory_space_propagation.h"
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> MemorySpacePropagation::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool modified = false;
  // Run dataflow analysis with ssa_form=false and bitcast_defines_value=true
  // (positional arguments below) so bitcasts define their own values.
  TF_ASSIGN_OR_RETURN(auto dataflow_analysis,
                      HloDataflowAnalysis::Run(*module, false,
                                               true));
  dataflow_analysis_ = std::move(dataflow_analysis);
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kFusion) {
        // Push each fusion operand's per-leaf memory space into the
        // corresponding fused-computation parameter.
        for (int operand_idx = 0; operand_idx < instruction->operand_count();
             ++operand_idx) {
          ShapeUtil::ForEachLeafShape(
              instruction->operand(operand_idx)->shape(),
              [&](const Shape& sub_shape, const ShapeIndex& index) {
                int64_t memory_space = sub_shape.layout().memory_space();
                modified |=
                    Propagate(index, instruction->fused_parameter(operand_idx),
                              memory_space);
              });
        }
        // Push the fusion output's per-leaf memory space into the fused
        // computation's root.
        ShapeUtil::ForEachLeafShape(
            instruction->shape(),
            [&](const Shape& sub_shape, const ShapeIndex& index) {
              int64_t memory_space = sub_shape.layout().memory_space();
              modified |= Propagate(index, instruction->fused_expression_root(),
                                    memory_space);
            });
      }
    }
  }
  return modified;
}
bool MemorySpacePropagation::Propagate(ShapeIndexView index,
                                       const HloInstruction* callee_instruction,
                                       int64_t memory_space) const {
  bool modified = false;
  const HloValue& value = dataflow_analysis_->GetUniqueValueAt(
      callee_instruction, ShapeIndex(index));
  // Rewrite the memory space at every position where this value appears.
  for (const HloPosition& position : value.positions()) {
    HloInstruction* instruction = position.instruction;
    Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(),
                                                 position.index);
    if (shape->layout().memory_space() == memory_space) {
      continue;  // Already correct; avoids infinite recursion via the
                 // recursive calls below.
    }
    shape->mutable_layout()->set_memory_space(memory_space);
    modified = true;
    // A nested fusion's output aliases its fused root: recurse inward.
    if (instruction->opcode() == HloOpcode::kFusion) {
      Propagate(position.index, instruction->fused_expression_root(),
                memory_space);
    }
    // If this position is the root of a fused computation, the enclosing
    // fusion instruction aliases it: recurse outward.
    const HloInstruction* parent_fusion =
        instruction->parent()->FusionInstruction();
    if (instruction == instruction->parent()->root_instruction() &&
        parent_fusion->parent()->IsFusionComputation()) {
      Propagate(position.index, parent_fusion, memory_space);
    }
    // A fusion parameter aliases the corresponding operand of the enclosing
    // fusion instruction: recurse outward through the operand.
    if (instruction->opcode() == HloOpcode::kParameter &&
        parent_fusion->parent()->IsFusionComputation()) {
      const HloInstruction* fusion_operand =
          parent_fusion->operand(instruction->parameter_number());
      Propagate(position.index, fusion_operand, memory_space);
    }
  }
  // Uses of the value inside other fusions alias those fusions' parameters.
  for (const HloUse& use : value.GetUses()) {
    if (use.instruction->opcode() == HloOpcode::kFusion) {
      modified |= Propagate(
          use.operand_index,
          use.instruction->fused_parameter(use.operand_number), memory_space);
    }
  }
  return modified;
}
} | #include "xla/service/memory_space_propagation.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
// Test fixture. Uses a verifier constructed with both boolean options set to
// false (presumably layout-sensitivity flags — confirm against HloVerifier's
// constructor) so modules with partial memory-space annotations verify.
class MemorySpacePropagationTest : public HloTestBase {
 public:
  MemorySpacePropagationTest()
      : HloTestBase(),
        verifier_(false, false) {
  }
  // Runs the fixture's verifier over `module` and returns its status.
  absl::Status Verify(HloModule* module) {
    return verifier_.Run(module).status();
  }

 private:
  HloVerifier verifier_;
};
// When no operand or output of the fusion carries a non-default memory space,
// the pass must report "not modified" and leave the module hash unchanged.
TEST_F(MemorySpacePropagationTest, NoMemorySpace) {
  absl::string_view hlo_string = R"(
  HloModule NoMemorySpace
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)} parameter(0)
    ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)} copy(%param2)
    %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[6]{0:T(128)} copy(%fusion)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_FALSE(memory_space_propagation.Run(module.get()).value());
  // The module must be bit-identical (by hash) to a freshly parsed copy.
  TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
// Fusion with S(1)-annotated operands and output: the pass must push S(1)
// onto the matching fused parameters and the fused root, producing a module
// hash-equal to the hand-written expected module.
TEST_F(MemorySpacePropagationTest, NonTupleOutput) {
  absl::string_view hlo_string = R"(
  HloModule NonTupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)} parameter(0)
    ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[6]{0:T(128)} copy(%fusion)
  }
  )";
  // Same module with S(1) propagated into the fused computation.
  absl::string_view expected_hlo_string = R"(
  HloModule NonTupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
    ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[6]{0:T(128)} copy(%fusion)
  }
  )";
  // Input is parsed unverified: mixed memory spaces inside the fusion would
  // fail the default layout-sensitive verifier before propagation.
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value())
;
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
// Tuple-output fusion: only tuple element 0 is annotated S(1) at the call
// site, so inside the fusion only the value feeding that element (%add.0)
// changes memory space; %multiply.0 stays in the default space.
TEST_F(MemorySpacePropagationTest, TupleOutput) {
  absl::string_view hlo_string = R"(
  HloModule TupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)} parameter(0)
    %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
    %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
    ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
  }
  )";
  // Expected module after propagation into the fused computation.
  absl::string_view expected_hlo_string = R"(
  HloModule TupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
    %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
    %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
    ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
// Nested fusion on the operand side: the S(1) annotation on the outer fusion
// operand must flow through the outer parameter into the inner (bitcast)
// fusion's parameter, while the bitcast result stays in the default space.
TEST_F(MemorySpacePropagationTest, NestedInputFusion) {
  absl::string_view hlo_string = R"(
  HloModule NestedFusion
  %bitcast_fusion {
    %bf_param = s32[3,2]{0,1:T(128)} parameter(0)
    ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
  }
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[3,2]{0,1:T(128)} parameter(0)
    %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
    ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
  }
  ENTRY %entry {
    %param0 = s32[3,2]{0,1:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[6]{0:T(128)} copy(%fusion)
  }
  )";
  // Expected module: S(1) reaches both the outer %param_0.1 and the nested
  // %bf_param.
  absl::string_view expected_hlo_string = R"(
  HloModule NestedFusion
  %bitcast_fusion {
    %bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0)
    ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
  }
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0)
    %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
    ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
  }
  ENTRY %entry {
    %param0 = s32[3,2]{0,1:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[6]{0:T(128)} copy(%fusion)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
// Nested fusion on the output side: the outer fusion's S(1) result must flow
// inward through the nested (bitcast) fusion's root and onto the values that
// alias the outer fusion output.
TEST_F(MemorySpacePropagationTest, NestedOutputFusion) {
  absl::string_view hlo_string = R"(
  HloModule NestedFusion
  %bitcast_fusion {
    %bf_param = s32[6]{0:T(128)} parameter(0)
    ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param)
  }
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)} parameter(0)
    %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
  }
  )";
  // Expected module: S(1) reaches the nested fusion's root bitcast and the
  // outer fused root, while %add.0 itself keeps the default space.
  absl::string_view expected_hlo_string = R"(
  HloModule NestedFusion
  %bitcast_fusion {
    %bf_param = s32[6]{0:T(128)} parameter(0)
    ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param)
  }
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
    %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
    ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
    ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
// A bitcast inside the fusion aliases the S(1)-annotated parameter value;
// the parameter picks up S(1) while the bitcast's own output (feeding the
// default-space tuple element) stays in the default space.
TEST_F(MemorySpacePropagationTest, BitcastInFusion) {
  absl::string_view hlo_string = R"(
  HloModule TupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)} parameter(0)
    %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1)
    %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
    ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
  }
  )";
  // Expected module: the annotation flows into the fused parameters and the
  // operand references that alias them, but not onto %bitcast.0's output.
  absl::string_view expected_hlo_string = R"(
  HloModule TupleOutput
  %fused_computation {
    %param_1.3 = s32[1]{0:T(128)} parameter(1)
    %constant.2 = s32[]{:T(128)} constant(-2147483648)
    %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
    %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
    %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
    %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
    %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
    %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1)
    %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
    ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
  }
  ENTRY %entry {
    %param0 = s32[6]{0:T(128)} parameter(0)
    %param1 = s32[1]{0:T(128)} parameter(1)
    %param2 = s32[5]{0:T(128)} parameter(2)
    %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
    %arg1 = s32[1]{0:T(128)} copy(%param1)
    %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
    ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
}
} |
1,968 | cpp | tensorflow/tensorflow | sort_simplifier | third_party/xla/xla/service/sort_simplifier.cc | third_party/xla/xla/service/sort_simplifier_test.cc | #ifndef XLA_SERVICE_SORT_SIMPLIFIER_H_
#define XLA_SERVICE_SORT_SIMPLIFIER_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class SortSimplifier : public HloModulePass {
public:
absl::string_view name() const override { return "simplify-sorts"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/sort_simplifier.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
namespace {
absl::StatusOr<bool> RemoveUnusedOperandFromSort(HloInstruction* sort) {
if (!sort->shape().IsTuple()) {
return false;
}
HloComputation* computation = sort->parent();
if (computation->root_instruction() == sort) {
return false;
}
absl::flat_hash_set<int64_t> used_indices;
for (const HloInstruction* user : sort->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
used_indices.insert(user->tuple_index());
}
auto comparator = sort->to_apply();
for (int64_t i = 0; i < sort->operand_count() * 2; ++i) {
if (comparator->parameter_instruction(i)->user_count() > 0) {
used_indices.insert(i / 2);
}
}
if (used_indices.size() == sort->operand_count()) {
return false;
}
std::vector<HloInstruction*> operands;
std::vector<const Shape*> new_shapes;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
if (used_indices.contains(i)) {
operands.push_back(sort->mutable_operand(i));
new_shapes.push_back(&sort->operand(i)->shape());
}
}
Shape new_sort_shape = new_shapes.size() == 1
? *new_shapes[0]
: ShapeUtil::MakeTupleShapeWithPtrs(new_shapes);
HloInstruction* new_sort = computation->AddInstruction(
sort->CloneWithNewOperands(new_sort_shape, operands));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
int64_t parameter_number = 0;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
auto* old_lhs_parameter = comparator->parameter_instruction(i * 2);
auto* old_rhs_parameter = comparator->parameter_instruction(i * 2 + 1);
if (used_indices.contains(i)) {
Shape scalar_shape =
ShapeUtil::MakeShape(sort->operand(i)->shape().element_type(), {});
replacements[old_lhs_parameter] = HloInstruction::CreateParameter(
parameter_number, scalar_shape,
absl::StrCat("p.", parameter_number / 2, ".lhs"));
++parameter_number;
replacements[old_rhs_parameter] = HloInstruction::CreateParameter(
parameter_number, scalar_shape,
absl::StrCat("p.", parameter_number / 2, ".rhs"));
++parameter_number;
} else {
replacements[old_lhs_parameter] = nullptr;
replacements[old_rhs_parameter] = nullptr;
}
}
HloModule* module = sort->GetModule();
HloComputation* new_compare = module->AddEmbeddedComputation(
comparator->CloneWithReplacements(&replacements));
new_sort->set_to_apply(new_compare);
absl::flat_hash_map<int64_t, HloInstruction*> result_map;
if (new_sort->shape().IsTuple()) {
int64_t new_index = 0;
for (int64_t i = 0; i < sort->operand_count(); ++i) {
if (used_indices.count(i)) {
result_map[i] =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
*new_shapes[new_index], new_sort, new_index));
++new_index;
}
}
} else {
CHECK_EQ(used_indices.size(), 1);
result_map[*used_indices.begin()] = new_sort;
}
std::vector<HloInstruction*> users(sort->users().begin(),
sort->users().end());
for (HloInstruction* user : users) {
TF_RETURN_IF_ERROR(
user->ReplaceAllUsesWith(result_map.at(user->tuple_index())));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(user));
}
return true;
}
}
absl::StatusOr<bool> SortSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before SortSimplifier:";
XLA_VLOG_LINES(2, module->ToString());
bool changed = false;
std::vector<HloInstruction*> sort_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(sort_instrs),
HloPredicateIsOp<HloOpcode::kSort>);
}
for (HloInstruction* sort_instr : sort_instrs) {
TF_ASSIGN_OR_RETURN(bool result, RemoveUnusedOperandFromSort(sort_instr));
changed |= result;
}
if (changed) {
VLOG(2) << "HLO module after SortSimplifier:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after SortSimplifier";
}
return changed;
}
} | #include "xla/service/sort_simplifier.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using SortSimplifierTest = HloTestBase;
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandArrayResult) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
uint64_t num_executions = 0;
do {
num_executions++;
} while (simplifier.Run(module.get()).value());
EXPECT_EQ(num_executions, 2);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(0))));
}
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandTuple) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.2.lhs = u32[] parameter(4)
p.2.rhs = u32[] parameter(5)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,87] parameter(0)
values.0 = s32[64,87] parameter(1)
values.1 = u32[64,87] parameter(2)
sort = (f32[64,87], s32[64,87], u32[64,87]) sort(
keys, values.0, values.1),
dimensions={1}, to_apply=compare
gte.0 = f32[64,87] get-tuple-element(sort), index=0
gte.1 = u32[64,87] get-tuple-element(sort), index=2
ROOT tuple = (f32[64,87], u32[64,87]) tuple(gte.0, gte.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
EXPECT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
GmockMatch(m::Tuple(
m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 0),
m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 1))));
}
TEST_F(SortSimplifierTest, DontRemoveUnusedSortKey) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, to_apply=compare
ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
TEST_F(SortSimplifierTest, RemoveUnusedFirstOperand) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.1.lhs, p.1.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare
ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SortSimplifier simplifier;
uint64_t num_executions = 0;
do {
num_executions++;
} while (simplifier.Run(module.get()).value());
EXPECT_EQ(num_executions, 2);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(1))));
}
}
} |
1,969 | cpp | tensorflow/tensorflow | logistic_expander | third_party/xla/xla/service/logistic_expander.cc | third_party/xla/xla/service/logistic_expander_test.cc | #ifndef XLA_SERVICE_LOGISTIC_EXPANDER_H_
#define XLA_SERVICE_LOGISTIC_EXPANDER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
class LogisticExpander : public OpExpanderPass {
public:
LogisticExpander() = default;
~LogisticExpander() override = default;
absl::string_view name() const override { return "logistic-expander"; }
private:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/logistic_expander.h"
#include <optional>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
bool LogisticExpander::InstructionMatchesPattern(HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kLogistic;
}
absl::StatusOr<HloInstruction*> LogisticExpander::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* operand = instruction->mutable_operand(0);
const Shape operand_shape = operand->shape();
HloInstruction* one_constant = MakeScalarLike(operand, 1.0f);
HloInstruction* exp_instr =
MakeUnaryHlo(HloOpcode::kExp,
MakeUnaryHlo(HloOpcode::kNegate, operand).value())
.value();
HloInstruction* denominator =
MakeBinaryHlo(HloOpcode::kAdd, one_constant, exp_instr).value();
return MakeBinaryHlo(HloOpcode::kDivide, one_constant, denominator).value();
}
} | #include "xla/service/logistic_expander.h"
#include <memory>
#include <string_view>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dynamic_padder.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
class LogisticExpanderTest : public HloTestBase {};
TEST_F(LogisticExpanderTest, ExpandWith) {
const char* kModuleStr = R"(
HloModule m
test {
p = f32[2,3] parameter(0)
ROOT r = f32[2,3] logistic(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
auto computation = m->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kLogistic);
LogisticExpander logistic_expander;
ASSERT_TRUE(logistic_expander.Run(m.get()).value());
root = computation->root_instruction();
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Divide(
m::Broadcast(m::ConstantScalar(1.0)),
m::AddAnyOrder(m::Broadcast(m::ConstantScalar(1.0)),
m::Exp(m::Negate(m::Parameter(0)))))));
}
TEST_F(LogisticExpanderTest, DynamicDimensions) {
constexpr std::string_view hlo = R"(
HloModule DynamicDimensions
ENTRY main {
p = f32[<=10] parameter(0)
ROOT root = f32[<=10] logistic(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
LogisticExpander logistic_expander;
ASSERT_TRUE(logistic_expander.Run(module.get()).value());
DynamicPadder dynamic_padder;
EXPECT_TRUE(dynamic_padder.Run(module.get()).value());
}
}
} |
1,970 | cpp | tensorflow/tensorflow | hlo_computation_deduplicator | third_party/xla/xla/service/hlo_computation_deduplicator.cc | third_party/xla/xla/service/hlo_computation_deduplicator_test.cc | #ifndef XLA_SERVICE_HLO_COMPUTATION_DEDUPLICATOR_H_
#define XLA_SERVICE_HLO_COMPUTATION_DEDUPLICATOR_H_
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class HloComputationDeduplicator : public HloModulePass {
private:
bool ContainsLargeConstants(HloComputation* comp);
bool mark_fusion_duplications_;
public:
explicit HloComputationDeduplicator(bool mark_fusion_duplications = false)
: mark_fusion_duplications_(mark_fusion_duplications) {}
absl::string_view name() const override { return "computation-deduplicator"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/hlo_computation_deduplicator.h"
#include <algorithm>
#include <string>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
bool HloComputationDeduplicator::ContainsLargeConstants(HloComputation* comp) {
int total_size = 0;
for (HloInstruction* instruction : comp->instructions()) {
if (instruction->IsConstant()) {
total_size += ShapeUtil::ArrayDataSize(instruction->literal().shape());
if (total_size > 1024) {
return true;
}
}
}
return false;
}
absl::StatusOr<bool> HloComputationDeduplicator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
absl::flat_hash_map<std::string, HloComputation*> unique_comps;
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
HloPrintOptions options = HloPrintOptions::Canonical();
options.set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff);
options.set_print_infeed_outfeed_config(false);
options.set_print_only_essential_constants(true);
options.set_print_operand_shape(true);
options.set_print_ids(false);
options.set_canonicalize_computations(true);
auto comp_eq = [&replacement](const HloComputation* a,
const HloComputation* b) {
if (a->unique_id() == b->unique_id()) return true;
if (replacement.contains(a) &&
replacement.at(a)->unique_id() == b->unique_id()) {
return true;
}
if (replacement.contains(b) &&
replacement.at(b)->unique_id() == a->unique_id()) {
return true;
}
if (replacement.contains(a) && replacement.contains(b) &&
replacement.at(a)->unique_id() == replacement.at(b)->unique_id()) {
return true;
}
return false;
};
for (HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
if (comp->IsEntryComputation() || comp->instruction_count() > 128 ||
ContainsLargeConstants(comp) || comp->IsCollectiveCalledComputation()) {
continue;
}
std::string comp_str = comp->ToString(options);
auto poss_dup = unique_comps.find(comp_str);
if (poss_dup != unique_comps.end() &&
poss_dup->second->Equal(*comp, true,
comp_eq)) {
VLOG(2) << "Replacing " << comp->name() << " with "
<< poss_dup->second->name();
replacement[comp] = poss_dup->second;
} else {
unique_comps[std::move(comp_str)] = comp;
}
}
if (mark_fusion_duplications_) {
module->MarkFusionDuplications(replacement);
} else {
module->ReplaceComputations(replacement);
}
return !replacement.empty();
}
} | #include "xla/service/hlo_computation_deduplicator.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class HloComputationDeduplicatorTest : public HloTestBase {
protected:
std::vector<std::string> RunDeduplicatePass(const std::string_view text,
bool expect_true) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(text).value();
HloComputationDeduplicator dedup;
bool changed = dedup.Run(module.get()).value();
EXPECT_EQ(changed, expect_true);
std::vector<std::string> computation_names;
for (auto comp : module->computations()) {
computation_names.emplace_back(comp->name());
}
return computation_names;
}
};
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBandC) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0}, s32[20]{0})->s32[]}
region_A {
Arg_0.6 = s32[] parameter(0)
Arg_1.7 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.6, Arg_1.7)
}
region_B {
Arg_0.11 = s32[] parameter(0)
Arg_1.12 = s32[] parameter(1)
ROOT add.13 = s32[] add(Arg_0.11, Arg_1.12)
}
region_C {
Arg_0.17 = s32[] parameter(0)
Arg_1.18 = s32[] parameter(1)
ROOT add.19 = s32[] add(Arg_0.17, Arg_1.18)
}
ENTRY main.22 {
Arg_0.1 = s32[10]{0} parameter(0)
Arg_1.2 = s32[15]{0} parameter(1)
Arg_2.3 = s32[20]{0} parameter(2)
constant.4 = s32[] constant(0)
reduce.9 = s32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=region_A
reduce.14 = s32[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B
reduce.20 = s32[] reduce(Arg_2.3, constant.4), dimensions={0}, to_apply=region_C
multiply.15 = s32[] multiply(reduce.9, reduce.14)
ROOT multiply.21 = s32[] multiply(multiply.15, reduce.20)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
EXPECT_NE(name, "region_C");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBExactCopy) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionsWithSameSubcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_X {
Ag_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT their_sum = s32[] add(Ag_0, Arg_1)
}
region_Y {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT the_sum = s32[] add(Arg_0, Arg_1)
}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.16 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.17 {
Arg_0 = s32[10]{0} parameter(0)
Arg_1 = s32[15]{0} parameter(1)
rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15
rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16
ROOT ret = add(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
EXPECT_NE(name, "region_A");
EXPECT_NE(name, "region_Y");
EXPECT_NE(name, "main.16");
}
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionsWithDifferentSubcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_X {
Ag_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT their_sum = s32[] multiply(Ag_0, Arg_1)
}
region_Y {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT the_sum = s32[] add(Arg_0, Arg_1)
}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.16 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
main.17 {
Arg_0 = s32[10]{0} parameter(0)
Arg_1 = s32[15]{0} parameter(1)
rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15
rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16
ROOT ret = add(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
int region_x_count = 0;
int region_y_count = 0;
int main_16_count = 0;
int main_15_count = 0;
int region_a_count = 0;
int region_b_count = 0;
for (auto name : computation_names) {
region_x_count += (name == "region_X");
region_y_count += (name == "region_Y");
main_15_count += (name == "main.15");
main_16_count += (name == "main.16");
region_a_count += (name == "region_A");
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_a_count, 0);
EXPECT_EQ(region_b_count, 0);
EXPECT_EQ(main_15_count, 1);
EXPECT_EQ(main_16_count, 1);
EXPECT_EQ(region_x_count, 1);
EXPECT_EQ(region_y_count, 1);
}
TEST_F(HloComputationDeduplicatorTest, RemoveRegionBVarDifferences) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, true);
for (auto name : computation_names) {
EXPECT_NE(name, "region_B");
}
EXPECT_EQ(computation_names.size(), 2);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBCommutative) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0 = s32[] parameter(0)
Arg_1 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_1, Arg_0)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto name : computation_names) {
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionLargeConstant) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_00 = s32[] parameter(0)
Arg_1_1 = s32[] parameter(1)
Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
add1 = s32[10, 10] add(Arg_1, Arg_0)
add2 = s32[10, 10] add(Arg_2, Arg_3)
add3 = s32[10, 10] add(Arg_4, Arg_5)
add8 = s32[10, 10] add(add1, add2)
addv = s32[10, 10] add(add3, add8)
ROOT ret = add(Arg_00, Arg_1_1)
}
region_B {
Arg_00 = s32[] parameter(0)
Arg_1_1 = s32[] parameter(1)
Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10},
{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}})
add1 = s32[10, 10] add(Arg_1, Arg_0)
add2 = s32[10, 10] add(Arg_2, Arg_3)
add3 = s32[10, 10] add(Arg_4, Arg_5)
add8 = s32[10, 10] add(add1, add2)
addv = s32[10, 10] add(add3, add8)
ROOT ret = add(Arg_00, Arg_1_1)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto comp : computation_names) {
region_b_count += (comp == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentcomp) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.2 = s32[] parameter(0)
Arg_1.3 = s32[] parameter(1)
ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto name : computation_names) {
region_b_count += (name == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentType) {
const std::string_view text = R"(
HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s16[15]{0})->s16[]}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s16[] parameter(0)
Arg_1.6 = s16[] parameter(1)
ROOT add.7 = s16[] multiply(Arg_0.5, Arg_1.6)
}
ENTRY main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(5)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s16[15]{0} parameter(1)
constant.4 = s16[] constant(5)
rd2 = s16[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B
}
)";
auto computation_names = RunDeduplicatePass(text, false);
int region_b_count = 0;
for (auto comp : computation_names) {
region_b_count += (comp == "region_B");
}
EXPECT_EQ(region_b_count, 1);
EXPECT_EQ(computation_names.size(), 3);
}
// The ENTRY computation is itself named region_B and has a structurally
// identical non-entry sibling (region_A). All four computations must
// survive the pass (expected size 4), i.e. the entry computation is never
// removed by deduplication.
TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBEntryComp) {
  const std::string_view text = R"(
  HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]}
  region_A1 {
   Arg_0.5 = s32[] parameter(0)
   Arg_1.6 = s32[] parameter(1)
   ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
  }
  region_B1 {
   Arg_0.2 = s32[] parameter(0)
   Arg_1.3 = s32[] parameter(1)
   ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3)
  }
  ENTRY region_B {
   Arg_0.1 = s32[10]{0} parameter(0)
   constant.3 = s32[] constant(0)
   rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1
   Arg_1.2 = s32[15]{0} parameter(1)
   rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1
   ROOT multiply.14 = s32[] multiply(rd1, rd2)
  }
  region_A {
   Arg_0.1 = s32[10]{0} parameter(0)
   constant.3 = s32[] constant(0)
   rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1
   Arg_1.2 = s32[15]{0} parameter(1)
   rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1
   ROOT multiply.14 = s32[] multiply(rd1, rd2)
  }
  )";
  auto computation_names = RunDeduplicatePass(text, false);
  EXPECT_EQ(computation_names.size(), 4);
}
// Builds two structurally identical add-chain computations whose instruction
// count (128) is large, and checks that the deduplicator does NOT merge them
// (EXPECT_FALSE(changed); all total_regions + 1 computations survive).
//
// Fixes over the previous version:
//  - removed the unused (and uninstantiable-by-copy) local
//    `std::vector<HloComputation> comps;`
//  - `region` is now incremented inside the reduce-building loop; before,
//    ASSERT_LT(region, total_regions) was vacuous and every reduce consumed
//    insns[0]/consts[0] instead of its own (parameter, constant) pair.
TEST_F(HloComputationDeduplicatorTest, LargeSubComputationTest) {
  const Shape shape = ShapeUtil::MakeScalarShape(S32);
  const int total_regions = 2;
  const int max_insns = 128;
  auto module = CreateNewVerifiedModule();
  // Create `total_regions` identical chains of `max_insns` adds.
  for (int region = 0; region < total_regions; region++) {
    HloComputation::Builder builder("region_" + std::to_string(region));
    auto curr =
        builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a0"));
    auto next =
        builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "a1"));
    for (int i = 0; i < max_insns; i++) {
      next = builder.AddInstruction(
          HloInstruction::CreateBinary(shape, HloOpcode::kAdd, curr, next));
    }
    module->AddComputationAndUnifyNamesAndIds(builder.Build(), false);
  }
  HloComputation::Builder main("main_func");
  std::vector<HloInstruction *> insns;
  std::vector<HloInstruction *> consts;
  for (int region = 0; region < total_regions; region++) {
    insns.push_back(main.AddInstruction(
        HloInstruction::CreateParameter(region, ShapeUtil::MakeShape(S32, {10}),
                                        "a" + std::to_string(region))));
    consts.push_back(main.AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateR0<int32_t>(5 + region))));
  }
  // Reduce each parameter with its matching region computation.
  int region = 0;
  for (auto comp : module->computations()) {
    ASSERT_LT(region, total_regions);
    main.AddInstruction(HloInstruction::CreateReduce(
        ShapeUtil::MakeScalarShape(S32), insns[region], consts[region],
        {0}, comp));
    region++;
  }
  module->AddEntryComputation(main.Build());
  HloComputationDeduplicator dedup;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, dedup.Run(module.get()));
  EXPECT_FALSE(changed);
  std::vector<HloComputation *> computations = module->MakeComputationSorted();
  EXPECT_EQ(computations.size(), (total_regions + 1));
}
// add.1 (apply of a reduce) and add.2 (apply of an all-reduce) are
// structurally identical, yet the expected computation count of 3 shows the
// pass keeps both — apply computations of the all-reduce are not merged with
// those of the plain reduce.
TEST_F(HloComputationDeduplicatorTest, DontDeduplicateReduceAllReduce) {
  const std::string_view text = R"(
  HloModule TestModule
  add.1 {
   Arg_0 = s32[] parameter(0)
   Arg_1 = s32[] parameter(1)
   ROOT add.2 = s32[] add(Arg_0, Arg_1)
  }
  add.2 {
   Arg_0 = s32[] parameter(0)
   Arg_1 = s32[] parameter(1)
   ROOT add.2 = s32[] add(Arg_0, Arg_1)
  }
  ENTRY main {
   Arg_0.1 = s32[10] parameter(0)
   constant.3 = s32[] constant(0)
   rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=add.1
   Arg_1.1 = s32[] parameter(1)
   rd2 = s32[] all-reduce(Arg_1.1), to_apply=add.2
   ROOT multiply.14 = s32[] multiply(rd1, rd2)
  }
  )";
  auto computation_names = RunDeduplicatePass(text, false);
  EXPECT_EQ(computation_names.size(), 3);
}
}
} |
1,971 | cpp | tensorflow/tensorflow | compilation_environments | third_party/xla/xla/service/compilation_environments.cc | third_party/xla/xla/service/compilation_environments_test.cc | #include "tsl/platform/status.h"
#ifndef XLA_SERVICE_COMPILATION_ENVIRONMENTS_H_
#define XLA_SERVICE_COMPILATION_ENVIRONMENTS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string_view>
#include <typeindex>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "xla/xla.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/protobuf.h"
namespace xla {
// Holds at most one compilation-environment proto per message type, keyed by
// the message's protobuf Descriptor. Copyable (deep copy); serializable via
// ToProto/CreateFromProto.
class CompilationEnvironments {
 public:
  // Callback that post-processes a newly added (possibly null) environment
  // message, e.g. filling in defaults. Receives and returns ownership.
  using ProcessNewEnvFn =
      std::function<absl::StatusOr<std::unique_ptr<tsl::protobuf::Message>>(
          std::unique_ptr<tsl::protobuf::Message>)>;
  CompilationEnvironments() = default;
  // Deep-copies rhs by delegating to the copy-assignment operator.
  CompilationEnvironments(const CompilationEnvironments& rhs) { *this = rhs; }
  CompilationEnvironments& operator=(const CompilationEnvironments& rhs);
  ~CompilationEnvironments() = default;
  // Deserializes `proto`, unpacking each Any and re-adding it through AddEnv
  // (so the registered ProcessNewEnvFns run on each environment).
  static absl::StatusOr<std::unique_ptr<CompilationEnvironments>>
  CreateFromProto(const CompilationEnvironmentsProto& proto);
  // Registers the processing callback for one environment message type.
  // CHECK-fails if a callback for that descriptor is already registered.
  static void RegisterProcessNewEnvFn(
      const tsl::protobuf::Descriptor* descriptor,
      ProcessNewEnvFn process_new_env);
  // Adds `env` after running its registered ProcessNewEnvFn; returns an
  // error if an environment of the same type is already present.
  absl::Status AddEnv(std::unique_ptr<tsl::protobuf::Message> env);
  // Returns the environment of type T, default-creating one if absent.
  template <typename T>
  T& GetMutableEnv();
  template <typename T>
  const T& GetEnv();
  // True iff an environment of type T has already been created/added.
  template <typename T>
  bool HasEnv();
  // Drops all stored environments.
  void Clear() { environments_.clear(); }
  // Serializes all environments, ordered by message full name.
  CompilationEnvironmentsProto ToProto() const;
 private:
  // Looks up the callback registered for `descriptor`; null if none.
  static ProcessNewEnvFn GetProcessNewEnvFn(
      const tsl::protobuf::Descriptor& descriptor);
  // Monitoring hooks (global counters; see GlobalCompEnvStats in the .cc).
  static void DefaultEnvCreatedByCompilationEnvironments(
      std::string_view env_type);
  static void EnvAdded(std::string_view env_type);
  absl::Status AddEnvImpl(const tsl::protobuf::Descriptor& descriptor,
                          std::unique_ptr<tsl::protobuf::Message> env);
  absl::flat_hash_map<const tsl::protobuf::Descriptor*,
                      std::unique_ptr<tsl::protobuf::Message>>
      environments_;
};
// Returns a mutable reference to the stored environment of type T. If no
// such environment exists yet, one is created by running the registered
// ProcessNewEnvFn on a null message (TF_CHECK_OK aborts on failure) and the
// global "default env created" counter is bumped.
template <typename T>
T& CompilationEnvironments::GetMutableEnv() {
  auto descriptor = T::descriptor();
  auto it = environments_.find(descriptor);
  if (it == environments_.end()) {
    TF_CHECK_OK(AddEnvImpl(*descriptor, nullptr));
    DefaultEnvCreatedByCompilationEnvironments(descriptor->full_name());
    it = environments_.find(descriptor);
  }
  return tensorflow::down_cast<T&>(*it->second);
}
// Const-access variant of GetMutableEnv; note it shares the same lazy
// default-creation side effect when T is absent.
template <typename T>
const T& CompilationEnvironments::GetEnv() {
  return GetMutableEnv<T>();
}
// Reports whether an environment of type T is currently stored. Unlike
// GetEnv/GetMutableEnv this never creates one as a side effect.
template <typename T>
bool CompilationEnvironments::HasEnv() {
  return environments_.contains(T::descriptor());
}
}
#endif
#include "xla/service/compilation_environments.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
ABSL_CONST_INIT absl::Mutex process_new_env_fns_mu(absl::kConstInit);
absl::flat_hash_map<const tsl::protobuf::Descriptor*,
CompilationEnvironments::ProcessNewEnvFn>*
process_new_env_fns ABSL_GUARDED_BY(process_new_env_fns_mu) = nullptr;
// Process-wide singleton counting, per environment type, how many default
// environments were lazily created and how many were explicitly added.
// Monitoring/logging only; thread-safe via an internal mutex.
class GlobalCompEnvStats {
 public:
  static GlobalCompEnvStats& GetSingleton() {
    // Intentionally leaked: lives for the whole process.
    static GlobalCompEnvStats* singleton = new GlobalCompEnvStats();
    return *singleton;
  }
  // Records that a default env of `env_type` was lazily created.
  void DefaultEnvCreatedByCompilationEnvironments(std::string_view env_type)
      ABSL_LOCKS_EXCLUDED(mu_) {
    {
      absl::MutexLock l(&mu_);
      ++stats_[std::string(env_type)]
            .default_env_created_by_compilation_environments;
    }
    VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
  }
  // Records that an env of `env_type` was added explicitly.
  void EnvAdded(std::string_view env_type) ABSL_LOCKS_EXCLUDED(mu_) {
    {
      absl::MutexLock l(&mu_);
      ++stats_[std::string(env_type)].env_added;
    }
    VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
  }
  // Renders all counters as "type: { ... }; type: { ... }".
  std::string ToString() const ABSL_LOCKS_EXCLUDED(mu_) {
    absl::ReaderMutexLock l(&mu_);
    return absl::StrJoin(
        stats_, "; ",
        [](std::string* out, const StatMap::value_type& env_stats_pair) {
          absl::StrAppend(out, env_stats_pair.first, ": { ",
                          env_stats_pair.second.ToString(), " }");
        });
  }
 private:
  // Counters for one environment type.
  struct PerEnvStats {
    std::string ToString() const {
      return absl::StrCat(
          "# default envs created by CompilationEnvironments: ",
          default_env_created_by_compilation_environments, " ",
          "# envs added to CompilationEnvironments: ", env_added);
    }
    unsigned default_env_created_by_compilation_environments = 0;
    unsigned env_added = 0;
  };
  using StatMap = absl::flat_hash_map<std::string, PerEnvStats>;
  GlobalCompEnvStats() = default;
  GlobalCompEnvStats(const GlobalCompEnvStats&) = delete;
  GlobalCompEnvStats& operator=(const GlobalCompEnvStats&) = delete;
  GlobalCompEnvStats(GlobalCompEnvStats&&) = delete;
  GlobalCompEnvStats& operator=(GlobalCompEnvStats&&) = delete;
  mutable absl::Mutex mu_;
  StatMap stats_ ABSL_GUARDED_BY(mu_);
};
}
// Deep-copy assignment: drops any environments we currently hold, then
// clones each of rhs's message protos into this instance.
CompilationEnvironments& CompilationEnvironments::operator=(
    const CompilationEnvironments& rhs) {
  Clear();
  for (const auto& [descriptor, message] : rhs.environments_) {
    std::unique_ptr<tsl::protobuf::Message> clone =
        absl::WrapUnique(message->New());
    clone->CopyFrom(*message);
    environments_.insert({descriptor, std::move(clone)});
  }
  return *this;
}
// Rebuilds a CompilationEnvironments from its serialized form. Each Any in
// `proto` is resolved against the generated descriptor pool, unpacked into a
// freshly allocated message, and re-added via AddEnv so the registered
// ProcessNewEnvFns run. Returns DataLoss for unknown/malformed entries and
// Internal if the message factory has no prototype for the type.
absl::StatusOr<std::unique_ptr<CompilationEnvironments>>
CompilationEnvironments::CreateFromProto(
    const CompilationEnvironmentsProto& proto) {
  auto envs = std::make_unique<CompilationEnvironments>();
  const tsl::protobuf::DescriptorPool* const pool =
      tsl::protobuf::DescriptorPool::generated_pool();
  for (const auto& env_proto : proto.environments()) {
    std::string fullname;
    // Extract the message full name from the Any type URL.
    if (!google::protobuf::Any::ParseAnyTypeUrl(env_proto.type_url(),
                                                &fullname)) {
      return tsl::errors::DataLoss(
          "Invalid CompilationEnvironment message type url: %s",
          env_proto.type_url());
    }
    const tsl::protobuf::Descriptor* const descriptor =
        pool->FindMessageTypeByName(fullname);
    if (descriptor == nullptr) {
      return tsl::errors::DataLoss(
          "Unknown CompilationEnvironment message type: %s", fullname);
    }
    const tsl::protobuf::Message* const prototype =
        tsl::protobuf::MessageFactory::generated_factory()->GetPrototype(
            descriptor);
    if (prototype == nullptr) {
      return tsl::errors::Internal(
          "Unsupported CompilationEnvironment message type: %s", fullname);
    }
    std::unique_ptr<tsl::protobuf::Message> env(prototype->New());
    if (!env_proto.UnpackTo(env.get())) {
      return tsl::errors::DataLoss(
          "Unable to unpack CompilationEnvironment message of type '%s'",
          fullname);
    }
    TF_RETURN_IF_ERROR(envs->AddEnv(std::move(env)));
  }
  return envs;
}
// Registers `process_new_env` as the handler for the environment type named
// by `descriptor`. Lazily allocates the (intentionally leaked) global
// registry; CHECK-fails on a duplicate registration for the same descriptor.
void CompilationEnvironments::RegisterProcessNewEnvFn(
    const tsl::protobuf::Descriptor* descriptor,
    ProcessNewEnvFn process_new_env) {
  absl::MutexLock l(&process_new_env_fns_mu);
  if (process_new_env_fns == nullptr) {
    process_new_env_fns =
        new absl::flat_hash_map<const tsl::protobuf::Descriptor*,
                                CompilationEnvironments::ProcessNewEnvFn>();
  }
  const bool inserted =
      process_new_env_fns->insert({descriptor, std::move(process_new_env)})
          .second;
  CHECK(inserted) << "ProcessNewEnvFn for XLA compilation environment '"
                  << descriptor->full_name() << "' has already been registered";
}
// Adds `env` to this set of environments. A null pointer is rejected with
// InvalidArgument; otherwise the work is delegated to AddEnvImpl, keyed by
// the message's own descriptor.
absl::Status CompilationEnvironments::AddEnv(
    std::unique_ptr<tsl::protobuf::Message> env) {
  if (env == nullptr) {
    return tsl::errors::InvalidArgument(
        "Can not add a null compilation environment.");
  }
  const tsl::protobuf::Descriptor* descriptor = env->GetDescriptor();
  return AddEnvImpl(*descriptor, std::move(env));
}
// Serializes the held environments into a CompilationEnvironmentsProto,
// packing each message as an Any. Entries are emitted sorted by message
// full name so the output is deterministic across hash-map orderings.
CompilationEnvironmentsProto CompilationEnvironments::ToProto() const {
  std::vector<const tsl::protobuf::Descriptor*> descriptors;
  descriptors.reserve(environments_.size());
  for (const auto& entry : environments_) {
    descriptors.push_back(entry.first);
  }
  absl::c_sort(descriptors, [](const tsl::protobuf::Descriptor* lhs,
                               const tsl::protobuf::Descriptor* rhs) {
    return lhs->full_name() < rhs->full_name();
  });
  CompilationEnvironmentsProto proto;
  for (const tsl::protobuf::Descriptor* descriptor : descriptors) {
    proto.add_environments()->PackFrom(*environments_.at(descriptor));
  }
  return proto;
}
// Returns the ProcessNewEnvFn registered for `descriptor`, or a null
// function if the registry has not been created or has no entry for it.
CompilationEnvironments::ProcessNewEnvFn
CompilationEnvironments::GetProcessNewEnvFn(
    const tsl::protobuf::Descriptor& descriptor) {
  absl::MutexLock l(&process_new_env_fns_mu);
  if (process_new_env_fns != nullptr) {
    const auto entry = process_new_env_fns->find(&descriptor);
    if (entry != process_new_env_fns->end()) {
      return entry->second;
    }
  }
  return nullptr;
}
// Forwards to the process-wide stats singleton (monitoring only).
void CompilationEnvironments::DefaultEnvCreatedByCompilationEnvironments(
    std::string_view env_type) {
  GlobalCompEnvStats::GetSingleton().DefaultEnvCreatedByCompilationEnvironments(
      env_type);
}
// Forwards to the process-wide stats singleton (monitoring only).
void CompilationEnvironments::EnvAdded(std::string_view env_type) {
  GlobalCompEnvStats::GetSingleton().EnvAdded(env_type);
}
// Shared implementation behind AddEnv and lazy default creation.
// Rejects a second environment of the same type and types with no registered
// ProcessNewEnvFn; runs the registered callback on `env` (which may be null,
// meaning "create a default"); warns about proto fields unknown to this
// binary; then stores the processed message and records the addition.
absl::Status CompilationEnvironments::AddEnvImpl(
    const tsl::protobuf::Descriptor& descriptor,
    std::unique_ptr<tsl::protobuf::Message> env) {
  if (environments_.contains(&descriptor)) {
    return tsl::errors::InvalidArgument(
        "Replacing CompilationEnvironment of type %s.", descriptor.full_name());
  }
  ProcessNewEnvFn process_new_env = GetProcessNewEnvFn(descriptor);
  if (!process_new_env) {
    return tsl::errors::InvalidArgument(
        "Unknown compilation environment type: %s", descriptor.full_name());
  }
  TF_ASSIGN_OR_RETURN(std::unique_ptr<tsl::protobuf::Message> processed_env,
                      process_new_env(std::move(env)));
  // Collect tag numbers of any fields the compiled-in message definition
  // does not recognize, and surface them in a warning.
  const tsl::protobuf::UnknownFieldSet& unknown_fields =
      processed_env->GetReflection()->GetUnknownFields(*processed_env);
  std::vector<int> unknown_tags;
  unknown_tags.reserve(unknown_fields.field_count());
  for (int i = 0; i < unknown_fields.field_count(); ++i) {
    const tsl::protobuf::UnknownField& field = unknown_fields.field(i);
    unknown_tags.push_back(field.number());
  }
  if (!unknown_tags.empty()) {
    LOG(WARNING) << "CompilationEnvironment " << descriptor.full_name()
                 << " contains unknown fields with tag numbers: "
                 << absl::StrJoin(unknown_tags, ", ");
  }
  environments_.insert({&descriptor, std::move(processed_env)});
  EnvAdded(descriptor.full_name());
  return absl::OkStatus();
}
} | #include "xla/service/compilation_environments.h"
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "xla/service/test_compilation_environment.pb.h"
#include "xla/test.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/protobuf.h"
namespace xla {
// ProcessNewEnvFn for TestCompilationEnvironment1: creates a default message
// when given null, and rewrites some_flag values 0 and 1 to 100.
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv1(
    std::unique_ptr<tsl::protobuf::Message> msg) {
  std::unique_ptr<test::TestCompilationEnvironment1> env(
      tensorflow::down_cast<test::TestCompilationEnvironment1*>(msg.release()));
  if (!env) {
    env = std::make_unique<test::TestCompilationEnvironment1>();
  }
  if (env->some_flag() == 0 || env->some_flag() == 1) {
    env->set_some_flag(100);
  }
  return env;
}
// ProcessNewEnvFn for TestCompilationEnvironment2: creates a default message
// when given null, and rewrites some_other_flag value 0 to 200.
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv2(
    std::unique_ptr<tsl::protobuf::Message> msg) {
  std::unique_ptr<test::TestCompilationEnvironment2> env(
      tensorflow::down_cast<test::TestCompilationEnvironment2*>(msg.release()));
  if (!env) {
    env = std::make_unique<test::TestCompilationEnvironment2>();
  }
  if (env->some_other_flag() == 0) {
    env->set_some_other_flag(200);
  }
  return env;
}
// ProcessNewEnvFn for TestCompilationEnvironment3: creates a default message
// when given null, and rewrites a_third_flag value 0 to 300.
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv3(
    std::unique_ptr<tsl::protobuf::Message> msg) {
  std::unique_ptr<test::TestCompilationEnvironment3> env(
      tensorflow::down_cast<test::TestCompilationEnvironment3*>(msg.release()));
  if (!env) {
    env = std::make_unique<test::TestCompilationEnvironment3>();
  }
  if (env->a_third_flag() == 0) {
    env->set_a_third_flag(300);
  }
  return env;
}
namespace test {
namespace {
// Fixture that registers the ProcessNewEnv callbacks for the three test
// environment types once for the whole suite.
class CompilationEnvironmentsTest : public ::testing::Test {
 protected:
  static void SetUpTestSuite() {
    CompilationEnvironments::RegisterProcessNewEnvFn(
        test::TestCompilationEnvironment1::descriptor(), ProcessNewEnv1);
    CompilationEnvironments::RegisterProcessNewEnvFn(
        test::TestCompilationEnvironment2::descriptor(), ProcessNewEnv2);
    CompilationEnvironments::RegisterProcessNewEnvFn(
        test::TestCompilationEnvironment3::descriptor(), ProcessNewEnv3);
  }
};
// GetEnv lazily creates a default env; ProcessNewEnv1 maps flag 0 -> 100.
TEST_F(CompilationEnvironmentsTest, GetDefaultEnv) {
  CompilationEnvironments envs;
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
// Same lazy-default behavior through the mutable accessor.
TEST_F(CompilationEnvironmentsTest, GetDefaultMutableEnv) {
  CompilationEnvironments envs;
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
// Values outside ProcessNewEnv1's rewrite set (0/1) pass through unchanged.
TEST_F(CompilationEnvironmentsTest, GetAddedEnvNotModifiedByProcessNewEnv) {
  CompilationEnvironments envs;
  auto env = std::make_unique<TestCompilationEnvironment1>();
  env->set_some_flag(5);
  TF_ASSERT_OK(envs.AddEnv(std::move(env)));
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 5);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 5);
}
// some_flag == 1 is rewritten to 100 by ProcessNewEnv1 during AddEnv.
TEST_F(CompilationEnvironmentsTest, GetAddedEnvModifiedByProcessNewEnv) {
  CompilationEnvironments envs;
  auto env = std::make_unique<TestCompilationEnvironment1>();
  env->set_some_flag(1);
  TF_ASSERT_OK(envs.AddEnv(std::move(env)));
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
// Different environment types coexist independently.
TEST_F(CompilationEnvironmentsTest, MultipleEnvs) {
  CompilationEnvironments envs;
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment2>().some_other_flag(), 200);
  EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
// Mutations through GetMutableEnv persist, per environment type.
TEST_F(CompilationEnvironmentsTest, MultipleMutableEnvs) {
  CompilationEnvironments envs;
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
            200);
  envs.GetMutableEnv<TestCompilationEnvironment1>().set_some_flag(101);
  envs.GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(201);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 101);
  EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
            201);
}
// Copy construction deep-copies; the copy stays valid after the original
// is destroyed.
TEST_F(CompilationEnvironmentsTest, CopyConstructor) {
  auto envs = std::make_unique<CompilationEnvironments>();
  auto env1 = std::make_unique<TestCompilationEnvironment1>();
  env1->set_some_flag(10);
  TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
  auto env2 = std::make_unique<TestCompilationEnvironment2>();
  TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
  envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
  auto envs_copy = std::make_unique<CompilationEnvironments>(*envs);
  envs.reset();
  EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
  EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment2>().some_other_flag(),
            20);
}
// Copy assignment clears the destination first: env3/env4 are dropped, so
// the later Env3 access re-creates a default (a_third_flag == 300).
TEST_F(CompilationEnvironmentsTest, CopyAssignment) {
  auto envs1 = std::make_unique<CompilationEnvironments>();
  auto env1 = std::make_unique<TestCompilationEnvironment1>();
  env1->set_some_flag(10);
  TF_ASSERT_OK(envs1->AddEnv(std::move(env1)));
  auto env2 = std::make_unique<TestCompilationEnvironment2>();
  TF_ASSERT_OK(envs1->AddEnv(std::move(env2)));
  envs1->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
  auto envs2 = std::make_unique<CompilationEnvironments>();
  auto env3 = std::make_unique<TestCompilationEnvironment1>();
  env3->set_some_flag(30);
  TF_ASSERT_OK(envs2->AddEnv(std::move(env3)));
  auto env4 = std::make_unique<TestCompilationEnvironment3>();
  env4->set_a_third_flag(40);
  TF_ASSERT_OK(envs2->AddEnv(std::move(env4)));
  *envs2 = *envs1;
  envs1.reset();
  EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
  EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment2>().some_other_flag(), 20);
  EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment3>().a_third_flag(), 300);
}
// ToProto followed by CreateFromProto round-trips the stored environments.
TEST_F(CompilationEnvironmentsTest, ProtoRoundTrip) {
  auto envs = std::make_unique<CompilationEnvironments>();
  auto env1 = std::make_unique<TestCompilationEnvironment1>();
  env1->set_some_flag(10);
  TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
  auto env2 = std::make_unique<TestCompilationEnvironment2>();
  TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
  envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
  auto proto = envs->ToProto();
  TF_ASSERT_OK_AND_ASSIGN(auto envs_deserialized,
                          CompilationEnvironments::CreateFromProto(proto));
  EXPECT_EQ(
      envs_deserialized->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
  EXPECT_EQ(envs_deserialized->GetEnv<TestCompilationEnvironment2>()
                .some_other_flag(),
            20);
}
// HasEnv does not create the env; GetEnv does, after which HasEnv is true.
TEST_F(CompilationEnvironmentsTest, EnvTypePresenceCheck) {
  CompilationEnvironments envs;
  EXPECT_FALSE(envs.HasEnv<TestCompilationEnvironment1>());
  envs.GetEnv<TestCompilationEnvironment1>();
  EXPECT_TRUE(envs.HasEnv<TestCompilationEnvironment1>());
}
}
}
} |
1,972 | cpp | tensorflow/tensorflow | stochastic_convert_decomposer | third_party/xla/xla/service/stochastic_convert_decomposer.cc | third_party/xla/xla/service/stochastic_convert_decomposer_test.cc | #ifndef XLA_SERVICE_STOCHASTIC_CONVERT_DECOMPOSER_H_
#define XLA_SERVICE_STOCHASTIC_CONVERT_DECOMPOSER_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites kStochasticConvert instructions into an equivalent
// sequence of primitive HLO ops (sign/floor/compare/select arithmetic).
class StochasticConvertDecomposer : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "stochastic_convert_decomposer";
  }
  using HloPassInterface::Run;
  // Returns true iff at least one stochastic-convert was decomposed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/stochastic_convert_decomposer.h"
#include <cstdint>
#include <limits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Rewrites one kStochasticConvert into primitive HLO and removes it from
// `comp`. Only float -> signed-integer destinations are handled; anything
// else returns an Internal error. The construction: split |operand| into
// integer and fractional parts, rescale the fraction to the random operand's
// bit-width and compare it against `random` to decide rounding up, re-apply
// the sign, then clamp the result to the destination type's range.
absl::Status DecomposeStochasticConvert(HloComputation* comp,
                                        HloInstruction* instruction) {
  CHECK(instruction->opcode() == HloOpcode::kStochasticConvert)
      << "requires a stochastic_convert instruction to decompose, but got: "
      << instruction->opcode();
  CHECK(instruction->operand_count() == 2)
      << "requires 2 operands for stochastic convert, but got: "
      << instruction->operand_count();
  HloInstruction* operand = instruction->mutable_operand(0);
  HloInstruction* random = instruction->mutable_operand(1);
  PrimitiveType from_type = operand->shape().element_type();
  PrimitiveType random_type = random->shape().element_type();
  PrimitiveType to_type = instruction->shape().element_type();
  // Re-run shape inference to validate operand/random/result compatibility
  // (e.g. matching bit widths, unsigned random operand).
  TF_RETURN_IF_ERROR(ShapeInference::InferStochasticConvertShape(
                         operand->shape(), random->shape(), to_type)
                         .status());
  VLOG(1) << "Decomposing instruction: " << instruction->ToString();
  if (primitive_util::IsSignedIntegralType(to_type)) {
    TF_ASSIGN_OR_RETURN(HloInstruction * operand_sign,
                        MakeUnaryHlo(HloOpcode::kSign, operand));
    TF_ASSIGN_OR_RETURN(HloInstruction * should_neg,
                        MakeCompareHlo(Comparison::Direction::kLt, operand_sign,
                                       MakeScalarLike(operand_sign, 0)));
    TF_ASSIGN_OR_RETURN(HloInstruction * operand_abs,
                        MakeUnaryHlo(HloOpcode::kAbs, operand));
    TF_ASSIGN_OR_RETURN(HloInstruction * truncated_fp,
                        MakeUnaryHlo(HloOpcode::kFloor, operand_abs));
    TF_ASSIGN_OR_RETURN(
        HloInstruction * fractional,
        MakeBinaryHlo(HloOpcode::kSubtract, operand_abs, truncated_fp));
    // Widen F16 fractions to F32 before scaling — presumably to keep the
    // 2^BitWidth(random_type) multiply representable; TODO confirm.
    if (from_type == F16) {
      fractional = MakeConvertToHlo(fractional, F32);
    }
    // Scale the fraction into the random operand's integer domain and round
    // up exactly when `random` falls below it.
    TF_ASSIGN_OR_RETURN(
        HloInstruction * fixed_fractional,
        MakeBinaryHlo(
            HloOpcode::kMultiply, fractional,
            MakeScalarLike(fractional, IPow<double>(2, primitive_util::BitWidth(
                                                          random_type)))));
    TF_ASSIGN_OR_RETURN(
        HloInstruction * should_round_up,
        MakeCompareHlo(Comparison::Direction::kLt, random,
                       MakeConvertToHlo(fixed_fractional, random_type)));
    HloInstruction* truncated_int = MakeConvertToHlo(truncated_fp, to_type);
    TF_ASSIGN_OR_RETURN(
        truncated_int,
        MakeSelectHlo(should_round_up,
                      MakeBinaryHlo(HloOpcode::kAdd, truncated_int,
                                    MakeScalarLike(truncated_int, 1))
                          .value(),
                      truncated_int));
    TF_ASSIGN_OR_RETURN(
        HloInstruction * result,
        MakeSelectHlo(should_neg,
                      MakeUnaryHlo(HloOpcode::kNegate, truncated_int).value(),
                      truncated_int));
    // Clamp to the destination signed type's [min, max], computed from the
    // destination bit width via two's-complement arithmetic.
    auto to_bits = primitive_util::BitWidth(to_type);
    auto min = static_cast<int64_t>(
        (static_cast<uint64_t>(1) + ~static_cast<uint64_t>(1))
        << (to_bits - 1));
    TF_ASSIGN_OR_RETURN(HloInstruction * is_min,
                        MakeCompareHlo(Comparison::Direction::kLe, operand,
                                       MakeScalarLike(operand, min)));
    TF_ASSIGN_OR_RETURN(
        result, MakeSelectHlo(is_min, MakeScalarLike(result, min), result));
    auto max =
        static_cast<int64_t>((static_cast<uint64_t>(1) << (to_bits - 1)) - 1);
    TF_ASSIGN_OR_RETURN(HloInstruction * is_max,
                        MakeCompareHlo(Comparison::Direction::kGe, operand,
                                       MakeScalarLike(operand, max)));
    TF_ASSIGN_OR_RETURN(
        result, MakeSelectHlo(is_max, MakeScalarLike(result, max), result));
    TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(result));
    TF_RETURN_IF_ERROR(comp->RemoveInstruction(instruction));
    return absl::OkStatus();
  }
  return Internal("Unsupported stochastic convert: from %s to %s",
                  PrimitiveType_Name(from_type),
                  PrimitiveType_Name(to_type));
}
// Walks every non-fusion computation (restricted to `execution_threads`)
// and decomposes each stochastic-convert instruction it encounters.
// Returns true when any rewrite happened.
absl::StatusOr<bool> StochasticConvertDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool modified = false;
  for (HloComputation* comp :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* hlo : comp->MakeInstructionPostOrder()) {
      if (hlo->opcode() == HloOpcode::kStochasticConvert) {
        TF_RETURN_IF_ERROR(DecomposeStochasticConvert(comp, hlo));
        modified = true;
      }
    }
  }
  return modified;
}
} | #include "xla/service/stochastic_convert_decomposer.h"
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using StochasticConvertDecomposerTest = HloTestBase;
using ::testing::HasSubstr;
// f32 -> s32 stochastic-convert is replaced by a chain of selects
// (max-clamp, min-clamp, sign re-application, round-up).
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertF32ToS32) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  %arg_param.1 = f32[65536]{0} parameter(0)
  %random_param.2 = u32[65536]{0} parameter(1)
  ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Select(op::Compare(), op::Broadcast(),
                         op::Select(op::Compare(), op::Broadcast(),
                                    op::Select(op::Compare(), op::Negate(),
                                               op::Select()))));
}
// Same decomposition for bf16 -> s8 with a u16 random operand.
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertBF16ToS8) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  %arg_param.1 = bf16[65536]{0} parameter(0)
  %random_param.2 = u16[65536]{0} parameter(1)
  ROOT %stochastic-convert.3 = s8[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u16[65536]{0} %random_param.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Select(op::Compare(), op::Broadcast(),
                         op::Select(op::Compare(), op::Broadcast(),
                                    op::Select(op::Compare(), op::Negate(),
                                               op::Select()))));
}
// Random operand bit-width must match the source type; shape inference in
// the pass rejects bf16 with a u32 random operand.
TEST_F(StochasticConvertDecomposerTest, WrongRandomBitWidth) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  %arg_param.1 = bf16[65536]{0} parameter(0)
  %random_param.2 = u32[65536]{0} parameter(1)
  ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  auto result = decomposer.Run(module.get());
  EXPECT_NE(absl::OkStatus(), result.status());
  EXPECT_THAT(result.status().message(), HasSubstr("have same bits"));
}
// Random operand must be an unsigned integer type; a signed one is rejected.
TEST_F(StochasticConvertDecomposerTest, WrongRandomType) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  %arg_param.1 = f32[65536]{0} parameter(0)
  %random_param.2 = s32[65536]{0} parameter(1)
  ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, s32[65536]{0} %random_param.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  auto result = decomposer.Run(module.get());
  EXPECT_NE(absl::OkStatus(), result.status());
  EXPECT_THAT(result.status().message(),
              HasSubstr("must be unsigned integers"));
}
}
} |
1,973 | cpp | tensorflow/tensorflow | dynamic_padder | third_party/xla/xla/service/dynamic_padder.cc | third_party/xla/xla/service/dynamic_padder_test.cc | #ifndef XLA_SERVICE_DYNAMIC_PADDER_H_
#define XLA_SERVICE_DYNAMIC_PADDER_H_
#include <functional>
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Configuration knobs for the DynamicPadder pass.
struct DynamicPadderOptions {
  // Optional callback deciding which ops natively support dynamism.
  OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr;
  // Optional hook for inferring dynamic dimensions of custom calls.
  DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler =
      nullptr;
  // NOTE(review): presumably controls slicing entry-computation outputs back
  // to their dynamic bounds — confirm against the pass implementation.
  bool slice_dynamic_output = true;
  // Generator used for emitted assertions (see DynamicDimensionInference).
  DynamicDimensionInference::AssertionGenerator assertion_generator;
  // How runtime shape checks are emitted; default performs no checks.
  DynamicDimensionInference::ShapeCheckMode shape_check_mode =
      DynamicDimensionInference::ShapeCheckMode::kIgnore;
};
// HLO pass that rewrites modules containing dynamic dimensions; behavior is
// governed by DynamicPadderOptions. (The implementation pads operands with
// op-specific identity values — see ChooseIdentityValue in the .cc.)
class DynamicPadder : public HloModulePass {
 public:
  explicit DynamicPadder(DynamicPadderOptions options = DynamicPadderOptions())
      : options_(options) {}
  absl::string_view name() const override { return "dynamic_padder"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  DynamicPadderOptions options_;
};
}
#endif
#include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/xla_builder.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/monitoring/gauge.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Process-wide usage metric; set to true when the dynamic padder actually
// rewrites something (telemetry only, no effect on compilation).
auto* dynamic_padding_gauge = tsl::monitoring::Gauge<bool, 0>::New(
    "/tensorflow/core/use_dynamic_padding_gauge",
    "Tracks if dynamic padder is used.");
// Returns the scalar value to use when padding operand `operand_number` of
// `inst` to a static size, chosen so padded elements cannot change the
// result (e.g. 0 for dot/convolution, the init value for reduce). Returns
// nullptr when padded garbage is harmless and no masking is needed, and an
// error for ops whose padding semantics are not implemented.
absl::StatusOr<HloInstruction*> ChooseIdentityValue(HloInstruction* inst,
                                                    int64_t operand_number) {
  // Elementwise ops map padded elements to padded elements independently;
  // whatever lands in the padding is ignored downstream.
  if (inst->IsElementwise()) {
    return nullptr;
  }
  if (inst->opcode() == HloOpcode::kSelectAndScatter ||
      inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
    if (operand_number == 1) {
      // The `source` operand is padded with the init value (operand 2).
      return inst->mutable_operand(2);
    }
    TF_RET_CHECK(operand_number == 0);
    HloComputation* select = inst->called_computations()[0];
    // Only a `max`-style select (compare GE of the two parameters) is
    // supported: padding the data operand with -inf then never wins.
    if (Match(select->root_instruction(),
              match::Compare(match::Parameter(), match::Parameter())
                  .WithComparisonDirection(ComparisonDirection::kGe))) {
      return inst->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::MinValue(inst->operand(0)->shape().element_type())));
    } else {
      return Unimplemented(
          "Only select and scatter with `max` as select function is "
          "supported, got %s",
          select->ToString());
    }
  }
  switch (inst->opcode()) {
    case HloOpcode::kReduce: {
      auto* reduce = Cast<HloReduceInstruction>(inst);
      TF_RET_CHECK(operand_number < reduce->input_count())
          << "Only data operand with dynamic dimension is valid.";
      // Data operand i is padded with its matching init value, which lives
      // at operand index input_count() + i.
      int64_t init_value_index = reduce->input_count() + operand_number;
      return inst->mutable_operand(init_value_index);
    }
    case HloOpcode::kReduceWindow: {
      auto* reduce_window = Cast<HloReduceWindowInstruction>(inst);
      TF_RET_CHECK(operand_number < reduce_window->input_count())
          << "Only data operand with dynamic dimension is valid.";
      // Same operand layout as kReduce: inits follow the data operands.
      int64_t init_value_index = reduce_window->input_count() + operand_number;
      return inst->mutable_operand(init_value_index);
    }
    case HloOpcode::kConvolution:
    case HloOpcode::kDot: {
      // Zero padding contributes nothing to the contraction sum.
      PrimitiveType ptype = inst->operand(0)->shape().element_type();
      return inst->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));
    }
    case HloOpcode::kPad:
      // Reuse the pad's own padding value.
      return inst->mutable_operand(1);
    case HloOpcode::kScatter: {
      if (operand_number != 1) {
        return nullptr;
      }
      // Pad scatter indices with the max index value so padded updates fall
      // out of bounds and are dropped.
      PrimitiveType indices_ptype =
          inst->operand(operand_number)->shape().element_type();
      return inst->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::MaxValue(indices_ptype)));
    }
    // Ops where padded data flows through without affecting valid elements.
    case HloOpcode::kParameter:
    case HloOpcode::kGather:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kGetDimensionSize:
    case HloOpcode::kSetDimensionSize:
    case HloOpcode::kConcatenate:
    case HloOpcode::kReshape:
    case HloOpcode::kReverse:
    case HloOpcode::kTuple:
    case HloOpcode::kAllReduce:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kBroadcast:
    case HloOpcode::kTranspose:
    case HloOpcode::kSort:
    case HloOpcode::kSlice:
    case HloOpcode::kDomain:
      return nullptr;
    case HloOpcode::kCustomCall:
      // Custom calls are opaque; the caller decides how to handle them.
      return nullptr;
    default:
      return UnimplementedStrCat("Unimplemented padding for instruction: ",
                                 inst->ToString());
  }
}
// Lowers a kGetDimensionSize instruction. A dynamic dimension is replaced by
// the tracked runtime size value; a static dimension is replaced by an S32
// constant holding the dimension's size. Returns true iff `instr` was
// rewritten.
absl::StatusOr<bool> ReplaceGetSize(
    HloInstruction* instr,
    DynamicDimensionInference* dynamic_dimension_inference) {
  if (instr->opcode() != HloOpcode::kGetDimensionSize) {
    return false;
  }
  HloComputation* computation = instr->parent();
  // Sanity check: the instruction's shape must match what shape inference
  // produces (a scalar S32).
  TF_ASSIGN_OR_RETURN(auto legal_shape,
                      ShapeInference::InferGetDimensionSizeShape(
                          instr->operand(0)->shape(), instr->dimension()));
  TF_RET_CHECK(ShapeUtil::Equal(instr->shape(), legal_shape))
      << "instr->shape() " << instr->shape().ToString() << " , "
      << "legal_shape " << legal_shape.ToString();
  TF_RET_CHECK(ShapeUtil::HasPrimitiveType(instr->shape(), S32));
  HloInstruction* operand = instr->mutable_operand(0);
  const int64_t dim = instr->dimension();
  HloInstruction* replacement =
      dynamic_dimension_inference->GetDynamicSize(operand, {}, dim);
  if (replacement == nullptr) {
    // Static dimension: materialize its size as a constant.
    const int32_t static_size = instr->operand(0)->shape().dimensions(dim);
    replacement = computation->AddInstruction(HloInstruction::CreateConstant(
        LiteralUtil::CreateR0<int32_t>(static_size)));
  }
  TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(replacement));
  dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(instr,
                                                                  replacement);
  return true;
}
// Removes a kSetDimensionSize instruction by forwarding its data operand to
// all users; the dynamic-size bookkeeping is carried separately by
// DynamicDimensionInference. Returns true iff `instr` was rewritten.
absl::StatusOr<bool> ReplaceSetSize(HloInstruction* instr) {
  if (instr->opcode() != HloOpcode::kSetDimensionSize) {
    return false;
  }
  // Shapes must agree modulo dynamic-dimension markers.
  TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
      instr->shape(), instr->operand(0)->shape()))
      << "instr->shape() " << instr->shape().ToString() << " , "
      << "instruction operand shape " << instr->operand(0)->shape();
  HloInstruction* data = instr->mutable_operand(0);
  TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(data));
  return true;
}
// Removes a "SetBound" custom call by forwarding its operand to all users,
// mirroring ReplaceSetSize. Returns true iff `instr` was rewritten.
absl::StatusOr<bool> ReplaceSetBound(HloInstruction* instr) {
  const bool is_set_bound = instr->opcode() == HloOpcode::kCustomCall &&
                            instr->custom_call_target() == "SetBound";
  if (!is_set_bound) {
    return false;
  }
  // Shapes must agree modulo dynamic-dimension markers.
  TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
      instr->shape(), instr->operand(0)->shape()))
      << "instr->shape() " << instr->shape().ToString() << " , "
      << "instruction operand shape " << instr->operand(0)->shape();
  HloInstruction* data = instr->mutable_operand(0);
  TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(data));
  return true;
}
// Returns true when padding operand `operand_num` of `inst` along `dimension`
// is unnecessary: the op either ignores that dimension (e.g. batch dim of a
// convolution, non-contracting dim of a dot) or uses a trivial window there.
bool ShouldSkipPadOnOperand(
    const HloInstruction* inst, int64_t operand_num, int64_t dimension,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  switch (inst->opcode()) {
    case HloOpcode::kConvolution: {
      const auto& dnums = inst->convolution_dimension_numbers();
      if (operand_num == 0) {
        // The batch dimension never needs masking.
        if (dimension == dnums.input_batch_dimension()) {
          return true;
        }
        // A spatial dimension with a size-1 window cannot mix padded and
        // valid elements.
        const auto& spatial_dims = dnums.input_spatial_dimensions();
        for (int64_t i = 0; i < spatial_dims.size(); ++i) {
          if (spatial_dims[i] == dimension &&
              inst->window().dimensions(i).size() == 1) {
            return true;
          }
        }
        return false;
      }
      return operand_num == 1 &&
             dimension == dnums.kernel_output_feature_dimension();
    }
    case HloOpcode::kDot: {
      // Only contracting dimensions participate in the sum and need masking.
      const auto& dnums = inst->dot_dimension_numbers();
      const auto& contracting_dims =
          operand_num == 0 ? dnums.lhs_contracting_dimensions()
                           : dnums.rhs_contracting_dimensions();
      return !absl::c_linear_search(contracting_dims, dimension);
    }
    case HloOpcode::kReduce:
      // Non-reduced dimensions are carried through untouched.
      return !absl::c_linear_search(inst->dimensions(), dimension);
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kReduceWindow:
      // Trivial window along the dimension: padded elements stay separate.
      return inst->window().dimensions(dimension).size() == 1;
    case HloOpcode::kAsyncStart:
      // Ops on excluded threads are not rewritten by this pass invocation.
      return !HloInstruction::IsThreadIncluded(inst->async_execution_thread(),
                                               execution_threads);
    default:
      return false;
  }
}
// Masks the padded region of `inst` along dimension `dim`: elements whose
// index is >= `dynamic_size` are replaced by `padding_scalar`. Returns the
// masked instruction with a fully static shape.
HloInstruction* PadWithScalar(HloInstruction* inst, int64_t dim,
                              HloInstruction* dynamic_size,
                              HloInstruction* padding_scalar) {
  CHECK(inst != nullptr && dynamic_size != nullptr &&
        padding_scalar != nullptr);
  const Shape mask_shape =
      ShapeUtil::MakeShape(xla::S32, inst->shape().dimensions());
  const Shape pred_shape =
      ShapeUtil::MakeShape(xla::PRED, inst->shape().dimensions());
  // In-bounds predicate: iota(dim) < broadcast(dynamic_size).
  HloInstruction* index_iota =
      inst->AddInstruction(HloInstruction::CreateIota(mask_shape, dim));
  HloInstruction* size_broadcast = inst->AddInstruction(
      HloInstruction::CreateBroadcast(mask_shape, dynamic_size, {}));
  HloInstruction* in_bounds =
      inst->AddInstruction(HloInstruction::CreateCompare(
          pred_shape, index_iota, size_broadcast, ComparisonDirection::kLt));
  // Out-of-bounds elements take the broadcasted identity value.
  HloInstruction* identity_broadcast =
      inst->AddInstruction(HloInstruction::CreateBroadcast(
          ShapeUtil::MakeStaticShape(inst->shape()), padding_scalar, {}));
  return inst->AddInstruction(HloInstruction::CreateTernary(
      ShapeUtil::MakeStaticShape(inst->shape()), HloOpcode::kSelect, in_bounds,
      inst, identity_broadcast));
}
// Builds a 1-D S32 mask of 0/1 over the single "combined" dimension of a
// reshape, where element i is `one` iff linear position i maps to an
// in-bounds position in every dynamic output dimension of `output_dims`.
// `split_input` selects whether the combined dimension lives on the reshape's
// input (split case) or output (combine case). Returns nullptr when none of
// the `output_dims` is dynamic, i.e. no rewrite is needed.
HloInstruction* GenerateBinaryMask(
    HloInstruction* reshape, int64_t input_dim,
    absl::Span<const int64_t> output_dims,
    absl::Span<HloInstruction*> output_dynamic_dims, HloInstruction* one,
    HloInstruction* zero, bool split_input) {
  Shape input_shape =
      split_input ? reshape->operand(0)->shape() : reshape->shape();
  Shape output_shape =
      split_input ? reshape->shape() : reshape->operand(0)->shape();
  const Shape mask_input_shape =
      ShapeUtil::MakeShape(xla::S32, {input_shape.dimensions(input_dim)});
  const Shape pred_input_shape =
      ShapeUtil::MakeShape(xla::PRED, {input_shape.dimensions(input_dim)});
  HloInstruction* pred_true = reshape->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  // Start with everything in bounds; AND in one predicate per dynamic dim.
  HloInstruction* input_shape_pred_mask = reshape->AddInstruction(
      HloInstruction::CreateBroadcast(pred_input_shape, pred_true, {}));
  bool need_rewrite = false;
  // `iota` holds the linear index; it is repeatedly divided to peel off the
  // per-output-dimension indices from minor to major.
  HloInstruction* iota =
      reshape->AddInstruction(HloInstruction::CreateIota(mask_input_shape, 0));
  // Only dims after the first matter: the major-most dim's bound is handled
  // by the dynamic size of the combined dimension itself.
  for (int64_t i = 1; i < output_dims.size(); ++i) {
    if (output_dynamic_dims[output_dims[i]] != nullptr) {
      need_rewrite = true;
      break;
    }
  }
  if (!need_rewrite) {
    return nullptr;
  }
  // Walk output dims minor-to-major, extracting each dim's index with
  // remainder/divide and comparing it against the dynamic size.
  for (int64_t i = output_dims.size() - 1; i > 0; i--) {
    const int64_t output_dim = output_dims[i];
    HloInstruction* dynamic_size = output_dynamic_dims[output_dim];
    HloInstruction* static_output_dim_size = reshape->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
            output_shape.dimensions(output_dim))));
    HloInstruction* broadcasted_static_output_dim_size =
        reshape->AddInstruction(HloInstruction::CreateBroadcast(
            mask_input_shape, static_output_dim_size, {}));
    if (dynamic_size != nullptr) {
      // Index within this output dim = linear_index % static_dim_size.
      HloInstruction* dim_index =
          reshape->AddInstruction(HloInstruction::CreateBinary(
              mask_input_shape, HloOpcode::kRemainder, iota,
              broadcasted_static_output_dim_size));
      HloInstruction* broadcasted_effective_size = reshape->AddInstruction(
          HloInstruction::CreateBroadcast(mask_input_shape, dynamic_size, {}));
      HloInstruction* selected =
          reshape->AddInstruction(HloInstruction::CreateCompare(
              pred_input_shape, dim_index, broadcasted_effective_size,
              ComparisonDirection::kLt));
      input_shape_pred_mask = reshape->AddInstruction(
          HloInstruction::CreateBinary(pred_input_shape, HloOpcode::kAnd,
                                       input_shape_pred_mask, selected));
    }
    // Strip this dimension off the linear index for the next iteration.
    iota = reshape->AddInstruction(
        HloInstruction::CreateBinary(mask_input_shape, HloOpcode::kDivide, iota,
                                     broadcasted_static_output_dim_size));
  }
  // Convert the boolean mask into a 0/1 S32 mask.
  HloInstruction* broadcasted_one = reshape->AddInstruction(
      HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
  HloInstruction* broadcasted_zero = reshape->AddInstruction(
      HloInstruction::CreateBroadcast(mask_input_shape, zero, {}));
  return reshape->AddInstruction(HloInstruction::CreateTernary(
      mask_input_shape, HloOpcode::kSelect, input_shape_pred_mask,
      broadcasted_one, broadcasted_zero));
}
// Rewrites a reshape that splits one (dynamic) input dimension `input_dim`
// into several output dimensions `output_dims`, so the result is laid out
// correctly under padding: elements are compacted with a cumulative-sum /
// gather shuffle before the reshape, then the output's dynamic sizes are
// re-attached. Returns true iff a rewrite happened.
absl::StatusOr<bool> RewriteDynamicReshapeSplitInput(
    HloInstruction* reshape, int64_t input_dim,
    absl::Span<const int64_t> output_dims,
    absl::Span<HloInstruction*> output_dynamic_dims,
    DynamicDimensionInference* dynamic_dimension_inference) {
  VLOG(2) << "Reshaping input dim " << input_dim << " to "
          << VectorString(output_dims);
  const Shape operand_shape = reshape->operand(0)->shape();
  TF_RET_CHECK(output_dims.size() > 1);
  const Shape mask_input_shape =
      ShapeUtil::MakeShape(xla::S32, {operand_shape.dimensions(input_dim)});
  const Shape pred_input_shape =
      ShapeUtil::MakeShape(xla::PRED, {operand_shape.dimensions(input_dim)});
  HloInstruction* zero = reshape->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
  HloInstruction* one = reshape->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::One(S32)));
  // 0/1 mask over the input dimension marking in-bounds positions.
  HloInstruction* input_shape_binary_mask =
      GenerateBinaryMask(reshape, input_dim, output_dims, output_dynamic_dims,
                         one, zero, true);
  if (input_shape_binary_mask == nullptr) {
    // No dynamic output dimension needs compaction.
    VLOG(2) << "No need to rewrite";
    return false;
  }
  // Scalar S32 add computation used by the cumulative-sum reduce-window.
  auto embedded_builder = HloComputation::Builder("add");
  {
    auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(S32, {}), "lhs"));
    auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(S32, {}), "rhs"));
    embedded_builder.AddInstruction(
        HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
  }
  HloComputation* add =
      reshape->GetModule()->AddEmbeddedComputation(embedded_builder.Build());
  // Inclusive prefix sum via reduce-window with full low padding; the
  // cumsum of the 0/1 mask (minus one) gives each element's source index.
  Window cumsum_window;
  WindowDimension* dim = cumsum_window.add_dimensions();
  dim->set_size(operand_shape.dimensions(input_dim));
  dim->set_stride(1);
  dim->set_padding_low(operand_shape.dimensions(input_dim) - 1);
  dim->set_padding_high(0);
  dim->set_window_dilation(1);
  dim->set_base_dilation(1);
  HloInstruction* cumsum =
      reshape->AddInstruction(HloInstruction::CreateReduceWindow(
          mask_input_shape, input_shape_binary_mask, zero, cumsum_window, add));
  HloInstruction* broadcast_ones = reshape->AddInstruction(
      HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
  cumsum = reshape->AddInstruction(HloInstruction::CreateBinary(
      mask_input_shape, HloOpcode::kSubtract, cumsum, broadcast_ones));
  // Gather along input_dim using the cumsum as indices, permuting elements
  // into their compacted positions.
  GatherDimensionNumbers gather_dim_numbers;
  for (int64_t i = 0; i < operand_shape.dimensions_size(); ++i) {
    if (i != input_dim) {
      gather_dim_numbers.add_offset_dims(i);
    }
  }
  gather_dim_numbers.add_start_index_map(input_dim);
  gather_dim_numbers.set_index_vector_dim(1);
  gather_dim_numbers.add_collapsed_slice_dims(input_dim);
  // Temporarily pin the operand's dynamic dimension to its full static size
  // so the gather sees the whole buffer.
  HloInstruction* operand_static_dim_size =
      reshape->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<int32_t>(operand_shape.dimensions(input_dim))));
  HloInstruction* operand_static =
      reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
          operand_shape, reshape->mutable_operand(0), operand_static_dim_size,
          input_dim));
  std::vector<int64_t> slice_sizes(operand_shape.dimensions().begin(),
                                   operand_shape.dimensions().end());
  slice_sizes[input_dim] = 1;
  HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(
      ShapeUtil::MakeShape(operand_shape.element_type(),
                           operand_shape.dimensions()),
      operand_static, cumsum, gather_dim_numbers, slice_sizes, true));
  TF_RETURN_IF_ERROR(reshape->ReplaceOperandWith(0, gather));
  // Re-attach the dynamic sizes of the split output dimensions on top of the
  // reshape, then point the original users at the dynamic result.
  HloInstruction* reshape_dynamic = reshape;
  auto users = reshape->users();
  for (int64_t output_dim : output_dims) {
    HloInstruction* output_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);
    if (output_dynamic_size != nullptr) {
      reshape_dynamic =
          reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
              reshape->shape(), reshape_dynamic, output_dynamic_size,
              output_dim));
    }
  }
  for (auto* user : users) {
    TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, reshape_dynamic));
  }
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      reshape, reshape_dynamic, {}));
  return true;
}
absl::StatusOr<bool> RewriteDynamicReshapeCombineInput(
HloInstruction* reshape, absl::Span<const int64_t> input_dims,
int64_t output_dim, absl::Span<HloInstruction*> input_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* zero = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
HloInstruction* one = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
const Shape output_shape = reshape->shape();
const Shape input_shape = reshape->operand(0)->shape();
const Shape mask_output_shape =
ShapeUtil::MakeShape(xla::S32, {output_shape.dimensions(output_dim)});
HloInstruction* output_shape_binary_mask =
GenerateBinaryMask(reshape, output_dim, input_dims, input_dynamic_dims,
one, zero, false);
if (output_shape_binary_mask == nullptr) {
VLOG(2) << "No need to rewrite";
return false;
}
HloInstruction* iota =
reshape->AddInstruction(HloInstruction::CreateIota(mask_output_shape, 0));
HloComputation::Builder comp_builder("compare");
HloInstruction* lhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(S32), "lhs_key"));
HloInstruction* rhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeScalarShape(S32), "rhs_key"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeScalarShape(S32), "lhs_value"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeScalarShape(S32), "rhs_value"));
comp_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), lhs_key,
rhs_key, ComparisonDirection::kGt));
HloComputation* compare =
reshape->GetModule()->AddEmbeddedComputation(comp_builder.Build());
HloInstruction* sort = reshape->AddInstruction(HloInstruction::CreateSort(
ShapeUtil::MakeTupleShape({mask_output_shape, mask_output_shape}), 0,
{output_shape_binary_mask, iota}, compare,
true));
HloInstruction* gather_indices = reshape->AddInstruction(
HloInstruction::CreateGetTupleElement(mask_output_shape, sort, 1));
GatherDimensionNumbers gather_dim_numbers;
for (int64_t i = 0; i < output_shape.dimensions_size(); ++i) {
if | #include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/client/xla_builder.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/llvm_irgen_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = xla::testing::opcode_matchers;
// Test hook: only the "OpWithDynamicLowering" custom call advertises (and
// requires) dynamism support; every other op reports no support.
OpDynamismSupport OpHasDynamismSupport(HloInstruction* hlo) {
  const bool is_dynamic_custom_call =
      hlo->opcode() == HloOpcode::kCustomCall &&
      hlo->custom_call_target() == "OpWithDynamicLowering";
  return is_dynamic_custom_call ? OpDynamismSupport::kRequired
                                : OpDynamismSupport::kNoSupport;
}
// Test hook: forwards the operand's dimension-0 dynamic size through the
// "OpWithDynamicLowering" custom call. For the tuple-shaped variant the
// dynamic data lives at tuple index {1}.
absl::Status CustomCallDynamicDimensionInference(
    HloInstruction* hlo, DynamicDimensionInference* inferencer) {
  if (hlo->custom_call_target() != "OpWithDynamicLowering") {
    return absl::OkStatus();
  }
  if (hlo->shape().IsTuple()) {
    HloInstruction* size =
        inferencer->GetDynamicSize(hlo->mutable_operand(0), {1}, 0);
    inferencer->SetDynamicSize(hlo, {1}, 0, size);
  } else {
    HloInstruction* size =
        inferencer->GetDynamicSize(hlo->mutable_operand(0), {}, 0);
    inferencer->SetDynamicSize(hlo, {}, 0, size);
  }
  return absl::OkStatus();
}
// Fixture for DynamicPadder unit tests: owns the module under test and runs
// the padder (plus simplification passes) on it.
class DynamicPadderTest : public HloTestBase {
 protected:
  DynamicPadderTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); }
  // Parses `hlo_text` into a verified module.
  std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
    std::unique_ptr<HloModule> module =
        ParseAndReturnVerifiedModule(hlo_text).value();
    return module;
  }
  // Runs DynamicPadder on `module_` followed by tuple + algebraic
  // simplification (only when the padder changed something). Returns whether
  // the padder changed the module.
  absl::StatusOr<bool> RunPadder(
      bool slice_dynamic_output = false,
      OpSupportsDynamismHandler op_supports_dynamism_handler =
          OpHasDynamismSupport,
      DynamicDimensionInference::CustomCallInferenceHandler
          custom_call_handler = CustomCallDynamicDimensionInference) {
    DynamicPadderOptions options;
    options.slice_dynamic_output = slice_dynamic_output;
    options.op_supports_dynamism_handler =
        std::move(op_supports_dynamism_handler);
    options.custom_call_handler = std::move(custom_call_handler);
    DynamicPadder padder(std::move(options));
    TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&padder, module_.get()));
    if (!changed) return false;
    TupleSimplifier tuple_simplifier;
    TF_RETURN_IF_ERROR(RunHloPass(&tuple_simplifier, module_.get()).status());
    AlgebraicSimplifier alg_simplifier(AlgebraicSimplifierOptions{});
    TF_RETURN_IF_ERROR(RunHloPass(&alg_simplifier, module_.get()).status());
    return true;
  }
  // Asserts that `inst` is the select-based mask emitted by PadWithScalar
  // (iota < broadcast(size) ? data : identity).
  void ExpectPadded(const HloInstruction* inst) {
    EXPECT_THAT(inst,
                op::Select(op::Lt(op::Iota(), op::Broadcast(op::Parameter())),
                           ::testing::_, op::Broadcast()));
  }
  // Builds and registers a scalar F32 add computation (reduce/reduce-window
  // body).
  HloComputation* GetScalarAddComputation() {
    auto embedded_builder = HloComputation::Builder("add");
    auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {}), "lhs"));
    auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(F32, {}), "rhs"));
    embedded_builder.AddInstruction(
        HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
    return module_->AddEmbeddedComputation(embedded_builder.Build());
  }
  std::unique_ptr<HloModule> module_;
  const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
class MemoryAlignmentTest : public HloTestBase {};
// End-to-end check that an f16 scatter on a dynamically sized operand
// produces the same result as the reference backend (disabled on CPU).
TEST_F(MemoryAlignmentTest, DISABLED_ON_CPU(TestDataTypeFP16)) {
  const std::string hlo_text = R"(
    HloModule TestDataTypeFP16
    update_add (p0: f16[], p1: f16[]) -> f16[] {
      p0 = f16[] parameter(0)
      p1 = f16[] parameter(1)
      ROOT out = f16[] add(p0, p1)
    }
    ENTRY main () -> f16[<=1,1] {
      c1 = s32[1]{0} constant({1})
      c2 = f16[1,1]{1,0} constant({ {0.099976} })
      shape = s32[] reshape(s32[1]{0} c1)
      dim_size = f16[<=1,1]{1,0} set-dimension-size(f16[1,1]{1,0} c2, s32[] shape),
        dimensions={0}
      ROOT out = f16[<=1,1]{1,0} scatter(f16[<=1,1]{1,0} dim_size, s32[1]{0} c1, f16[1,1]{1,0} c2),
        update_window_dims={1},
        inserted_window_dims={0},
        scatter_dims_to_operand_dims={0},
        index_vector_dim=1,
        to_apply=update_add
    }
  )";
  EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
// A reduce over a dynamic dimension must have its input masked (padded with
// the init value) before reduction; the module becomes marked dynamic.
TEST_F(DynamicPadderTest, ReduceTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
  auto reduce_shape = ShapeUtil::MakeShape(F32, {2});
  auto dynamic_shape =
      ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
  auto data_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "data_param"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  // Make dimension 2 dynamic; it participates in the reduction ({0, 2}).
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, data_param, size_param, 2));
  auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
      dynamic_shape, HloOpcode::kNegate, data_param))
;
  auto init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
  auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
      reduce_shape, negate, init, {0, 2}, GetScalarAddComputation()));
  EXPECT_FALSE(module_->is_dynamic());
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunPadder().status());
  ExpectPadded(reduce->operand(0));
  EXPECT_TRUE(module_->is_dynamic());
}
// Custom calls that require dynamism keep dynamic inputs: SliceToDynamic is
// inserted before the chain and PadToStatic after it, and the entry root is
// sliced back to dynamic because slice_dynamic_output=true.
TEST_F(DynamicPadderTest, DynamicLoweringTest) {
  const std::string hlo_text = R"(
HloModule DynamicLowering

ENTRY main {
  param = s32[5] parameter(0)
  const = s32[] constant(3)
  param_padded = s32[<=5] set-dimension-size(param, const),
                dimensions={0}
  custom-call.1 = s32[<=5] custom-call(param_padded),
    custom_call_target="OpWithDynamicLowering"
  custom-call.2 = s32[<=5] custom-call(custom-call.1),
    custom_call_target="OpWithDynamicLowering"
  ROOT negate = s32[<=5] negate(custom-call.2)
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  auto custom_call_1 =
      module_->entry_computation()->GetInstructionWithName("custom-call.1");
  auto custom_call_2 =
      module_->entry_computation()->GetInstructionWithName("custom-call.2");
  // The dynamic-lowering chain is entered through a SliceToDynamic...
  HloInstruction* slice_to_dynamic = custom_call_1->mutable_operand(0);
  ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
  ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
  // ...and exited through a PadToStatic before the static negate.
  ASSERT_EQ(custom_call_2->user_count(), 1);
  HloInstruction* pad_to_static = custom_call_2->users()[0];
  ASSERT_THAT(pad_to_static->opcode(), HloOpcode::kCustomCall);
  ASSERT_THAT(pad_to_static->custom_call_target(), "PadToStatic");
  // Root is sliced back to its dynamic size.
  slice_to_dynamic = module_->entry_computation()->root_instruction();
  ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
  ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
}
// Same as DynamicLoweringTest but the dynamic data flows through a tuple:
// SliceToDynamic/PadToStatic are inserted around the tuple-shaped custom
// call chain.
TEST_F(DynamicPadderTest, DynamicLoweringTestTupleInput) {
  const std::string hlo_text = R"(
HloModule DynamicLowering

ENTRY main {
  param = s32[5] parameter(0)
  const = s32[] constant(3)
  param_padded = s32[<=5] set-dimension-size(param, const),
                dimensions={0}
  tuple_arg = (s32[], s32[<=5]) tuple(const, param_padded)
  custom-call.1 = (s32[], s32[<=5]) custom-call(tuple_arg),
    custom_call_target="OpWithDynamicLowering"
  custom-call.2 = (s32[], s32[<=5]) custom-call(custom-call.1),
    custom_call_target="OpWithDynamicLowering"
  data = s32[<=5]{0} get-tuple-element(custom-call.2), index=1
  ROOT negate = s32[<=5] negate(data)
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  auto* root = module_->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::CustomCall(
                        {"SliceToDynamic"}, op::Negate(),
                        op::GetTupleElement(op::CustomCall({"PadToStatic"}))));
  HloInstruction* negate = root->mutable_operand(0);
  EXPECT_THAT(
      negate,
      op::Negate(op::GetTupleElement(op::CustomCall(
          {"PadToStatic"}, op::GetTupleElement(op::CustomCall(
                               {"OpWithDynamicLowering"}, ::testing::_))))));
  auto custom_call_1 =
      module_->entry_computation()->GetInstructionWithName("custom-call.1");
  EXPECT_THAT(custom_call_1,
              op::CustomCall({"OpWithDynamicLowering"},
                             op::Tuple(op::Constant(),
                                       op::CustomCall({"SliceToDynamic"}))));
}
// A dynamic value nested inside a tuple-of-tuples output is sliced back to
// dynamic (SliceToDynamic) at the nested position.
TEST_F(DynamicPadderTest, DynamicOutputNestedTuple) {
  const std::string hlo_text = R"(
HloModule DynamicLowering

ENTRY main {
  param = s32[5] parameter(0)
  const = s32[] constant(3)
  const2 = s32[] constant(4)
  param_padded = s32[<=5] set-dimension-size(param, const),
                dimensions={0}
  tuple0 = (s32[], s32[<=5]) tuple(const, param_padded)
  ROOT tuple1 = (s32[], (s32[], s32[<=5])) tuple(const2, tuple0)
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  TF_ASSERT_OK(TupleSimplifier().Run(module_.get()).status());
  XLA_LOG_LINES(0, module_->ToString());
  auto* root = module_->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::Tuple(op::Constant(), op::Tuple()));
  HloInstruction* nested_tuple = root->mutable_operand(1);
  EXPECT_THAT(nested_tuple,
              op::Tuple(op::Constant(), op::CustomCall({"SliceToDynamic"})));
}
// A convolution whose lhs has a dynamic dimension (dim 1 here) must have
// that operand masked with zeros before the contraction.
TEST_F(DynamicPadderTest, ConvolutionTest) {
  auto builder = HloComputation::Builder(TestName());
  constexpr int xdim = 3;
  constexpr int ydim = 2;
  constexpr int zdim = 1;
  auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
  auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
  auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim});
  auto xy_shape_dynamic =
      ShapeUtil::MakeShape(F32, {xdim, ydim}, {false, true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, xy_shape, "A"));
  auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, yz_shape, "B"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      2, scalar_shape_, "size_param"));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
  dnums.set_kernel_input_feature_dimension(0);
  dnums.set_kernel_output_feature_dimension(1);
  dnums.set_input_batch_dimension(0);
  dnums.set_output_batch_dimension(1);
  dnums.set_output_feature_dimension(0);
  Window window;
  // Dimension 1 of A is made dynamic.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      xy_shape_dynamic, a_param, size_param, 1));
  auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      zx_shape, a_param, b_param, 1,
      1, window, dnums,
      HloTestBase::DefaultPrecisionConfig(2)));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunPadder().status());
  ExpectPadded(conv->operand(0));
}
// A dynamic batch dimension (dim 0 == input_batch_dimension) needs no
// masking, so the convolution operand is left as a bare parameter.
TEST_F(DynamicPadderTest, ConvolutionNoPad) {
  auto builder = HloComputation::Builder(TestName());
  constexpr int xdim = 3;
  constexpr int ydim = 2;
  constexpr int zdim = 1;
  auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
  auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
  auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, false});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, xy_shape, "A"));
  auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, yz_shape, "B"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      2, scalar_shape_, "size_param"));
  // Dimension 0 (batch) of A is made dynamic.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
  dnums.set_kernel_input_feature_dimension(0);
  dnums.set_kernel_output_feature_dimension(1);
  dnums.set_input_batch_dimension(0);
  dnums.set_output_batch_dimension(1);
  dnums.set_output_feature_dimension(0);
  Window window;
  auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      zx_shape, a_param, b_param, 1,
      1, window, dnums,
      HloTestBase::DefaultPrecisionConfig(2)));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunPadder().status());
  EXPECT_THAT(conv->operand(0), op::Parameter());
}
// A reduce-window with a size-1 window along the dynamic dimension needs no
// masking, so the input stays a bare parameter.
TEST_F(DynamicPadderTest, ReduceWindowNoPadForTrivialWindow) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
  auto reduce_shape = ShapeUtil::MakeShape(F32, {3, 5}, {false, true});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {false, true});
  auto input = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "input"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  // Dimension 1 is dynamic; the window is 2x1, i.e. trivial along dim 1.
  input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, input, size_param, 1));
  auto init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
  TF_ASSERT_OK_AND_ASSIGN(Window window, ParseWindow("size=2x1 pad=0_0x0_0"));
  auto output = builder.AddInstruction(HloInstruction::CreateReduceWindow(
      reduce_shape, input, init, window, GetScalarAddComputation()));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunPadder().status());
  EXPECT_THAT(output->operand(0), op::Parameter());
}
// Variadic reduce-window variant of the trivial-window case: neither data
// operand needs masking when the window is size 1 along the dynamic dim.
TEST_F(DynamicPadderTest, VariadicReduceWindowNoPadForTrivialWindow) {
  const std::string hlo_text = R"(
HloModule VariadicReduceWindowNoPadForTrivialWindow

add_f32 (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
  a = f32[] parameter(0)
  b = s32[] parameter(1)
  c = f32[] parameter(2)
  d = s32[] parameter(3)
  add.0 = f32[] add(a, c)
  add.1 = s32[] add(b, d)
  ROOT out = tuple(add.0, add.1)
}

ENTRY main {
  input.0 = f32[4, 5] parameter(0)
  input.1 = s32[4, 5] parameter(1)
  size_param.0 = s32[] parameter(2)
  size_param.1 = s32[] parameter(3)
  input_dynamic.0 = f32[4,<=5] set-dimension-size(input.0, size_param.0), dimensions={1}
  input_dynamic.1 = s32[4,<=5] set-dimension-size(input.1, size_param.0), dimensions={1}
  init.0 = f32[] constant(0.0)
  init.1 = s32[] constant(0)
  ROOT output = (f32[3, <=5], s32[3, <=5]) reduce-window(input_dynamic.0, input_dynamic.1, init.0, init.1), window={size=2x1 pad=0_0x0_0}, to_apply=add_f32
}
)";
  const int kNumParams = 2;
  module_ = ParseAndReturnVerifiedModule(hlo_text).value();
  TF_ASSERT_OK(RunPadder().status());
  // Both data operands of the reduce-window must remain unmasked parameters.
  for (int i = 0; i < kNumParams; ++i) {
    EXPECT_THAT(module_->entry_computation()->root_instruction()->operand(i),
                op::Parameter());
  }
}
// Mixed-precision dot (s8 inputs, s32 output) with a dynamic lhs dimension:
// the dot is computed on static shapes and the result is wrapped in a
// SliceToDynamic carrying the dynamic size.
TEST_F(DynamicPadderTest, PadS8ToS32Dot) {
  const std::string hlo_text = R"(
HloModule test
ENTRY test {
  a = s8[<=16,32] parameter(0)
  b = s8[32,64] parameter(1)
  ROOT root = s32[<=16,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  EXPECT_THAT(module_->entry_computation()->root_instruction(),
              GmockMatch(m::CustomCall({"SliceToDynamic"},
                                       m::Dot(m::Op().WithShape(S8, {16, 32}),
                                              m::Op().WithShape(S8, {32, 64}))
                                           .WithShape(S32, {16, 64}),
                                       m::Op(), m::Op())));
}
// An unknown custom call producing a dynamic shape must not be wrapped in
// PadToStatic/SliceToDynamic; the padder leaves it untouched at the root.
TEST_F(DynamicPadderTest, PadToStaticForCustomCall) {
  const std::string hlo_text = R"(
HloModule test
ENTRY test {
  a = f32[64] parameter(0)
  ROOT c = f32[<=128] custom-call(a),
    custom_call_target="UnknownOp"
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  EXPECT_THAT(module_->entry_computation()->root_instruction(),
              GmockMatch(m::CustomCall({"UnknownOp"})));
}
// A while loop carrying a dynamic f32[<=32,216] tuple element: after the
// padder, the loop body runs on the static shape (with the dynamic size
// threaded through as an extra s32 tuple element) and the entry root gets
// its dynamic dimension back.
TEST_F(DynamicPadderTest, WhileLoopDynamicShapeChangeToStatic) {
  const std::string hlo_text = R"(
HloModule WhileLoopDynamicShapeChangeToStatic
 %cond_wrapper.19447 {
  param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
  %get-tuple-element.184 = s32[] get-tuple-element(param), index=0
  %get-tuple-element.185 = s32[] get-tuple-element(param), index=1
  ROOT %compare.28 = pred[] compare(s32[] %get-tuple-element.184, s32[] %get-tuple-element.185), direction=LT
}
%while_body_78894_grad_83711__.18882 {
  param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
  %get-tuple-element.184 = s32[] get-tuple-element(param), index=0
  %get-tuple-element.185 = s32[] get-tuple-element(param), index=1
  %add.1 = s32[] add(get-tuple-element.184, get-tuple-element.184)
  %gte.2 = f32[] get-tuple-element(param), index=2
  %broadcast.19389 = f32[32,216]{1,0} broadcast(f32[] %gte.2), dimensions={}
  %constant.32 = s32[] constant(32)
  %set-dimension-size = f32[<=32,216]{1,0} set-dimension-size(f32[32,216]{1,0} %broadcast.19389, s32[] %constant.32), dimensions={0}
  ROOT tuple = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(add.1, %get-tuple-element.185, %gte.2, %set-dimension-size)
}
ENTRY main {
  param = f32[] parameter(0)
  param.1 = f32[<=32,216]{1,0} parameter(1)
  const = s32[] constant(3)
  const2 = s32[] constant(4)
  %tuple.18877 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(const, const2, param, param.1)
  %while.19451 = (s32[], s32[], f32[], f32[<=32,216]{1,0})
    while((s32[], s32[], f32[], f32[<=32,216]{1,0})
    %tuple.18877), condition=%cond_wrapper.19447, body=%while_body_78894_grad_83711__.18882
  ROOT result = f32[<=32,216]{1,0} get-tuple-element(while.19451), index=3
}
)";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  XLA_LOG_LINES(0, module_->ToString());
  auto* root = module_->entry_computation()->root_instruction();
  // The entry result keeps its dynamic dimension 0 (bound 32).
  EXPECT_EQ(root->shape(), ShapeUtil::MakeShape(F32, {32, 216}, {true, false}));
  // Find the single while instruction in the entry computation.
  HloInstruction* while_inst = nullptr;
  for (HloInstruction* inst :
       module_->entry_computation()->MakeInstructionPostOrder()) {
    if (inst->opcode() == HloOpcode::kWhile) {
      ASSERT_EQ(while_inst, nullptr)
          << "while_inst: " << while_inst->name() << ", inst: " << inst->name();
      while_inst = inst;
    }
  }
  // Inside the loop the payload is fully static; the trailing s32 element is
  // the dynamic-size value the padder threads through the loop.
  EXPECT_EQ(while_inst->shape(),
            ShapeUtil::MakeTupleShape({ShapeUtil::MakeScalarShape(S32),
                                       ShapeUtil::MakeScalarShape(S32),
                                       ShapeUtil::MakeScalarShape(F32),
                                       ShapeUtil::MakeShape(F32, {32, 216}),
                                       ShapeUtil::MakeScalarShape(S32)}));
}
// When custom calls *require* dynamic operands (OpDynamismSupport::kRequired),
// the padder must carry the dynamic shapes through the while loop unchanged:
// no PadToStatic/SliceToDynamic is inserted around them and the loop's
// dynamic tuple elements stay dynamic.
TEST_F(DynamicPadderTest, WhileLoopCarriesRequiredDynamicShape) {
  const std::string hlo_text = R"(
HloModule WhileLoopCarriesRequiredDynamicShape
%cond {
  param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
  current = s32[] get-tuple-element(param), index=5
  last = s32[] get-tuple-element(param), index=6
  ROOT result = pred[] compare(current, last), direction=LT
}
%body {
  param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
  var = f32[1024] get-tuple-element(param), index=0
  input0 = f32[<=64] get-tuple-element(param), index=1
  grad0 = f32[32] get-tuple-element(param), index=2
  input1 = f32[<=64] get-tuple-element(param), index=3
  act1 = f32[32] get-tuple-element(param), index=4
  grad1 = f32[32] custom-call(act1), custom_call_target="ComputeGradients"
  var1 = f32[1024] custom-call(var, input0, grad0), custom_call_target="ApplyGradients", output_to_operand_aliasing={{}: (0, {})}
  token2 = token[] get-tuple-element(param), index=7
  infeed2 = (f32[<=64], token[]) infeed(token2)
  input2 = f32[<=64] get-tuple-element(infeed2), index=0
  act2 = f32[32] custom-call(var1, input2), custom_call_target="ComputeActivations"
  current = s32[] get-tuple-element(param), index=5
  constant1 = s32[] constant(1)
  add = s32[] add(current, constant1)
  last = s32[] get-tuple-element(param), index=6
  token3 = token[] get-tuple-element(infeed2), index=1
  ROOT result = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) tuple(var1, input1, grad1, input2, act2, add, last, token3)
}
ENTRY main {
  last = s32[] parameter(0)
  var = f32[1024] parameter(1)
  token0 = token[] after-all()
  infeed0 = (f32[<=64], token[]) infeed(token0)
  input0 = f32[<=64] get-tuple-element(infeed0), index=0
  act0 = f32[32] custom-call(var, input0), custom_call_target="ComputeActivations"
  grad0 = f32[32] custom-call(act0), custom_call_target="ComputeGradients"
  token1 = token[] get-tuple-element(infeed0), index=1
  infeed1 = (f32[<=64], token[]) infeed(token1)
  input1 = f32[<=64] get-tuple-element(infeed1), index=0
  act1 = f32[32] custom-call(var, input1), custom_call_target="ComputeActivations"
  token2 = token[] get-tuple-element(infeed1), index=1
  zero = s32[] constant(0)
  tuple = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) tuple(var, input0, grad0, input1, act1, zero, last, token2)
  while = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) while(tuple), condition=%cond, body=%body
  ROOT result = f32[1024] get-tuple-element(while), index=0
}
)";
  module_ = GetHloModule(hlo_text);
  // Only the two named custom calls accept (in fact require) dynamic shapes;
  // everything else gets no dynamism support.
  auto op_supports_dynamism = [](HloInstruction* hlo) {
    if (hlo->opcode() != HloOpcode::kCustomCall) {
      return OpDynamismSupport::kNoSupport;
    }
    if (hlo->custom_call_target() == "ComputeActivations" ||
        hlo->custom_call_target() == "ApplyGradients") {
      return OpDynamismSupport::kRequired;
    }
    return OpDynamismSupport::kNoSupport;
  };
  // No-op handler: the custom calls need no extra dynamic-dimension wiring.
  auto custom_call_handler = [](HloInstruction* hlo,
                                DynamicDimensionInference* inference) {
    return absl::OkStatus();
  };
  TF_ASSERT_OK(
      RunPadder(
          true,
          std::move(op_supports_dynamism),
          std::move(custom_call_handler))
          .status());
  XLA_LOG_LINES(1, module_->ToString());
  for (HloComputation* computation : module_->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kCustomCall) {
        // The padder must not have materialized static copies around the
        // dynamic-requiring custom calls.
        EXPECT_NE(instruction->custom_call_target(), "PadToStatic");
        EXPECT_NE(instruction->custom_call_target(), "SliceToDynamic");
        if (instruction->custom_call_target() == "ComputeActivations") {
          EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
        } else if (instruction->custom_call_target() == "ApplyGradients") {
          EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
        }
      } else if (instruction->opcode() == HloOpcode::kWhile) {
        // Tuple elements 1 and 3 (the f32[<=64] inputs) remain dynamic.
        const Shape& shape = instruction->shape();
        EXPECT_TRUE(shape.tuple_shapes(1).is_dynamic());
        EXPECT_TRUE(shape.tuple_shapes(3).is_dynamic());
      }
    }
  }
}
// Verifies that a dynamic dimension is tracked through a
// reshape -> dot -> transpose -> reshape chain: after padding, the module is
// still dynamic and the root keeps its dynamic major dimension.
TEST_F(DynamicPadderTest, HandleReshapeCheckPastReshape) {
  auto hlo_text = R"(
HloModule ReshapeDynamicDimension
ENTRY main {
  p0 = f32[4,511,432]{2,1,0} parameter(0)
  p1 = s32[] parameter(1)
  p2 = f32[432,337]{1,0:T(8,128)} parameter(2)
  p0_dynamic = f32[<=4,511,432] set-dimension-size(p0, p1), dimensions={0}
  reshape.4179 = f32[<=2044,432]{1,0} reshape(p0_dynamic)
  dot.4180 = f32[<=2044,337]{1,0} dot(reshape.4179, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  transpose.4181 = f32[<=2044,337]{1,0} transpose(dot.4180), dimensions={0,1}
  ROOT reshape.4183 = f32[<=4,511,337]{2,1,0} reshape(transpose.4181)
})";
  module_ = GetHloModule(hlo_text);
  TF_ASSERT_OK(RunPadder(true).status());
  VLOG(3) << module_->ToString();
  // Use gtest assertions instead of CHECK: CHECK aborts the whole test
  // binary on failure, while EXPECT_TRUE reports a proper test failure.
  EXPECT_TRUE(module_->is_dynamic());
  EXPECT_TRUE(module_->entry_computation()
                  ->root_instruction()
                  ->shape()
                  .is_dynamic_dimension(0));
}
// Fixture for end-to-end tests: runs the DynamicPadder (plus DCE) on a
// module, executes it, and returns the result so tests can compare padded
// and unpadded executions.
class ExecutionTest : public HloTestBase {
 protected:
  // Parses and verifies `hlo_text` into a module.
  std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
    std::unique_ptr<HloModule> module =
        ParseAndReturnVerifiedModule(hlo_text).value();
    return module;
  }
  // Runs DynamicPadder and HloDCE on `module`, then executes it with
  // `arguments` and transfers back the result literal. When
  // `slice_dynamic_output` is false, the entry layout's dynamic result shape
  // is cleared first so the padder does not slice the output to dynamic.
  Literal PadAndExecute(std::unique_ptr<HloModule> module,
                        absl::Span<Literal* const> arguments,
                        bool slice_dynamic_output = true) {
    if (!slice_dynamic_output) {
      auto new_config = module->config();
      new_config.mutable_entry_computation_layout()
          ->mutable_result_layout()
          ->ClearDynamicShape();
      module->set_config(new_config);
    }
    DynamicPadderOptions options;
    options.slice_dynamic_output = slice_dynamic_output;
    DynamicPadder padder(options);
    TF_CHECK_OK(padder.Run(module.get()).status());
    // DCE removes instructions orphaned by the padder before execution.
    HloDCE dce;
    TF_CHECK_OK(dce.Run(module.get()).status());
    return ExecuteAndTransfer(std::move(module), arguments);
  }
};
// Scatter with a dynamic index/update dimension: executing the module with
// padded (bound 4, real size 2) inputs must give the same result as a
// statically-shaped (size 2) module.
XLA_TEST_F(ExecutionTest, ScatterUpdate) {
  const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  ROOT rhs = s32[] parameter(1)
}
ENTRY main {
  operand = s32[3,3] parameter(0)
  indices = s32[INDICES_BOUND] parameter(1)
  updates = s32[INDICES_BOUND,3] parameter(2)
  dynamic_size = s32[] parameter(3)
  indices_dynamic = s32[<=INDICES_BOUND] set-dimension-size(indices, dynamic_size), dimensions={0}
  updates_dynamic = s32[<=INDICES_BOUND,3] set-dimension-size(updates, dynamic_size), dimensions={0}
  ROOT scatter = s32[3,3] scatter(operand, indices_dynamic, updates_dynamic),
      to_apply=update_s32,
      update_window_dims={1},
      inserted_window_dims={0},
      scatter_dims_to_operand_dims={0},
      index_vector_dim=1
}
)";
  // Reference run: bound equals the real size, so no padding is involved.
  const std::string hlo_text_not_padded =
      absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}});
  auto module_not_padded = GetHloModule(hlo_text_not_padded);
  Literal operand =
      LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
  Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
  Literal updates =
      LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
  Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2);
  Literal not_padded =
      ExecuteAndTransfer(std::move(module_not_padded),
                         {&operand, &scatter_indices, &updates, &dynamic_size});
  // Padded run: bound 4 with two garbage trailing rows that must be ignored.
  const std::string hlo_text_padded =
      absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}});
  auto module_padded = GetHloModule(hlo_text_padded);
  Literal scatter_indices_padded = LiteralUtil::CreateR1<int32_t>({0, 2, 0, 4});
  Literal updates_padded = LiteralUtil::CreateR2<int32_t>(
      {{10, 20, 30}, {70, 80, 90}, {30, 22, 11}, {-1, 20, -1}});
  // NOTE(review): the padder runs twice -- once here and again inside
  // PadAndExecute. Presumably this also exercises idempotence; confirm the
  // explicit Run is intentional.
  DynamicPadder padder;
  TF_CHECK_OK(padder.Run(module_padded.get()).status());
  Literal padded = PadAndExecute(
      std::move(module_padded),
      {&operand, &scatter_indices_padded, &updates_padded, &dynamic_size});
  EXPECT_EQ(padded, not_padded);
}
XLA_TEST_F(ExecutionTest, ScatterUpdateWindowDim) {
const std::string hlo_text = R"(
HloModule ScatterUpdateWindowDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[1,2,3] parameter(0)
indices = s32[1] parameter(1)
updates = s32[2,3,1] parameter(2)
dynamic_size = s32[] constant(1)
operand_dynamic = s32[1, <=2, 3] set-dimension-size(operand, dynamic_size),
dimensions={1}
updates_dynamic = s32[<=2, 3, 1] set-dimension-size(updates, dynamic_size),
dimensions={0}
ROOT scatter = s32[1, <=2, 3] scatter(operand_dynamic, indices, updates_dynamic),
to_apply=update_s32,
update_window_dims={0, 1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
auto hlo_module = GetHloModule(hlo_text);
Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, 0, 0}, {0, 0, 0}}});
Literal sca |
1,974 | cpp | tensorflow/tensorflow | all_reduce_folder | third_party/xla/xla/service/all_reduce_folder.cc | third_party/xla/xla/service/all_reduce_folder_test.cc | #ifndef XLA_SERVICE_ALL_REDUCE_FOLDER_H_
#define XLA_SERVICE_ALL_REDUCE_FOLDER_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that folds a pair of consecutive all-reduce instructions -- where
// the outer one is the sole user of the inner one and both share the same
// reduction -- into a single all-reduce over combined replica groups.
class AllReduceFolder : public HloModulePass {
 public:
  absl::string_view name() const override { return "all-reduce-folder"; }
  using HloPassInterface::Run;
  // Returns true if any all-reduce pair was folded.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/all_reduce_folder.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Computes replica groups for a single all-reduce equivalent to applying an
// all-reduce with `replica_groups0` followed by one with `replica_groups1`.
// Returns std::nullopt when the composition is not expressible as disjoint
// replica groups.
// NOTE(review): assumes every replica id in `replica_groups1` also appears
// in `replica_groups0` (replica_group_no would otherwise be -1 or out of
// range) -- relies on upstream HLO verification.
std::optional<std::vector<ReplicaGroup>> FoldReplicaGroups(
    absl::Span<const ReplicaGroup> replica_groups0,
    absl::Span<const ReplicaGroup> replica_groups1) {
  // num_replicas = 1 + the largest replica id mentioned in the first pass.
  int64_t num_replicas = 0;
  for (const ReplicaGroup &rg : replica_groups0) {
    for (int64_t id : rg.replica_ids()) {
      num_replicas = std::max(num_replicas, id);
    }
  }
  num_replicas++;
  // Map each replica id to the index of its group in replica_groups0.
  std::vector<int> replica_group_no(num_replicas, -1);
  for (int group_no = 0; group_no < replica_groups0.size(); ++group_no) {
    for (int64_t id : replica_groups0[group_no].replica_ids()) {
      replica_group_no[id] = group_no;
    }
  }
  // For each second-pass group, its "contributor set" is the union of the
  // first-pass groups it touches. Two second-pass groups with the same
  // contributor set belong to the same folded group; each set gets an id.
  absl::flat_hash_map<std::vector<bool>, int64_t> contributor_set_id;
  std::vector<int64_t> contributing_replicas_set_id(num_replicas, 0);
  int64_t next_id = 1;
  for (const ReplicaGroup &rg : replica_groups1) {
    std::vector<bool> contributors(num_replicas, false);
    for (int64_t id : rg.replica_ids()) {
      int64_t group_no = replica_group_no[id];
      for (int64_t contrib : replica_groups0[group_no].replica_ids()) {
        // A first-pass group feeding the same second-pass group twice means
        // some replicas would be double-counted: not foldable.
        if (contributors[contrib]) {
          return std::nullopt;
        }
        contributors[contrib] = true;
      }
    }
    // Intern the contributor set, reusing the id for identical sets.
    int64_t set_id;
    auto it = contributor_set_id.find(contributors);
    if (it != contributor_set_id.end()) {
      set_id = it->second;
    } else {
      set_id = next_id++;
      contributor_set_id[contributors] = set_id;
    }
    for (int64_t id : rg.replica_ids()) {
      contributing_replicas_set_id[id] = set_id;
    }
  }
  // Materialize one folded group per contributor set, verifying that every
  // member replica was assigned to exactly that set.
  std::vector<ReplicaGroup> new_replica_groups;
  new_replica_groups.reserve(contributor_set_id.size());
  for (const auto &it : contributor_set_id) {
    const std::vector<bool> &contributors = it.first;
    const int64_t set_id = it.second;
    new_replica_groups.emplace_back();
    ReplicaGroup &group = new_replica_groups.back();
    for (int64_t replica = 0; replica < num_replicas; ++replica) {
      if (contributors[replica]) {
        if (contributing_replicas_set_id[replica] != set_id) {
          return std::nullopt;
        }
        group.add_replica_ids(replica);
      }
    }
  }
  // Canonical order: groups sorted by their smallest replica id.
  absl::c_sort(new_replica_groups,
               [](const ReplicaGroup &a, const ReplicaGroup &b) {
                 return a.replica_ids(0) < b.replica_ids(0);
               });
  return new_replica_groups;
}
}
absl::StatusOr<bool> AllReduceFolder::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  // Layout-constrained all-reduces cannot be rewritten safely; bail out.
  if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
    VLOG(1) << "Skip AllReduceFolder because the module contains all-reduce "
               "with constrained layouts";
    return false;
  }
  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  bool changed = false;
  for (auto computation : module->computations(execution_threads)) {
    for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
      // Look for an all-reduce (ar1) directly fed by another all-reduce (ar0).
      if (inst->opcode() != HloOpcode::kAllReduce ||
          inst->operand(0)->opcode() != HloOpcode::kAllReduce) {
        continue;
      }
      auto *ar0 = Cast<HloAllReduceInstruction>(inst->mutable_operand(0));
      auto *ar1 = Cast<HloAllReduceInstruction>(inst);
      // The inner all-reduce must feed only the outer one, or folding would
      // change what other users observe.
      if (ar0->user_count() != 1) {
        continue;
      }
      // Both all-reduces must match in everything except replica groups
      // (reduction computation, element type handling, etc.), and both must
      // have explicit replica groups to fold.
      std::optional<AllReduceKey> key0 = GetAllReduceKey(
          ar0, nullptr, true);
      std::optional<AllReduceKey> key1 = GetAllReduceKey(
          ar1, nullptr, true);
      if (!key0 || !key1 || *key0 != *key1 || ar0->replica_groups().empty() ||
          ar1->replica_groups().empty()) {
        continue;
      }
      std::optional<std::vector<ReplicaGroup>> new_replica_groups =
          FoldReplicaGroups(ar0->replica_groups(), ar1->replica_groups());
      if (!new_replica_groups) {
        continue;
      }
      // Preserve cross-module semantics: if the pair used channel ids, the
      // folded all-reduce gets a fresh one.
      std::optional<int64_t> channel_id;
      if (ar0->channel_id()) {
        channel_id = next_channel_id++;
      }
      HloInstruction *new_ar =
          computation->AddInstruction(HloInstruction::CreateAllReduce(
              ar0->shape(), ar0->operands(), ar0->to_apply(),
              CollectiveDeviceList(*new_replica_groups),
              false, channel_id,
              ar0->use_global_device_ids()));
      TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(new_ar));
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/all_reduce_folder.h"
#include <cstddef>
#include <iostream>
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
using ::testing::HasSubstr;
class AllReduceFolderTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed = AllReduceFolder().Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule> &module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
};
// {{0,1},{2,3}} followed by {{0,2},{1,3}} folds to one all-reduce over all
// four replicas.
TEST_F(AllReduceFolderTest, Simple) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_EQ(AllReduceCount(module), 1);
  HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::Parameter(0)));
  EXPECT_THAT(root->ToString(), HasSubstr("replica_groups={{0,1,2,3}}"));
}
// Same as Simple but with the two group layouts swapped; folding is
// symmetric in the order of the passes.
TEST_F(AllReduceFolderTest, SimpleSwap) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,2},{1,3}}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_EQ(AllReduceCount(module), 1);
  HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::Parameter(0)));
  EXPECT_THAT(root->ToString(), HasSubstr("replica_groups={{0,1,2,3}}"));
}
// An all-reduce with empty replica groups is never folded.
TEST_F(AllReduceFolderTest, EmptyReplicaGroups) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Mismatched channel ids (only one all-reduce has one) prevent folding.
TEST_F(AllReduceFolderTest, MismatchOtherProperties0) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, channel_id=1, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Different reduction computations (add vs. multiply) prevent folding.
TEST_F(AllReduceFolderTest, MismatchOtherProperties1) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
mul {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT mul = f32[] multiply(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=mul
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Identical replica groups applied twice reduce within the same groups; the
// composition is not a different disjoint partition, so nothing is folded.
TEST_F(AllReduceFolderTest, NotFoldable) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, false));
}
// Eight replicas in irregular pairings still fold when the composed
// reduction partitions cleanly: here into {0,1,4,5} and {2,3,6,7}.
TEST_F(AllReduceFolderTest, Foldable0) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,4},{1,5},{2,3},{6,7}}, to_apply=sum
  ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,5},{4,1},{2,7},{3,6}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  EXPECT_EQ(AllReduceCount(module), 1);
  HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::Parameter(0)));
  EXPECT_THAT(root->ToString(),
              HasSubstr("replica_groups={{0,1,4,5},{2,3,6,7}}"));
}
// Three compatible all-reduces applied back to back collapse (pairwise, over
// repeated pass application within one Run) into a single all-reduce over
// all eight replicas.
TEST_F(AllReduceFolderTest, FoldableChain) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=sum
  ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum
  ROOT ar2 = f32[8] all-reduce(ar1), replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          RunPass(hlo_string, true));
  // (Removed a leftover `std::cerr << module->ToString()` debug dump that
  // wrote the whole module to stderr on every test run.)
  EXPECT_EQ(AllReduceCount(module), 1);
  HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::Parameter(0)));
  EXPECT_THAT(root->ToString(),
              HasSubstr("replica_groups={{0,1,2,3,4,5,6,7}}"));
}
}
} |
1,975 | cpp | tensorflow/tensorflow | stream_pool | third_party/xla/xla/service/stream_pool.cc | third_party/xla/xla/service/stream_pool_test.cc | #ifndef XLA_SERVICE_STREAM_POOL_H_
#define XLA_SERVICE_STREAM_POOL_H_
#include <memory>
#include <unordered_map>
#include <vector>
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace se = ::stream_executor;
// A thread-safe pool of se::Stream objects, bucketed by stream priority.
// Borrowed streams are returned to the pool automatically when the returned
// smart pointer is destroyed.
class StreamPool {
 public:
  // Custom deleter that gives the stream back to its owning pool instead of
  // destroying it.
  struct PtrDeleter {
    void operator()(se::Stream* stream) { pool->ReturnStream(stream); }
    StreamPool* pool;
  };
  // Smart pointer type handed to callers; destruction returns the stream.
  using Ptr = std::unique_ptr<se::Stream, PtrDeleter>;
  explicit StreamPool(se::StreamExecutor* executor) : executor_(executor) {}
  // Returns a stream of the given priority, reusing a pooled one when a
  // healthy one is available, otherwise creating a new stream.
  Ptr BorrowStream(se::StreamPriority priority = se::StreamPriority::Default);

 private:
  // Puts `stream` back in the pool if it is still ok; deletes it otherwise.
  void ReturnStream(se::Stream* stream);
  absl::Mutex mu_;
  // Idle streams, keyed by priority. All access is guarded by mu_.
  std::unordered_map<se::StreamPriority,
                     std::vector<std::unique_ptr<se::Stream>>>
      streams_with_pri_ ABSL_GUARDED_BY(mu_);
  se::StreamExecutor* executor_;
};
}
#endif
#include "xla/service/stream_pool.h"
#include <memory>
#include <utility>
#include "absl/strings/str_format.h"
namespace xla {
// Returns a stream with `priority`, reusing an idle pooled stream when a
// healthy one exists and creating a fresh stream otherwise. The returned
// Ptr's deleter hands the stream back to this pool.
StreamPool::Ptr StreamPool::BorrowStream(se::StreamPriority priority) {
  std::unique_ptr<se::Stream> stream;
  {
    absl::MutexLock lock(&mu_);
    // Look up the priority bucket once; the original did a find() followed
    // by three operator[] re-hashes of the same key.
    auto it = streams_with_pri_.find(priority);
    if (it != streams_with_pri_.end()) {
      std::vector<std::unique_ptr<se::Stream>>& streams = it->second;
      // Pop until a healthy stream turns up; streams that went !ok while
      // pooled are destroyed as they are encountered.
      while (!streams.empty() && !stream) {
        stream = std::move(streams.back());
        streams.pop_back();
        if (stream->ok()) {
          VLOG(1) << absl::StrFormat(
              "StreamPool reusing existing stream (%p) with priority: %s",
              stream.get(), se::StreamPriorityToString(priority));
        } else {
          VLOG(1) << absl::StrFormat(
              "Stream (%p) was not ok, deleting with : %s", stream.get(),
              se::StreamPriorityToString(priority));
          stream = nullptr;
        }
      }
    }
  }
  if (!stream) {
    // The pool had no usable stream at this priority; make a new one.
    stream = executor_->CreateStream(priority).value();
    VLOG(1) << absl::StrFormat("Created new stream (%p) with priority = %s",
                               stream.get(),
                               se::StreamPriorityToString(priority));
  }
  // Transfer ownership to the caller; the deleter routes it back here.
  PtrDeleter deleter = {this};
  return Ptr(stream.release(), deleter);
}
// Takes ownership of `stream` back from a caller: a stream that has gone bad
// is destroyed immediately, a healthy one rejoins its priority bucket.
void StreamPool::ReturnStream(se::Stream* stream) {
  if (!stream->ok()) {
    VLOG(1) << absl::StrFormat("StreamPool deleting !ok stream (%p)", stream);
    delete stream;
    return;
  }
  VLOG(1) << absl::StrFormat("StreamPool returning ok stream (%p)", stream);
  absl::MutexLock lock(&mu_);
  const auto priority = std::get<se::StreamPriority>(stream->priority());
  streams_with_pri_[priority].emplace_back(stream);
}
} | #include "xla/service/stream_pool.h"
#include <memory>
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
namespace xla {
namespace {
// Fixture for StreamPool tests; builds a fresh host-platform executor per
// call so each test gets an isolated StreamExecutor.
class StreamPoolTest : public ::testing::Test {
 protected:
  std::unique_ptr<se::StreamExecutor> NewStreamExecutor() {
    se::Platform* host_platform =
        se::PlatformManager::PlatformWithName("Host").value();
    se::StreamExecutorConfig device_config(0);
    return host_platform->GetUncachedExecutor(device_config).value();
  }
};
// Constructing and destroying an empty pool must be safe (no streams ever
// borrowed or returned).
TEST_F(StreamPoolTest, EmptyPool) {
  std::unique_ptr<se::StreamExecutor> executor = NewStreamExecutor();
  StreamPool pool(executor.get());
}
// A stream returned to the pool (by resetting the Ptr) is reused by the next
// borrow: both borrows yield the same underlying stream.
TEST_F(StreamPoolTest, OneStreamPool) {
  std::unique_ptr<se::StreamExecutor> executor = NewStreamExecutor();
  StreamPool pool(executor.get());
  StreamPool::Ptr stream1 = pool.BorrowStream();
  se::Stream* stream1_ptr = stream1.get();
  EXPECT_TRUE(stream1->ok());
  stream1 = nullptr;  // Returns the stream to the pool.
  StreamPool::Ptr stream2 = pool.BorrowStream();
  se::Stream* stream2_ptr = stream2.get();
  EXPECT_TRUE(stream2->ok());
  stream2 = nullptr;
  // Same object was recycled rather than a new stream created.
  EXPECT_EQ(stream1_ptr, stream2_ptr);
}
// With two streams outstanding, the pool creates distinct streams; once each
// is returned, subsequent borrows reuse exactly the returned stream.
TEST_F(StreamPoolTest, TwoStreamPool) {
  std::unique_ptr<se::StreamExecutor> executor = NewStreamExecutor();
  StreamPool pool(executor.get());
  StreamPool::Ptr stream1 = pool.BorrowStream();
  se::Stream* stream1_ptr = stream1.get();
  EXPECT_TRUE(stream1->ok());
  StreamPool::Ptr stream2 = pool.BorrowStream();
  se::Stream* stream2_ptr = stream2.get();
  EXPECT_TRUE(stream2->ok());
  // Both are live simultaneously, so they must be different streams.
  EXPECT_NE(stream1_ptr, stream2_ptr);
  stream1 = nullptr;  // Return stream1.
  StreamPool::Ptr stream3 = pool.BorrowStream();
  se::Stream* stream3_ptr = stream3.get();
  EXPECT_TRUE(stream3->ok());
  // stream3 recycles stream1, not the still-borrowed stream2.
  EXPECT_EQ(stream1_ptr, stream3_ptr);
  EXPECT_NE(stream2_ptr, stream3_ptr);
  stream2 = nullptr;  // Return stream2.
  StreamPool::Ptr stream4 = pool.BorrowStream();
  se::Stream* stream4_ptr = stream4.get();
  EXPECT_TRUE(stream4->ok());
  // stream4 recycles stream2.
  EXPECT_EQ(stream2_ptr, stream4_ptr);
  EXPECT_NE(stream3_ptr, stream4_ptr);
}
}
} |
1,976 | cpp | tensorflow/tensorflow | llvm_compiler | third_party/xla/xla/service/llvm_compiler.cc | third_party/xla/xla/tests/llvm_compiler_test.cc | #ifndef XLA_SERVICE_LLVM_COMPILER_H_
#define XLA_SERVICE_LLVM_COMPILER_H_
#include "llvm/IR/Module.h"
#include "xla/service/compiler.h"
namespace xla {
// Base class for XLA compilers that lower to LLVM IR. Adds user-installable
// hooks that observe the llvm::Module before and after LLVM optimization.
class LLVMCompiler : public Compiler {
 public:
  ~LLVMCompiler() override {}
  // Callback invoked with the LLVM module; must not mutate it.
  using ModuleHook = std::function<void(const llvm::Module&)>;
  // Installs `hook` to run before LLVM optimization passes. At most one hook
  // may be installed at a time; installing over an existing hook is a bug.
  void SetPreOptimizationHook(ModuleHook hook) {
    CHECK(!user_pre_optimization_hook_)
        << "Pre-optimization hook is already set";
    CHECK(hook) << "hook cannot be null";
    user_pre_optimization_hook_ = hook;
  }
  void RemovePreOptimizationHook() { user_pre_optimization_hook_ = nullptr; }
  // Installs `hook` to run after LLVM optimization passes; same single-hook
  // contract as the pre-optimization variant.
  void SetPostOptimizationHook(ModuleHook hook) {
    CHECK(!user_post_optimization_hook_)
        << "Post-optimization hook is already set";
    CHECK(hook) << "hook cannot be null";
    user_post_optimization_hook_ = hook;
  }
  void RemovePostOptimizationHook() { user_post_optimization_hook_ = nullptr; }
  using Compiler::Compile;
  using Compiler::RunBackend;
  using Compiler::RunHloPasses;
  // Compiles each module in `module_group` independently (see the .cc file);
  // returns one executable per module.
  absl::StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
      std::unique_ptr<HloModuleGroup> module_group,
      std::vector<std::vector<se::StreamExecutor*>> stream_execs,
      const CompileOptions& options) override;

 protected:
  ModuleHook user_pre_optimization_hook_;
  ModuleHook user_post_optimization_hook_;
};
}
#endif
#include "xla/service/llvm_compiler.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "tsl/platform/denormal.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#ifdef __FAST_MATH__
#error "Don't build XLA with -ffast-math"
#endif
namespace xla {
// Compiles every module of the group sequentially: HLO passes followed by
// backend code generation, each on the module's first stream executor.
absl::StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile(
    std::unique_ptr<HloModuleGroup> module_group,
    std::vector<std::vector<se::StreamExecutor*>> stream_execs,
    const CompileOptions& options) {
  // Keep denormals enabled for the duration of compilation (constant folding
  // must match runtime numerics).
  tsl::port::ScopedDontFlushDenormal dont_flush_denormals;
  std::vector<std::unique_ptr<Executable>> result;
  std::vector<std::unique_ptr<HloModule>> modules =
      module_group->ConsumeModules();
  for (size_t i = 0; i < modules.size(); i++) {
    // Profiler annotation naming the module being compiled.
    tsl::profiler::ScopedAnnotation annotation{[&] {
      return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
                             modules[i]->name(), modules[i]->unique_id());
    }};
    TF_ASSIGN_OR_RETURN(modules[i], RunHloPasses(std::move(modules[i]),
                                                 stream_execs[i][0], options));
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<Executable> executable,
        RunBackend(std::move(modules[i]), stream_execs[i][0], options));
    result.push_back(std::move(executable));
  }
  // std::move is needed here: `result` converts to the StatusOr return type,
  // and moving avoids copying the vector into it.
  return std::move(result);
}
} | #include "xla/service/llvm_compiler.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "llvm/IR/Module.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/literal_util.h"
#include "xla/service/backend.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using LLVMCompilerTest = HloTestBase;
const char* const kHloText = R"(
HloModule Constant
ENTRY main {
ROOT constant = f32[] constant(42.0)
}
)";
// Each installed hook must be invoked exactly once during RunBackend.
TEST_F(LLVMCompilerTest, HooksTest) {
  int pre_opt_hook_call_count = 0;
  int post_opt_hook_call_count = 0;
  // Note: ModuleHook returns void; std::function silently discards the
  // OkStatus these lambdas return.
  auto pre_opt_hook = [&pre_opt_hook_call_count](const llvm::Module&) {
    ++pre_opt_hook_call_count;
    return absl::OkStatus();
  };
  auto post_opt_hook = [&post_opt_hook_call_count](const llvm::Module&) {
    ++post_opt_hook_call_count;
    return absl::OkStatus();
  };
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  LLVMCompiler* compiler =
      tensorflow::down_cast<xla::LLVMCompiler*>(backend().compiler());
  compiler->SetPreOptimizationHook(pre_opt_hook);
  compiler->SetPostOptimizationHook(post_opt_hook);
  ASSERT_TRUE(compiler
                  ->RunBackend(std::move(hlo_module),
                               backend().default_stream_executor(),
                               nullptr)
                  .ok());
  EXPECT_EQ(1, pre_opt_hook_call_count);
  EXPECT_EQ(1, post_opt_hook_call_count);
}
// Compiling a group of two modules through the multi-module Compile entry
// point should succeed. Currently disabled (see the DISABLED_ prefix).
TEST_F(LLVMCompilerTest, DISABLED_MultiModuleCompilation) {
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  auto hlo_module2 = ParseAndReturnVerifiedModule(kHloText).value();
  std::vector<std::unique_ptr<HloModule>> modules;
  modules.push_back(std::move(hlo_module));
  modules.push_back(std::move(hlo_module2));
  auto module_group =
      std::make_unique<HloModuleGroup>("test_module_group", std::move(modules));
  // One executor list per module, both using the default stream executor.
  std::vector<std::vector<se::StreamExecutor*>> executors;
  executors.push_back({backend().default_stream_executor()});
  executors.push_back({backend().default_stream_executor()});
  EXPECT_IS_OK(backend().compiler()->Compile(std::move(module_group),
                                             std::move(executors),
                                             backend().memory_allocator()));
}
}
} |
1,977 | cpp | tensorflow/tensorflow | conditional_to_select | third_party/xla/xla/service/conditional_to_select.cc | third_party/xla/xla/service/conditional_to_select_test.cc | #ifndef XLA_SERVICE_CONDITIONAL_TO_SELECT_H_
#define XLA_SERVICE_CONDITIONAL_TO_SELECT_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites side-effect-free conditionals (with non-tuple
// results) into select instructions, inlining both branch computations.
// Per Run(), the rewrite is applied only in embedded call contexts.
class ConditionalToSelect : public HloModulePass {
 public:
  ~ConditionalToSelect() override = default;
  absl::string_view name() const override { return "conditional-to-select"; }
  using HloPassInterface::Run;
  // Returns true if any conditional in `module` was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
#endif
#include "xla/service/conditional_to_select.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
// Rewrites one kConditional into select(pred, call(true_comp), call(false_comp))
// and then inlines both calls. Returns true iff the rewrite happened.
static absl::StatusOr<bool> DoConditionalToSelect(HloInstruction* conditional) {
  // Bail out when either branch has side effects: both branches would run
  // unconditionally after the rewrite.
  if (conditional->true_computation()->HasSideEffect() ||
      conditional->false_computation()->HasSideEffect()) {
    VLOG(1) << "Not transforming conditional; branches have side effects:"
            << conditional->ToString();
    return false;
  }
  auto computation = conditional->parent();
  // Wrap each branch computation in a kCall so it can be inlined below.
  HloInstruction* if_call_op =
      computation->AddInstruction(HloInstruction::CreateCall(
          conditional->shape(), {conditional->mutable_operand(1)},
          conditional->true_computation()));
  conditional->SetupDerivedInstruction(if_call_op);
  HloInstruction* else_call_op =
      computation->AddInstruction(HloInstruction::CreateCall(
          conditional->shape(), {conditional->mutable_operand(2)},
          conditional->false_computation()));
  conditional->SetupDerivedInstruction(else_call_op);
  HloInstruction* condition = conditional->mutable_operand(0);
  // Tuple-shaped results are not handled by MakeSelectHlo here; skip them.
  if (else_call_op->shape().IsTuple()) {
    VLOG(1) << "Not transforming tuples to 'select'";
    return false;
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * select_op,
      MakeSelectHlo(condition, if_call_op, else_call_op, conditional));
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, select_op));
  // Inline both calls so the branches become straight-line HLO.
  TF_RETURN_IF_ERROR(CallInliner::Inline(if_call_op).status());
  TF_RETURN_IF_ERROR(CallInliner::Inline(else_call_op).status());
  return true;
}
// Walks the call graph and rewrites every kConditional found in an embedded
// (e.g. map) call context into a select. Returns true if anything changed.
// Fix: removed the dead local `ToInline` vector, which was never used.
absl::StatusOr<bool> ConditionalToSelect::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  bool did_mutate = false;
  VLOG(1) << "Running conditional-to-select pass";
  TF_RETURN_IF_ERROR(
      call_graph->VisitNodes([&](const CallGraphNode& node) -> absl::Status {
        // Only computations used in embedded contexts are transformed.
        if (node.context() != CallContext::kEmbedded) {
          return absl::OkStatus();
        }
        for (const CallSite& callsite : node.callsites()) {
          if (callsite.instruction()->opcode() == HloOpcode::kConditional) {
            VLOG(1) << "Visiting conditional: " << callsite.ToString();
            HloInstruction* conditional = callsite.instruction();
            TF_ASSIGN_OR_RETURN(bool result,
                                DoConditionalToSelect(conditional));
            did_mutate |= result;
          }
        }
        return absl::OkStatus();
      }));
  return did_mutate;
}
} | #include "xla/service/conditional_to_select.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ConditionalToSelectTest = HloTestBase;
using ::testing::_;
// A scalar conditional inside a mapped computation must become a select of
// the two branch constants.
TEST_F(ConditionalToSelectTest, MapConditionalConstants) {
  const std::string hlo_text = R"(
HloModule MapConditionalConstants
if {
  %pif = () parameter(0)
  ROOT %cif = f32[] constant(0)
}
else {
  %pelse = () parameter(0)
  ROOT %celse = f32[] constant(1)
}
mapped {
 %a = f32[] parameter(0)
 %b = f32[] parameter(1)
 %lt = pred[] compare(%a, %b), direction=LT
 %t = () tuple()
 ROOT %conditional = f32[] conditional(%lt, %t, %t), true_computation=if, false_computation=else
}
ENTRY comp {
 %p1 = f32[1000]{0} parameter(0)
 %p2 = f32[1000]{0} parameter(1)
 ROOT %mapped = f32[1000]{0} map(%p1, %p2), dimensions={0}, to_apply=mapped
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  ConditionalToSelect pass;
  ASSERT_TRUE(pass.Run(&*module).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_EQ(root->opcode(), HloOpcode::kMap);
  // The mapped computation's root should now be select(lt, c0, c1).
  HloComputation* mapped = root->called_computations()[0];
  EXPECT_THAT(mapped->root_instruction(),
              op::Select(op::Lt(op::Parameter(0), op::Parameter(1)),
                         op::Constant(), op::Constant()));
}
// A non-scalar (2x2) conditional should be rewritten into a select whose
// predicate is broadcast to the result shape.
TEST_F(ConditionalToSelectTest, MapConditionalNonScalar) {
  const std::string hlo_text = R"(
HloModule MapConditionalNonScalar
if {
  %pif = () parameter(0)
  %zero = f32[] constant(0)
  ROOT %zero_broadcasted = f32[2,2]{1,0} broadcast(%zero), dimensions={}
}
else {
  %pelse = () parameter(0)
  %one = f32[] constant(0)
  ROOT %one_broadcasted = f32[2,2]{1,0} broadcast(%one), dimensions={}
}
add {
  %add_lhs = f32[] parameter(0)
  %add_rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%add_lhs, %add_rhs)
}
mapped {
 %a = f32[] parameter(0)
 %b = f32[] parameter(1)
 %lt = pred[] compare(%a, %b), direction=LT
 %t = () tuple()
 %conditional = f32[2,2]{1,0} conditional(%lt, %t, %t), true_computation=if, false_computation=else
 %zero = f32[] constant(0)
 ROOT %reduced = f32[] reduce(%conditional, %zero), dimensions={0,1}, to_apply=add
}
ENTRY comp {
 %p1 = f32[1000]{0} parameter(0)
 %p2 = f32[1000]{0} parameter(1)
 ROOT %mapped = f32[1000]{0} map(%p1, %p2), dimensions={0}, to_apply=mapped
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  ConditionalToSelect pass;
  ASSERT_TRUE(pass.Run(&*module).value());
  HloInstruction* root = module->entry_computation()->root_instruction();
  ASSERT_EQ(root->opcode(), HloOpcode::kMap);
  HloComputation* mapped = root->called_computations()[0];
  // reduce(select(broadcast(lt), ..., ...), ...) is the expected shape of
  // the rewritten graph.
  EXPECT_THAT(
      mapped->root_instruction(),
      op::Reduce(
          op::Select(op::Broadcast(op::Lt(op::Parameter(0), op::Parameter(1))),
                     _, _),
          _));
}
}
} |
1,978 | cpp | tensorflow/tensorflow | p2p_schedule_preparation | third_party/xla/xla/service/p2p_schedule_preparation.cc | third_party/xla/xla/service/p2p_schedule_preparation_test.cc | #ifndef XLA_SERVICE_P2P_SCHEDULE_PREPARATION_H_
#define XLA_SERVICE_P2P_SCHEDULE_PREPARATION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that adds control dependencies to order Send/Recv (P2P) chains
// so a later scheduler (see name(): latency-hiding-scheduler-preparation)
// sees a well-formed ordering of recv/send/recv-done/send-done.
class P2PSchedulePreparation : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "latency-hiding-scheduler-preparation";
  }
  using HloPassInterface::Run;
  // Returns true if any control dependency was added.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
#endif
#include "xla/service/p2p_schedule_preparation.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Returns true if `op` is a Send/Recv-family instruction that is not a
// host transfer, i.e. a device-to-device P2P operation.
bool IsP2POp(const HloInstruction* op) {
  if (auto* send_recv = DynCast<HloSendRecvInstruction>(op)) {
    return !send_recv->is_host_transfer();
  }
  return false;
}
bool IsCollectiveOp(const HloInstruction* op) {
HloOpcode opcode = op->opcode();
if (opcode == HloOpcode::kCustomCall) {
return true;
}
return hlo_query::IsAsyncCollectiveDoneOp(op, true) ||
(hlo_query::IsCollectiveCommunicationOp(opcode) &&
!hlo_query::IsAsyncCollectiveStartOp(op, true));
}
// For an async "done" op, returns its corresponding "start" op (operand 0);
// for any other op, returns the op itself.
HloInstruction* GetStartOpForDoneOp(HloInstruction* op) {
  const HloOpcode opcode = op->opcode();
  const bool is_done_op = opcode == HloOpcode::kAllReduceDone ||
                          opcode == HloOpcode::kAllGatherDone ||
                          opcode == HloOpcode::kCollectivePermuteDone ||
                          opcode == HloOpcode::kSendDone ||
                          opcode == HloOpcode::kRecvDone;
  return is_done_op ? op->mutable_operand(0) : op;
}
enum P2PGroupKind { kUnpipelined = 0, kPipelined = 1, kUnrecognized = 2 };
enum P2PRuntimeStream { kUnknown = 0, kStream0 = 1, kStream1 = 2 };
// Accumulates the four ops of one P2P chain (recv, send, recv-done,
// send-done) plus, for pipelined groups, the enclosing while loop. All ops
// must belong to the same parent computation.
struct P2PGroupNode {
  // Records `parent` as this node's computation; returns false if a
  // different computation was already recorded.
  bool RecordParentComputation(HloComputation* parent) {
    if (computation == nullptr) {
      computation = parent;
      return true;
    }
    return computation == parent;
  }
  // Records one of the four chain ops. Returns false on a duplicate op,
  // an unexpected opcode, or a parent-computation mismatch.
  bool RecordP2POp(HloSendRecvInstruction* p2p) {
    if (!RecordParentComputation(p2p->parent())) {
      return false;
    }
    switch (p2p->opcode()) {
      case HloOpcode::kRecvDone:
        if (recv_done == nullptr) {
          recv_done = Cast<HloRecvDoneInstruction>(p2p);
          return true;
        }
        break;
      case HloOpcode::kSendDone:
        if (send_done == nullptr) {
          send_done = Cast<HloSendDoneInstruction>(p2p);
          return true;
        }
        break;
      case HloOpcode::kRecv:
        if (recv == nullptr) {
          recv = Cast<HloRecvInstruction>(p2p);
          return true;
        }
        break;
      case HloOpcode::kSend:
        if (send == nullptr) {
          send = Cast<HloSendInstruction>(p2p);
          return true;
        }
        break;
      default:
        break;
    }
    return false;
  }
  // Records the while loop hosting a pipelined chain; at most one allowed.
  bool RecordWhileOp(HloInstruction* while_op) {
    if (while_loop != nullptr) {
      return false;
    }
    if (!RecordParentComputation(while_op->parent())) {
      return false;
    }
    while_loop = while_op;
    return true;
  }
  // True until all four chain ops have been recorded.
  bool Incomplete() const {
    return recv_done == nullptr || send_done == nullptr || recv == nullptr ||
           send == nullptr;
  }
  // For a pipelined parent node, the while loop must also be present.
  bool IncompletePipelinedParent() const {
    return Incomplete() || while_loop == nullptr;
  }
  // Reads the kSendRecvPipelineAttr frontend attribute ("0"/"1") from one op.
  P2PRuntimeStream GetRuntimeStream(const HloInstruction* start) const {
    auto it = start->frontend_attributes().map().find(kSendRecvPipelineAttr);
    if (it != start->frontend_attributes().map().end()) {
      if (it->second == "0") {
        return kStream0;
      }
      if (it->second == "1") {
        return kStream1;
      }
    }
    return kUnknown;
  }
  // Stream of the chain; kUnknown if send and recv disagree.
  P2PRuntimeStream GetRuntimeStream() const {
    P2PRuntimeStream send_stream = GetRuntimeStream(send);
    P2PRuntimeStream recv_stream = GetRuntimeStream(recv);
    if (send_stream != recv_stream) {
      return kUnknown;
    }
    return send_stream;
  }
  // Channel id of the chain, taken from the recv op.
  int64_t GetChannel() const { return recv->channel_id().value(); }
  HloRecvDoneInstruction* recv_done = nullptr;
  HloSendDoneInstruction* send_done = nullptr;
  HloRecvInstruction* recv = nullptr;
  HloSendInstruction* send = nullptr;
  // Parent computation shared by all recorded ops.
  HloComputation* computation = nullptr;
  // While loop for pipelined chains; nullptr otherwise.
  HloInstruction* while_loop = nullptr;
};
struct P2PGroup;
using P2PGroupMap = absl::flat_hash_map<int64_t, P2PGroup>;
using P2PInComputation =
absl::flat_hash_map<const HloComputation*, std::set<int64_t>>;
using CollectiveInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
using ChainStartEnd =
std::pair<HloSendRecvInstruction*, HloSendRecvInstruction*>;
static constexpr int kUnpipelinedNodeIdx = 0;
static constexpr int kPipelinedChildNodeIdx = 0;
static constexpr int kPipelinedParentNodeIdx = 1;
// A group of P2P chains sharing one channel: node 0 is the chain inside the
// child computation (or the only chain when unpipelined); node 1 is the
// chain in the parent computation of a pipelined group. A group may also be
// linked to a complement group on the other runtime stream to form a cycle.
struct P2PGroup {
  // Adds `p2p` to the unpipelined (child) node; demotes the group to
  // kUnrecognized on any inconsistency.
  absl::Status RecordP2POpForUnpipelinedGroup(HloSendRecvInstruction* p2p) {
    if (kind == kUnrecognized) {
      return absl::OkStatus();
    }
    if (kind != kUnpipelined) {
      return Internal("Expected unpipelined group");
    }
    P2PGroupNode& node = nodes[kUnpipelinedNodeIdx];
    if (!node.RecordP2POp(p2p)) {
      kind = kUnrecognized;
    }
    return absl::OkStatus();
  }
  // Adds `p2p` to the parent node, promoting the group to kPipelined the
  // first time a parent-side op is seen.
  absl::Status RecordP2POpForPipelinedGroup(HloSendRecvInstruction* p2p) {
    if (kind == kUnrecognized) {
      return absl::OkStatus();
    }
    if (kind == kUnpipelined) {
      if (nodes[kPipelinedParentNodeIdx].computation != nullptr) {
        return Internal("Expected unpipelined group");
      }
      kind = kPipelined;
    }
    P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
    if (!node.RecordP2POp(p2p)) {
      kind = kUnrecognized;
    }
    return absl::OkStatus();
  }
  // Records the while loop that pipelines this group.
  absl::Status RecordWhileOpToPipelinedGroup(HloInstruction* while_op) {
    if (kind == kUnrecognized) {
      return absl::OkStatus();
    }
    if (kind == kUnpipelined) {
      return Internal("Expected pipelined group");
    }
    P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
    if (!node.RecordWhileOp(while_op)) {
      kind = kUnrecognized;
    }
    return absl::OkStatus();
  }
  // Caches the runtime stream; for pipelined groups child and parent
  // streams must agree and be known. Returns false otherwise.
  bool RecordRuntimeStream() {
    P2PRuntimeStream child_stream =
        nodes[kPipelinedChildNodeIdx].GetRuntimeStream();
    if (kind == kPipelined) {
      P2PRuntimeStream parent_stream =
          nodes[kPipelinedParentNodeIdx].GetRuntimeStream();
      if (child_stream != parent_stream || child_stream == kUnknown) {
        return false;
      }
    }
    runtime_stream = child_stream;
    return true;
  }
  // Called only on a kStream1 group: finds the kStream0 group in the same
  // child computation (and same parent, if pipelined) and links the two as
  // complements of each other.
  absl::Status RecordComplementGroup(P2PGroupMap& p2p_group_map) {
    CHECK(!complement_group_channel.has_value() && runtime_stream == kStream1);
    for (auto& [channel, p2p_group] : p2p_group_map) {
      if (&p2p_group == this ||
          p2p_group.ChildComputation() != ChildComputation()) {
        continue;
      }
      if (p2p_group.kind == kPipelined &&
          p2p_group.ParentComputation() == ParentComputation()) {
        if (p2p_group.runtime_stream != kStream0) {
          return Internal(
              "Expected different pipeline stream for complement group");
        }
        complement_group_channel = channel;
        p2p_group.complement_group_channel = GetChannel();
      } else if (p2p_group.kind == kUnpipelined &&
                 p2p_group.runtime_stream == kStream0) {
        complement_group_channel = channel;
        p2p_group.complement_group_channel = GetChannel();
      }
    }
    return absl::OkStatus();
  }
  HloComputation* ParentComputation() const { return GetParent().computation; }
  HloComputation* ChildComputation() const { return GetChild().computation; }
  // Channel id of the group, read from the child node. Note
  // kUnpipelinedNodeIdx == kPipelinedChildNodeIdx == 0.
  int64_t GetChannel() const { return nodes[kUnpipelinedNodeIdx].GetChannel(); }
  P2PGroupNode& GetChild() { return nodes[kPipelinedChildNodeIdx]; }
  P2PGroupNode& GetParent() { return nodes[kPipelinedParentNodeIdx]; }
  const P2PGroupNode& GetChild() const { return nodes[kPipelinedChildNodeIdx]; }
  const P2PGroupNode& GetParent() const {
    return nodes[kPipelinedParentNodeIdx];
  }
  // Returns the first and last ops of the (possibly merged, when in a
  // cycle) chain as seen from `computation`: the recv that starts it and
  // the send-done that ends it. In a cycle the kStream0 group contributes
  // the recv and the kStream1 group the send-done.
  ChainStartEnd GetChainStartEnd(const HloComputation* computation,
                                 const P2PGroupMap& p2p_group_map) const {
    if (computation == ChildComputation()) {
      if (!InCycle()) {
        return std::make_pair(GetChild().recv, GetChild().send_done);
      }
      if (runtime_stream == kStream1) {
        return std::make_pair(
            GetComplementGroup(p2p_group_map)->GetChild().recv,
            GetChild().send_done);
      }
      return std::make_pair(
          GetChild().recv,
          GetComplementGroup(p2p_group_map)->GetChild().send_done);
    }
    CHECK(kind == kPipelined && computation == ParentComputation());
    if (!InCycle()) {
      return std::make_pair(GetParent().recv, GetParent().send_done);
    }
    if (runtime_stream == kStream1) {
      return std::make_pair(GetComplementGroup(p2p_group_map)->GetParent().recv,
                            GetParent().send_done);
    }
    return std::make_pair(
        GetParent().recv,
        GetComplementGroup(p2p_group_map)->GetParent().send_done);
  }
  HloInstruction* GetWhileOp() const {
    return nodes[kPipelinedParentNodeIdx].while_loop;
  }
  // True when this group has been paired with a complement group.
  bool InCycle() const { return complement_group_channel.has_value(); }
  P2PGroup* GetComplementGroup(P2PGroupMap& p2p_group_map) const {
    CHECK(InCycle());
    return &p2p_group_map.at(*complement_group_channel);
  }
  const P2PGroup* GetComplementGroup(const P2PGroupMap& p2p_group_map) const {
    CHECK(InCycle());
    return &p2p_group_map.at(*complement_group_channel);
  }
  P2PGroupKind kind = kUnpipelined;
  P2PGroupNode nodes[2];
  P2PRuntimeStream runtime_stream = kUnknown;
  // Channel of the complement group forming a cycle with this one, if any.
  std::optional<int64_t> complement_group_channel = std::nullopt;
};
bool MayInvokeCollectiveOp(
const HloInstruction* hlo,
const CollectiveInComputation& collective_in_computation) {
if (IsCollectiveOp(hlo)) {
return true;
}
for (auto callee : hlo->called_computations()) {
auto collective_in_comp = collective_in_computation.find(callee);
if (collective_in_comp != collective_in_computation.end() &&
collective_in_comp->second) {
return true;
}
}
return false;
}
// If `while_op` pipelines P2P chains (its init tuple carries send-done
// results for channels whose chains also appear in the loop body), records
// the while op on those pipelined groups. Allows up to two such groups.
absl::Status MayAddWhileOpToPipelinedGroup(HloInstruction* while_op,
                                           P2PInComputation& p2p_in_computation,
                                           P2PGroupMap& p2p_group_map) {
  if (while_op->while_init()->opcode() != HloOpcode::kTuple) {
    return absl::OkStatus();
  }
  HloComputation* body = while_op->called_computations()[0];
  auto p2p_in_while = p2p_in_computation.find(body);
  if (p2p_in_while == p2p_in_computation.end()) {
    return absl::OkStatus();
  }
  int pipelined_group = 0;
  // A send-done feeding the while-init marks a pipelined channel.
  for (auto hlo : while_op->while_init()->operands()) {
    if (hlo->opcode() != HloOpcode::kSendDone) {
      continue;
    }
    int64_t channel_id = hlo->channel_id().value();
    // The same channel must also have P2P ops inside the loop body.
    if (p2p_in_while->second.find(channel_id) == p2p_in_while->second.end()) {
      continue;
    }
    auto group = p2p_group_map.find(channel_id);
    if (group == p2p_group_map.end() || group->second.kind != kPipelined) {
      continue;
    }
    pipelined_group++;
    if (pipelined_group > 2) {
      return Internal(
          "Expecting up to two pipelined P2P groups for each while-loop");
    }
    TF_RETURN_IF_ERROR(group->second.RecordWhileOpToPipelinedGroup(while_op));
  }
  return absl::OkStatus();
}
// Adds a control dependency forcing `i1` to be scheduled before `i2`.
absl::Status OrderBefore(HloInstruction* i1, HloInstruction* i2) {
  TF_RETURN_IF_ERROR(i1->AddControlDependencyTo(i2));
  VLOG(10) << "Add control predecessor " << i2->ToString();
  return absl::OkStatus();
}
// Orders a single chain as recv -> send -> recv-done -> send-done.
absl::Status ConnectP2P1NodeChain(const P2PGroupNode& node) {
  HloRecvDoneInstruction* recv_done = node.recv_done;
  HloRecvInstruction* recv = node.recv;
  HloSendDoneInstruction* send_done = node.send_done;
  HloSendInstruction* send = node.send;
  TF_RETURN_IF_ERROR(OrderBefore(recv, send));
  TF_RETURN_IF_ERROR(OrderBefore(send, recv_done));
  TF_RETURN_IF_ERROR(OrderBefore(recv_done, send_done));
  return absl::OkStatus();
}
// Orders the single chain of an unpipelined group (its child node).
absl::Status ConnectUnpipelinedP2P(const P2PGroup& p2p_group) {
  return ConnectP2P1NodeChain(p2p_group.GetChild());
}
// Orders the child-side chain of a pipelined group with one chain per node.
absl::Status ConnectPipelined1P2PChild(const P2PGroup& p2p_group) {
  return ConnectP2P1NodeChain(p2p_group.GetChild());
}
// Orders the merged chain of two complementary nodes:
//   recv0 -> send0 -> recv1 -> send1 -> recv-done0 -> recv-done1
//   -> send-done0 -> send-done1.
absl::Status ConnectP2P2NodeChain(const P2PGroupNode& node0,
                                  const P2PGroupNode& node1) {
  HloSendRecvInstruction* recv_done0 = node0.recv_done;
  HloRecvInstruction* recv0 = node0.recv;
  HloSendRecvInstruction* send_done0 = node0.send_done;
  HloSendInstruction* send0 = node0.send;
  HloSendRecvInstruction* recv_done1 = node1.recv_done;
  HloRecvInstruction* recv1 = node1.recv;
  HloSendRecvInstruction* send_done1 = node1.send_done;
  HloSendInstruction* send1 = node1.send;
  // Done ops run after all four starts, in group order.
  TF_RETURN_IF_ERROR(OrderBefore(recv_done0, recv_done1));
  TF_RETURN_IF_ERROR(OrderBefore(recv_done1, send_done0));
  TF_RETURN_IF_ERROR(OrderBefore(send_done0, send_done1));
  TF_RETURN_IF_ERROR(OrderBefore(recv0, send0));
  TF_RETURN_IF_ERROR(OrderBefore(send0, recv1));
  TF_RETURN_IF_ERROR(OrderBefore(recv1, send1));
  TF_RETURN_IF_ERROR(OrderBefore(send1, recv_done0));
  return absl::OkStatus();
}
// Orders the child-side chains of a pipelined cycle; the complement
// (kStream0) node goes first.
absl::Status ConnectPipelined2P2PChild(const P2PGroup& p2p_group,
                                       const P2PGroupMap& p2p_group_map) {
  return ConnectP2P2NodeChain(
      p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
      p2p_group.GetChild());
}
// Orders the parent-side chain of a single-chain pipelined group.
absl::Status ConnectPipelined1P2PParent(const P2PGroup& p2p_group) {
  return ConnectP2P1NodeChain(p2p_group.GetParent());
}
// Orders the parent-side chains of a pipelined cycle; complement first.
absl::Status ConnectPipelined2P2PParent(const P2PGroup& p2p_group,
                                        const P2PGroupMap& p2p_group_map) {
  return ConnectP2P2NodeChain(
      p2p_group.GetComplementGroup(p2p_group_map)->GetParent(),
      p2p_group.GetParent());
}
// Orders an unpipelined cycle of two groups; must be called on the
// kStream1 group, whose complement (kStream0) chain goes first.
absl::Status ConnectUnpipelined2P2P(const P2PGroup& p2p_group,
                                    const P2PGroupMap& p2p_group_map) {
  CHECK(p2p_group.runtime_stream == kStream1);
  return ConnectP2P2NodeChain(
      p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
      p2p_group.GetChild());
}
absl::Status GatherP2PGroupsAndCollectiveInfo(
const HloComputation* computation, P2PInComputation& p2p_in_computation,
P2PGroupMap& p2p_group_map,
CollectiveInComputation& collective_in_computation) {
collective_in_computation[computation] = false;
std::vector<HloInstruction*> while_ops;
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
collective_in_computation[computation] = true;
}
if (hlo->opcode() == HloOpcode::kWhile) {
while_ops.push_back(hlo);
continue;
}
if (!IsP2POp(hlo)) {
continue;
}
HloSendRecvInstruction* p2p = Cast<HloSendRecvInstruction>(hlo);
int64_t channel = p2p->channel_id().value();
auto p2p_group = p2p_group_map.find(channel);
if (p2p_group == p2p_group_map.end()) {
P2PGroup group;
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
p2p_group_map[channel] = group;
} else {
P2PGroup& group = p2p_group->second;
if (group.ChildComputation() == computation) {
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
} else {
TF_RETURN_IF_ERROR(grou | #include "xla/service/p2p_schedule_preparation.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Fixture with helpers that assert the control-dependency patterns the
// P2PSchedulePreparation pass is expected to (or not to) add.
class P2PSchedulePreparationTest : public HloTestBase {
 public:
  // Asserts the chain named by `suffix` got no control dependencies.
  void VerifyP2PNotTransformed(HloModule* module,
                               const std::string& suffix = "") {
    HloInstruction* recv = FindInstruction(module, "recv" + suffix);
    HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
    HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
    EXPECT_EQ(recv->control_predecessors().size(), 0);
    EXPECT_EQ(recv_done->control_predecessors().size(), 0);
    EXPECT_EQ(send_done->control_predecessors().size(), 0);
  }
  // Asserts the single-chain order recv -> send -> recv-done -> send-done.
  void VerifyP2P1GroupChain(HloModule* module, const std::string& suffix) {
    HloInstruction* send = FindInstruction(module, "send" + suffix);
    HloInstruction* recv = FindInstruction(module, "recv" + suffix);
    HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
    HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
    EXPECT_EQ(send->control_predecessors()[0], recv);
    EXPECT_EQ(recv_done->control_predecessors()[0], send);
    EXPECT_EQ(send_done->control_predecessors()[0], recv_done);
  }
  void VerifyUnpipelinedP2P(HloModule* module, const std::string& suffix = "") {
    VerifyP2P1GroupChain(module, suffix);
  }
  void VerifyPipelinedP2PChild(HloModule* module,
                               const std::string& suffix = "") {
    VerifyP2P1GroupChain(module, suffix);
  }
  void VerifyPipelinedP2PParent(HloModule* module,
                                const std::string& suffix = "") {
    VerifyP2P1GroupChain(module, suffix);
  }
  // Asserts the merged two-chain order produced for complementary groups.
  void VerifyP2P2GroupChain(HloModule* module, const std::string& suffix0,
                            const std::string& suffix1) {
    HloInstruction* send0 = FindInstruction(module, "send" + suffix0);
    HloInstruction* recv0 = FindInstruction(module, "recv" + suffix0);
    HloInstruction* recv_done0 = FindInstruction(module, "recv-done" + suffix0);
    HloInstruction* send_done0 = FindInstruction(module, "send-done" + suffix0);
    HloInstruction* send1 = FindInstruction(module, "send" + suffix1);
    HloInstruction* recv1 = FindInstruction(module, "recv" + suffix1);
    HloInstruction* recv_done1 = FindInstruction(module, "recv-done" + suffix1);
    HloInstruction* send_done1 = FindInstruction(module, "send-done" + suffix1);
    EXPECT_EQ(recv_done1->control_predecessors()[0], recv_done0);
    EXPECT_EQ(send_done0->control_predecessors()[0], recv_done1);
    EXPECT_EQ(send_done1->control_predecessors()[0], send_done0);
    EXPECT_EQ(send0->control_predecessors()[0], recv0);
    EXPECT_EQ(recv1->control_predecessors()[0], send0);
    EXPECT_EQ(send1->control_predecessors()[0], recv1);
    EXPECT_EQ(recv_done0->control_predecessors()[0], send1);
  }
  void VerifyPipelined2P2PChild(HloModule* module, const std::string& suffix0,
                                const std::string& suffix1) {
    VerifyP2P2GroupChain(module, suffix0, suffix1);
  }
  void VerifyPipelined2P2PParent(HloModule* module, const std::string& suffix0,
                                 const std::string& suffix1) {
    VerifyP2P2GroupChain(module, suffix0, suffix1);
  }
};
constexpr char kEmpty[] = "";
constexpr char kHostTransfer[] = ", is_host_transfer=true";
// Builds a module with one top-level P2P chain. `is_host` makes the chain a
// host transfer; `incomplete` omits the send/send-done half of the chain.
std::string GetUnnestedP2PModuleString(bool is_host = false,
                                       bool incomplete = false) {
  constexpr char kSend[] = R"(
    send = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all),
      channel_id=2, frontend_attributes={
      _xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
    } %s
    send-done = token[] send-done(send), channel_id=2 %s
)";
  constexpr char kSimpleModule[] = R"(
  HloModule test
  ENTRY main {
    c0 = u32[] constant(0)
    f0 = f32[] constant(0.0)
    init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
    after-all = token[] after-all()
    recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
      frontend_attributes={
       _xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
    } %s
    recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2 %s
    %s
    ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
  }
)";
  const char* is_host_str = is_host ? kHostTransfer : kEmpty;
  if (incomplete) {
    return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, kEmpty);
  }
  std::string send_str = absl::StrFormat(kSend, is_host_str, is_host_str);
  return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, send_str);
}
// A host-transfer P2P chain must be left untouched by the pass.
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainHostNotTransformed) {
  std::string kModuleStr = GetUnnestedP2PModuleString(true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_FALSE(changed);
}
// A chain missing its send/send-done half must not be transformed.
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainIncompleteNotTransformed) {
  std::string kModuleStr =
      GetUnnestedP2PModuleString(false, true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_FALSE(changed);
}
// A complete device P2P chain should be ordered by the pass.
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainTransformed) {
  std::string kModuleStr = GetUnnestedP2PModuleString();
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_TRUE(changed);
  VerifyUnpipelinedP2P(module.get());
}
// Builds a module with a P2P chain inside a while-loop body (channel 1) and
// another in the entry computation (channel 2); either chain can be made a
// host transfer via the corresponding flag.
std::string GetNestedP2PModuleString(bool while_p2p_is_host = false,
                                     bool main_p2p_is_host = false) {
  constexpr char kModuleTemplate[] = R"(
  HloModule test
  while-cond {
    param = (u32[], f32[1, 1024, 1024]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(25)
    ROOT cond-result = pred[] compare(count, ub), direction=LT
  }
  while-body {
    param = (u32[], f32[1, 1024, 1024]) parameter(0)
    count = get-tuple-element(param), index=0
    send-data = get-tuple-element(param), index=1
    after-all = token[] after-all()
    recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}"
      } %s
    send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
      channel_id=1, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
      } %s
    recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1 %s
    recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
    send-done = token[] send-done(send), channel_id=1 %s
    c1 = u32[] constant(1)
    new-count = u32[] add(count, c1)
    ROOT body-result = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
  }
  ENTRY main {
    c0 = u32[] constant(0)
    f0 = f32[] constant(0.0)
    init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
    after-all.1 = token[] after-all()
    recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
      frontend_attributes={
       _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
    } %s
    send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1),
      channel_id=2, frontend_attributes={
       _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
    } %s
    recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=2 %s
    send-done.1 = token[] send-done(send.1), channel_id=2 %s
    recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1), index=0
    while-init =  (u32[], f32[1, 1024, 1024]) tuple(c0, recv-data.1)
    while-result = (u32[], f32[1, 1024, 1024]) while(while-init),
      body=while-body, condition=while-cond
    while-result-data = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
    ROOT entry-result = f32[1, 1024, 1024] add(while-result-data, recv-data.1)
  }
  )";
  const char* while_p2p = while_p2p_is_host ? kHostTransfer : kEmpty;
  const char* main_p2p = main_p2p_is_host ? kHostTransfer : kEmpty;
  return absl::StrFormat(kModuleTemplate, while_p2p, while_p2p, while_p2p,
                         while_p2p, main_p2p, main_p2p, main_p2p, main_p2p);
}
// Host chain in the while body stays untouched; the main chain is ordered
// and the while loop is made to depend on the main chain's send-done.
TEST_F(P2PSchedulePreparationTest, WhileP2PIsHostNotMainTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString(true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  VerifyP2PNotTransformed(module.get());
  VerifyUnpipelinedP2P(module.get(), ".1");
  HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
  HloInstruction* while_loop = FindInstruction(module.get(), "while-result");
  EXPECT_EQ(while_loop->control_predecessors()[0], send_done);
}
// Host chain in main stays untouched; the chain in the while body is ordered.
TEST_F(P2PSchedulePreparationTest, MainP2PIsHostNotWhileTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString(false,
                                                   true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  VerifyUnpipelinedP2P(module.get());
  VerifyP2PNotTransformed(module.get(), ".1");
}
// Both chains are device transfers: both get ordered, and the while loop
// (which consumes the outer chain's data) depends on the outer send-done.
TEST_F(P2PSchedulePreparationTest, NestedP2PChainTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString();
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((kModuleStr)));
  P2PSchedulePreparation preparation;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  VerifyUnpipelinedP2P(module.get());
  VerifyUnpipelinedP2P(module.get(), ".1");
  HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
  HloInstruction* recv_user = FindInstruction(module.get(), "while-result");
  EXPECT_EQ(recv_user->control_predecessors()[0], send_done);
}
// Builds the HLO text used by the pipelined-P2P tests.
//
// The module always contains a while loop whose body carries a pipelined
// Send/Recv chain on channel 1 (instruction suffix ".1" in the body, ".2"
// for the rotated copy in the entry computation). The flags select optional
// pieces:
//   nested_p2p_in_main: add a second while loop (with its own unpipelined
//     chain on channel 3) consuming the result in the entry computation.
//   other_p2p_in_while: add an extra unpipelined chain (channel 4) inside
//     the pipelined while body.
//   test_custom_call: consume the loop result with a custom-call instead of
//     a collective-permute in the entry computation.
std::string GetPipelinedP2PModuleString(bool nested_p2p_in_main = false,
                                        bool other_p2p_in_while = false,
                                        bool test_custom_call = false) {
  // Optional nested while loop for the entry computation; its body carries
  // an unpipelined Send/Recv chain on channel 3.
  // NOTE(review): recv.3's _xla_send_recv_source_target_pairs is missing the
  // closing "}" ("{{0, 1}, {1, 2}") while send.3's is well formed — confirm
  // this is intentional.
  constexpr char kWhileForMain[] = R"(
  while-cond-2 {
    param = (u32[], f32[1, 1024, 1024]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(25)
    ROOT cond-result-2 = pred[] compare(count, ub), direction=LT
  }
  while-body-2 {
    param = (u32[], f32[1, 1024, 1024]) parameter(0)
    count = get-tuple-element(param), index=0
    send-data = get-tuple-element(param), index=1
    after-all.3 = token[] after-all()
    recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=3,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}"
      }
    send.3 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.3),
      channel_id=3, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
      }
    recv-done.3 = (f32[1, 1024, 1024], token[]) recv-done(recv.3), channel_id=3
    recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.3), index=0
    send-done.3 = token[] send-done(send.3), channel_id=3
    c1 = u32[] constant(1)
    new-count = u32[] add(count, c1)
    ROOT body-result-2 = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
  }
)";
  // Entry tail when the loop result is combined with a collective-permute.
  constexpr char kUnnestedResult[] = R"(
  while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
  collective-permute.2 = f32[1, 1024, 1024] collective-permute(init),
    source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
  ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, collective-permute.2)
)";
  // Entry tail when the loop result is combined with a custom-call.
  constexpr char kUnnestedResultWithCustomCall[] = R"(
  while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
  custom-call = f32[1, 1024, 1024] custom-call(init),
    custom_call_target="my_custom_call"
  ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, custom-call)
)";
  // Entry tail when a second (nested) while loop consumes the result.
  constexpr char kNestedResult[] = R"(
  while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
  while-init-2 = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
  while-2 = (u32[], f32[1, 1024, 1024]) while(while-init-2),
    body=while-body-2, condition=while-cond-2,
    backend_config={"known_trip_count":{"n":"25"}}
  while-result-2 = f32[1, 1024, 1024] get-tuple-element(while-2), index=1
  ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, while-result-2)
)";
  // Pipelined while body: only the channel-1 chain, rotated so that the
  // recv-done/send-done are carried through the loop state.
  constexpr char kPipelinedWhileBodyWithoutOtherP2P[] = R"(
  while-body {
    param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
    count = get-tuple-element(param), index=0
    recv-done.1.q = (f32[1, 1024, 1024], token[]) get-tuple-element(param), index=1
    recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
    c1 = u32[] constant(1)
    new-count = u32[] add(count, c1)
    replica = u32[] replica-id()
    c10 = u32[] constant(10)
    sum = u32[] add(replica, c10)
    sum2 = u32[] add(sum, count)
    conv = f32[] convert(sum2)
    p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
    b = f32[1, 1024, 1024] add(p, recv-data)
    c = f32[1, 1024, 1024] multiply(b, b)
    d = f32[1, 1024, 1024] tan(c)
    s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
      lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
    collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
    new-data = f32[1, 1024, 1024] add(c, collective-permute.1)
    after-all.1 = token[] after-all()
    send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
      channel_id=1, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    send-done.1 = token[] send-done(send.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
      tuple(new-count, recv-done.1, send-done.1)
  }
)";
  // Same pipelined while body, plus an extra unpipelined Send/Recv chain on
  // channel 4 whose result feeds the pipelined send.
  constexpr char kPipelinedWhileBodyWithOtherP2P[] = R"(
  while-body {
    param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
    count = get-tuple-element(param), index=0
    recv-done.1.q = (f32[1, 1024, 1024], token[])get-tuple-element(param), index=1
    recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
    c1 = u32[] constant(1)
    new-count = u32[] add(count, c1)
    replica = u32[] replica-id()
    c10 = u32[] constant(10)
    sum = u32[] add(replica, c10)
    sum2 = u32[] add(sum, count)
    conv = f32[] convert(sum2)
    p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
    b = f32[1, 1024, 1024] add(p, recv-data)
    c = f32[1, 1024, 1024] multiply(b, b)
    d = f32[1, 1024, 1024] tan(c)
    s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
      lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
    collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
      source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
    send-data = f32[1, 1024, 1024] add(c, collective-permute.1)
    after-all.4 = token[] after-all()
    send.4 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.4),
      channel_id=4, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
      }
    send-done.4 = token[] send-done(send.4), channel_id=4
    recv.4 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.4), channel_id=4,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
      }
    recv-done.4 = (f32[1, 1024, 1024], token[]) recv-done(recv.4), channel_id=4
    new-data = f32[1, 1024, 1024] get-tuple-element(recv-done.4), index=0
    after-all.1 = token[] after-all()
    send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
      channel_id=1, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    send-done.1 = token[] send-done(send.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
      tuple(new-count, recv-done.1, send-done.1)
  }
)";
  // Skeleton: the three %s slots take (1) the optional nested while
  // computations, (2) the pipelined while body, (3) the entry tail.
  constexpr char kModuleTemplate[] = R"(
  HloModule test
  while-cond {
    param = (u32[], (f32[1, 1024, 1024], u32[], token[]), token[]) parameter(0)
    count = get-tuple-element(param), index=0
    ub = u32[] constant(25)
    ROOT cond-result = pred[] compare(count, ub), direction=LT
  }
  %s
  %s
  ENTRY test-computation {
    c0 = u32[] constant(0)
    f0 = f32[] constant(0.0)
    init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
    after-all.2 = token[] after-all()
    recv.2 = (f32[1, 1024, 1024], token[]) recv(after-all.2), channel_id=1,
      frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    recv-done.2 = (f32[1, 1024, 1024], token[]) recv-done(recv.2), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    send.2 = (f32[1, 1024, 1024], token[]) send(init, after-all.2),
      channel_id=1, frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
        _xla_send_recv_pipeline="0"
      }
    send-done.2 = token[] send-done(send.2), channel_id=1,
      frontend_attributes={
        _xla_send_recv_pipeline="0"
      }
    while-init = (u32[], (f32[1, 1024, 1024], token[]), token[])
      tuple(c0, recv-done.2, send-done.2)
    while-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
      while(while-init),
      body=while-body, condition=while-cond,
      backend_config={"known_trip_count":{"n":"25"}}
    recv-done.2.q = (f32[1, 1024, 1024], token[]) get-tuple-element(while-result), index=1
    recv-data.2.q = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
    %s
  }
)";
  // Select the pieces requested by the flags and assemble the module text.
  const char* while_str = nested_p2p_in_main ? kWhileForMain : kEmpty;
  const char* pipelined_while_body_str =
      other_p2p_in_while ? kPipelinedWhileBodyWithOtherP2P
                         : kPipelinedWhileBodyWithoutOtherP2P;
  const char* result_str =
      nested_p2p_in_main ? kNestedResult
                         : (test_custom_call ? kUnnestedResultWithCustomCall
                                             : kUnnestedResult);
  return absl::StrFormat(kModuleTemplate, while_str, pipelined_while_body_str,
                         result_str);
}
TEST_F(P2PSchedulePreparationTest, UnnestedPipelinedP2PChainTransformed) {
  const std::string hlo_string = GetPipelinedP2PModuleString();
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  P2PSchedulePreparation pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  // Channel-1 chain in the loop body is the pipelined child; the rotated
  // copy in the entry computation is the pipelined parent.
  VerifyPipelinedP2PChild(module.get(), ".1");
  VerifyPipelinedP2PParent(module.get(), ".2");
  // Inside the loop, the collective-permute must finish before the
  // pipelined recv starts.
  HloInstruction* recv_1 = FindInstruction(module.get(), "recv.1");
  HloInstruction* permute_1 =
      FindInstruction(module.get(), "collective-permute.1");
  EXPECT_EQ(recv_1->control_predecessors()[0], permute_1);
  // In the entry computation the permute is ordered either after the
  // pipelined send-done or before the pipelined recv.
  HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
  HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
  HloInstruction* permute_2 =
      FindInstruction(module.get(), "collective-permute.2");
  bool permute_after_send_done =
      !permute_2->control_predecessors().empty() &&
      permute_2->control_predecessors()[0] == send_done_2;
  bool recv_after_permute =
      !recv_2->control_predecessors().empty() &&
      recv_2->control_predecessors()[0] == permute_2;
  EXPECT_TRUE(permute_after_send_done || recv_after_permute);
}
TEST_F(P2PSchedulePreparationTest, NestedPipelinedP2PChainTransformed) {
  const std::string hlo_string = GetPipelinedP2PModuleString(true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  P2PSchedulePreparation pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  VerifyPipelinedP2PChild(module.get(), ".1");
  VerifyPipelinedP2PParent(module.get(), ".2");
  // The chain inside the nested while loop stays unpipelined.
  VerifyUnpipelinedP2P(module.get(), ".3");
  // The nested loop must be ordered either after the pipelined send-done or
  // before the pipelined recv in the entry computation.
  HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
  HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
  HloInstruction* while_2 = FindInstruction(module.get(), "while-2");
  bool while_after_send_done =
      !while_2->control_predecessors().empty() &&
      while_2->control_predecessors()[0] == send_done_2;
  bool recv_after_while =
      !recv_2->control_predecessors().empty() &&
      recv_2->control_predecessors()[0] == while_2;
  EXPECT_TRUE(while_after_send_done || recv_after_while);
}
TEST_F(P2PSchedulePreparationTest,
       UnnestedPipelinedP2PChainWithOtherP2PTransformed) {
  const std::string hlo_string = GetPipelinedP2PModuleString(false, true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  P2PSchedulePreparation pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
  EXPECT_TRUE(changed);
  VLOG(10) << module->ToString();
  VerifyPipelinedP2PChild(module.get(), ".1");
  VerifyPipelinedP2PParent(module.get(), ".2");
  VerifyUnpipelinedP2P(module.get(), ".4");
  // The unpipelined chain (channel 4) must finish before the pipelined
  // recv: exactly one control edge from its send-done to recv.1.
  HloInstruction* pipelined_recv = FindInstruction(module.get(), "recv.1");
  HloInstruction* unpipelined_send_done =
      FindInstruction(module.get(), "send-done.4");
  EXPECT_EQ(1, absl::c_count(pipelined_recv->control_predecessors(),
                             unpipelined_send_done));
}
TEST_F(P2PSchedulePreparationTest,
       UnnestedPipelinedP2PChainWithCustomCallTransformed) {
  const std::string hlo_string =
      GetPipelinedP2PModuleString(false, false, true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  P2PSchedulePreparation pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
  EXPECT_TRUE(changed);
  // The custom-call must be ordered either after the pipelined send-done or
  // before the pipelined recv in the entry computation.
  HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
  HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
  HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
  bool call_after_send_done =
      !custom_call->control_predecessors().empty() &&
      custom_call->control_predecessors()[0] == send_done_2;
  bool recv_after_call =
      !recv_2->control_predecessors().empty() &&
      recv_2->control_predecessors()[0] == custom_call;
  EXPECT_TRUE(call_after_send_done || recv_after_call);
}
TEST_F(P2PSchedulePreparationTest, PipelinedP2PChain2Transformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.f = (u32[2], token[]) get-tuple-element(param), index=1
recv-data.0 = u32[2] get-tuple-element(recv-done.0.f), index=0
recv-done.1.f = (u32[2], token[]) get-tuple-element(param), index=2
recv-data.1 = u32[2] get-tuple-element(recv-done.1.f), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(s, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1.n = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1.n), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (u32[2], u32[], token[]) send(s, after-all.1.n),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT result = (u32[], (u32[2], token[]), (u32[2], token[]), token[], token[])
tuple(new_count, recv-done.0, recv-done.1, send-done.0, send-done.1)
}
ENTRY test_computation {
c0 = u32[ |
1,979 | cpp | tensorflow/tensorflow | ar_crs_combiner | third_party/xla/xla/service/ar_crs_combiner.cc | third_party/xla/xla/service/ar_crs_combiner_test.cc | #ifndef XLA_SERVICE_AR_CRS_COMBINER_H_
#define XLA_SERVICE_AR_CRS_COMBINER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Combines a cross-module all-reduce (AR) that is — directly or through a
// short chain of movable single-user ops — followed by a cross-replica
// all-reduce (CRS) into one all-reduce, when the values flowing into the
// CRS can be proven equal on every spatial partition.
class ArCrsCombiner : public HloModulePass {
 public:
  // `num_spatial_partitions` is the number of partitions per replica.
  // `spmd_partition` selects the SPMD proof strategy (replication analysis,
  // one AR per channel id) instead of the MPMD one (structural comparison
  // of one AR per partition per channel id).
  ArCrsCombiner(int num_spatial_partitions, bool spmd_partition)
      : num_spatial_partitions_(num_spatial_partitions),
        spmd_partition_(spmd_partition) {}
  absl::string_view name() const override { return "ar-crs-combiner"; }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Test-only entry point: returns true if `i1` and `i2` provably compute
  // the same value.
  static bool TestInstructionsComputeSameValue(HloInstruction* i1,
                                               HloInstruction* i2);
 private:
  // A matched AR->CRS pair plus the length of the path between them.
  struct ArCrsPair {
    HloInstruction* ar;
    HloInstruction* crs;
    // Number of instructions on the path from the AR to the CRS.
    int64_t distance;
    ArCrsPair(HloInstruction* all_reduce, HloInstruction* cross_replica_sum,
              int64_t dist)
        : ar(all_reduce), crs(cross_replica_sum), distance(dist) {}
    // Debug string: the instruction names along the AR->CRS path, the AR
    // channel id and the distance.
    std::string ToString() {
      std::string result;
      absl::StrAppend(&result, "(");
      HloInstruction* instruction = ar;
      while (instruction != crs) {
        absl::StrAppend(&result, instruction->name(), ",");
        instruction = instruction->users()[0];
      }
      absl::StrAppend(&result, instruction->name(),
                      ")[id:", *(ar->channel_id()), ",dist:", distance, "]");
      return result;
    }
  };
  // Returns the AR->CRS pair anchored at `instruction`, if it matches the
  // combinable pattern.
  std::optional<ArCrsCombiner::ArCrsPair> MatchesArCrsPattern(
      HloInstruction* instruction);
  // If `instruction` is a parameter of a computation whose unique caller is
  // a while, returns that while instruction.
  std::optional<HloInstruction*> WhileFromBodyParameter(
      HloInstruction* instruction);
  // If `instruction` is a parameter of a computation whose unique caller is
  // a conditional, returns that conditional instruction.
  std::optional<HloInstruction*> ConditionalFromBodyParameter(
      HloInstruction* instruction);
  // Returns all kTuple instructions that may define the value of
  // `instruction`, or nullopt if the dataflow cannot be tracked. `visited`
  // breaks cycles.
  std::optional<std::vector<HloInstruction*>> GetAllTuples(
      HloInstruction* instruction,
      absl::flat_hash_set<HloInstruction*>* visited);
  // Whether elements i1 and i2 of every tuple that can define
  // `tuple_shaped_instruction` provably compute the same value.
  bool TupleElementsComputeSameValue(
      HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,
      absl::flat_hash_map<int64_t, int64_t>* visited_pairs);
  // Structural value-equivalence check, memoized via `visited_pairs`.
  bool InstructionsComputeSameValue(
      HloInstruction* i1, HloInstruction* i2,
      absl::flat_hash_map<int64_t, int64_t>* visited_pairs);
  // Populates all_reduce_map_ and crs_reserved_map_ from the module.
  void GroupAllReducesById(HloModule* module);
  // Prune groups whose members cannot be proven equal across partitions.
  absl::Status KeepProvablyEqualInstructionGroupsMPMD();
  absl::Status KeepProvablyEqualInstructionGroupsSPMD(HloModule* module);
  // Performs the combine rewrite for the surviving groups.
  absl::StatusOr<bool> RewriteGraph();
  int num_spatial_partitions_;
  // True when running on SPMD-partitioned modules (see constructor).
  bool spmd_partition_;
  // AR channel id -> the AR->CRS pairs matched for that channel.
  absl::flat_hash_map<int64_t, std::vector<ArCrsPair>> all_reduce_map_;
  // CRS instruction -> the AR channel id that has claimed it.
  absl::flat_hash_map<HloInstruction*, int64_t> crs_reserved_map_;
  std::unique_ptr<CallGraph> call_graph_;
};
}
#endif
#include "xla/service/ar_crs_combiner.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Rewrites f32 cross-replica all-reduces whose result the replication
// analysis proves replicated into combined all-reduces (by assigning a
// channel id), followed by a division by `partition_count` so the overall
// value is unchanged. Returns true if anything was rewritten.
absl::StatusOr<bool> ReplaceReplicatedAllReduce(HloModule* module,
                                                int64_t partition_count) {
  TF_ASSIGN_OR_RETURN(
      auto replication_analysis,
      HloReplicationAnalysis::Run(module, true));
  bool changed = false;
  int64_t next_channel = hlo_query::NextChannelId(*module);
  for (auto computation : module->computations()) {
    for (auto instruction : computation->instructions()) {
      if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) {
        const Shape& shape = ar->shape();
        if (ar->channel_id()) {
          // Already a cross-module all-reduce; nothing to do.
          continue;
        }
        if (ar->replica_groups().size() > 1) {
          continue;
        }
        if (shape.IsTuple() || shape.element_type() != F32) {
          continue;
        }
        // Heuristic: only rewrite when the replica count is large relative
        // to the partition count.
        if (module->config().replica_count() < 8 * partition_count) {
          continue;
        }
        if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {
          VLOG(2) << "Replaced replicated all-reduce:" << ar->ToString();
          // Assigning a channel id widens the all-reduce to all partitions;
          // divide by the partition count to compensate.
          ar->set_channel_id(next_channel++);
          auto divisor =
              computation->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<float>(partition_count)));
          auto bcast = computation->AddInstruction(
              HloInstruction::CreateBroadcast(shape, divisor, {}));
          auto div = computation->AddInstruction(HloInstruction::CreateBinary(
              ar->shape(), HloOpcode::kDivide, ar, bcast));
          TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));
          changed = true;
        }
      }
    }
  }
  return changed;
}
// Returns true if the cross-module all-reduce `hlo` has replica groups that
// allow combining it with a subsequent cross-replica all-reduce.
//
// With global device ids this requires one group per replica, where each
// group holds exactly the `num_partitions` devices of a single replica
// (all ids share the same id / num_partitions and together cover every
// id % num_partitions). Without global ids, one group per replica suffices.
bool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {
  auto all_reduce = Cast<HloAllReduceInstruction>(hlo);
  auto replica_groups = all_reduce->replica_groups();
  const int64_t replica_count = hlo->GetModule()->config().replica_count();
  CHECK(all_reduce->IsCrossModuleAllReduce());
  if (all_reduce->use_global_device_ids()) {
    if (replica_groups.size() != replica_count) {
      return false;
    }
    for (const auto& group : replica_groups) {
      if (group.replica_ids_size() != num_partitions) {
        return false;
      }
      absl::flat_hash_set<int64_t> partition_ids;
      int64_t replica_id = group.replica_ids(0) / num_partitions;
      for (int64_t i = 0; i < num_partitions; ++i) {
        // Every device in the group must belong to the same replica.
        if (group.replica_ids(i) / num_partitions != replica_id) {
          return false;
        }
        partition_ids.insert(group.replica_ids(i) % num_partitions);
      }
      // ... and together the devices must cover every partition once.
      if (partition_ids.size() != num_partitions) {
        return false;
      }
    }
    return true;
  }
  return replica_groups.size() == replica_count;
}
}
namespace m = match;
// Checks whether `instruction` is a cross-module all-reduce (AR) that
// reaches a cross-replica all-reduce (CRS) through a chain of single-user
// instructions the AR may legally move past. Returns the matched pair with
// the chain length, or std::nullopt.
std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern(
    HloInstruction* instruction) {
  // Ops the AR can move past: layout/shape changes always; converts only
  // when floating-point-ness is preserved; add/subtract/multiply only on
  // floating point (the rewrite compensates by dividing by the partition
  // count, which needs a floating-point divide).
  auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {
    if (instruction->user_count() != 1) {
      return false;
    }
    switch (instruction->opcode()) {
      case HloOpcode::kBitcast:
      case HloOpcode::kTranspose:
      case HloOpcode::kReshape:
        return true;
      case HloOpcode::kConvert:
        return ShapeUtil::ElementIsFloating(instruction->shape()) ==
               ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());
      case HloOpcode::kAdd:
      case HloOpcode::kSubtract:
      case HloOpcode::kMultiply:
        return ShapeUtil::ElementIsFloating(instruction->shape());
      default:
        return false;
    }
  };
  // Both all-reduces must use a plain two-parameter add reduction.
  auto computation_is_addition = [](HloComputation* c) {
    return c->instruction_count() == 3 &&
           Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));
  };
  if (instruction->IsCrossModuleAllReduce() &&
      HasCombinableReplicaGroup(instruction, num_spatial_partitions_) &&
      computation_is_addition(instruction->called_computations()[0]) &&
      instruction->user_count() == 1) {
    auto next = instruction->users()[0];
    int64_t distance = 1;
    // Walk the single-user chain towards a CRS, bailing out at the first
    // instruction the AR cannot move past.
    while (!next->IsCrossReplicaAllReduce()) {
      if (can_ar_move_past_instruction(next)) {
        next = next->users()[0];
      } else {
        return std::nullopt;
      }
      ++distance;
    }
    if (!Cast<HloAllReduceInstruction>(next)->IsNoop() &&
        computation_is_addition(next->called_computations()[0])) {
      ArCrsPair pair(instruction, next, distance);
      VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString();
      return pair;
    }
  }
  return std::nullopt;
}
// If `instruction` is a parameter of a computation whose unique caller is a
// while instruction, returns that while; std::nullopt otherwise (including
// when the computation has zero or multiple callers).
std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter(
    HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto callers = call_graph_->GetComputationCallers(computation);
  if (callers.size() != 1) {
    return std::nullopt;
  }
  HloInstruction* caller = callers[0];
  if (caller->opcode() != HloOpcode::kWhile) {
    return std::nullopt;
  }
  return caller;
}
// If `instruction` is a parameter of a computation whose unique caller is a
// conditional instruction, returns that conditional; std::nullopt otherwise
// (including when the computation has zero or multiple callers).
std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter(
    HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto callers = call_graph_->GetComputationCallers(computation);
  if (callers.size() != 1) {
    return std::nullopt;
  }
  HloInstruction* caller = callers[0];
  if (caller->opcode() != HloOpcode::kConditional) {
    return std::nullopt;
  }
  return caller;
}
// Collects every kTuple instruction that may define the value flowing into
// `instruction`, following dataflow through domains, get-tuple-elements,
// while loops and conditionals. Returns std::nullopt when the dataflow
// cannot be tracked. `visited` breaks cycles: an already-visited
// instruction contributes no additional tuples.
std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples(
    HloInstruction* instruction,
    absl::flat_hash_set<HloInstruction*>* visited) {
  if (visited->find(instruction) != visited->end()) {
    return std::vector<HloInstruction*>();
  }
  visited->insert(instruction);
  switch (instruction->opcode()) {
    case HloOpcode::kTuple: {
      return std::vector<HloInstruction*>({instruction});
    }
    case HloOpcode::kDomain: {
      // Domains are transparent for dataflow purposes.
      return GetAllTuples(instruction->operands()[0], visited);
    }
    case HloOpcode::kParameter: {
      // A while-body parameter is defined either by the loop init or by the
      // previous iteration's body root.
      auto maybe_while = WhileFromBodyParameter(instruction);
      if (maybe_while) {
        auto while_instr = *maybe_while;
        auto init_tuples = GetAllTuples(while_instr->while_init(), visited);
        auto body_tuples = GetAllTuples(
            while_instr->while_body()->root_instruction(), visited);
        if (!init_tuples || !body_tuples) {
          return std::nullopt;
        }
        auto result = *init_tuples;
        result.insert(result.end(), body_tuples->begin(), body_tuples->end());
        return result;
      }
      // A conditional-branch parameter is defined by the matching branch
      // operand(s) of the conditional (the same computation may serve
      // several branches).
      auto maybe_conditional = ConditionalFromBodyParameter(instruction);
      if (maybe_conditional) {
        auto cond_instr = *maybe_conditional;
        std::vector<HloInstruction*> tuples;
        for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {
          if (cond_instr->branch_computation(i)->parameter_instruction(0) ==
              instruction) {
            // Operand i + 1 of the conditional is the argument of branch i.
            auto branch_tuples =
                GetAllTuples(cond_instr->mutable_operand(i + 1), visited);
            if (!branch_tuples) {
              return std::nullopt;
            }
            tuples.insert(tuples.end(), branch_tuples->begin(),
                          branch_tuples->end());
          }
        }
        return tuples;
      }
      return std::nullopt;
    }
    case HloOpcode::kGetTupleElement: {
      // Recurse into the selected element of every tuple that can define
      // the operand.
      std::vector<HloInstruction*> result_tuples;
      auto tuples = GetAllTuples(instruction->operands()[0], visited);
      if (!tuples) {
        return std::nullopt;
      }
      for (auto tuple : *tuples) {
        auto tmp_tuples = GetAllTuples(
            tuple->mutable_operand(instruction->tuple_index()), visited);
        if (!tmp_tuples) {
          return std::nullopt;
        }
        result_tuples.insert(result_tuples.end(), tmp_tuples->begin(),
                             tmp_tuples->end());
      }
      return result_tuples;
    }
    case HloOpcode::kConditional: {
      // Each branch root must itself be a tuple.
      std::vector<HloInstruction*> result_tuples;
      const auto& branch_computations = instruction->branch_computations();
      result_tuples.reserve(branch_computations.size());
      for (HloComputation* body : branch_computations) {
        if (body->root_instruction()->opcode() != HloOpcode::kTuple) {
          return std::nullopt;
        }
        result_tuples.push_back(body->root_instruction());
      }
      return result_tuples;
    }
    case HloOpcode::kWhile: {
      // A while's value is either its init or its body root.
      auto init_tuples = GetAllTuples(instruction->while_init(), visited);
      auto body_tuples =
          GetAllTuples(instruction->while_body()->root_instruction(), visited);
      if (!init_tuples || !body_tuples) {
        return std::nullopt;
      }
      auto result = *init_tuples;
      result.insert(result.end(), body_tuples->begin(), body_tuples->end());
      return result;
    }
    default:
      return std::nullopt;
  }
}
// Returns true if elements `i1` and `i2` of every tuple that may define
// `tuple_shaped_instruction` provably compute the same value; false when
// the defining tuples cannot be determined.
bool ArCrsCombiner::TupleElementsComputeSameValue(
    HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,
    absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  absl::flat_hash_set<HloInstruction*> visited;
  auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);
  if (!tuples.has_value()) {
    return false;
  }
  return std::all_of(tuples->begin(), tuples->end(),
                     [&](HloInstruction* tuple) {
                       CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
                       return InstructionsComputeSameValue(
                           tuple->mutable_operand(i1),
                           tuple->mutable_operand(i2), visited_pairs);
                     });
}
// Test-only helper: builds a throwaway combiner (the partition count and
// SPMD flag do not affect value-equivalence) and runs the equivalence check
// on the two instructions, which must live in the same module.
bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1,
                                                     HloInstruction* i2) {
  HloModule* module = i1->GetModule();
  CHECK_EQ(module, i2->GetModule());
  ArCrsCombiner combiner(2, false);
  combiner.call_graph_ = CallGraph::Build(module);
  absl::flat_hash_map<int64_t, int64_t> visited_pairs;
  return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);
}
// Returns true if `i1` and `i2` can be proven to compute the same value.
// `visited_pairs` memoizes pairs under comparison (keyed min-uid -> max-uid);
// a pair is recorded *before* recursing into operands so that cyclic
// dataflow (e.g. through while loops) is treated optimistically.
bool ArCrsCombiner::InstructionsComputeSameValue(
    HloInstruction* i1, HloInstruction* i2,
    absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  if (i1 == i2) {
    return true;
  }
  auto uid1 = i1->unique_id();
  auto uid2 = i2->unique_id();
  auto min_uid = std::min(uid1, uid2);
  auto max_uid = std::max(uid1, uid2);
  auto it = visited_pairs->find(min_uid);
  if (it != visited_pairs->end() && max_uid == it->second) {
    // Already being compared (or proven equal): assume equal.
    return true;
  }
  auto opcode1 = i1->opcode();
  auto operands1 = i1->operands();
  if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {
    return false;
  }
  // Called computations must be structurally equal.
  auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
    return *a == *b;
  };
  // Operand equality is established by the explicit recursion below, so
  // Identical() itself treats operands as equal.
  auto eq_operands = [](const HloInstruction*, const HloInstruction*) {
    return true;
  };
  if (i1->IsCrossModuleAllReduce()) {
    // For cross-module all-reduces, identical configuration (which includes
    // the channel id compared by Identical) implies the same value.
    return i1->Identical(*i2, eq_operands, eq_computations,
                         false);
  }
  visited_pairs->emplace(min_uid, max_uid);
  for (int i = 0; i < operands1.size(); ++i) {
    auto operand1 = operands1[i];
    auto operand2 = i2->operands()[i];
    if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {
      return false;
    }
  }
  if (opcode1 == HloOpcode::kParameter) {
    // Distinct parameters are never provably equal (i1 == i2 was handled
    // above).
    return false;
  }
  if (opcode1 == HloOpcode::kGetTupleElement) {
    // Operands were already proven equal, so equal indices suffice;
    // otherwise try proving the two tuple elements equal directly.
    return i1->tuple_index() == i2->tuple_index() ||
           TupleElementsComputeSameValue(operands1[0], i1->tuple_index(),
                                         i2->tuple_index(), visited_pairs);
  }
  // Compare the remaining attributes while ignoring operands, which were
  // compared above.
  auto eq_instructions = [](const HloInstruction* i1,
                            const HloInstruction* i2) -> bool { return true; };
  return i1->Identical(*i2, eq_instructions, eq_computations,
                       false);
}
// Populates all_reduce_map_ with the matched AR->CRS pairs, keyed by the AR
// channel id (in MPMD mode the same channel id appears once per partition,
// so one entry holds several pairs). When two channel ids target the same
// CRS, only the pair set with the longer AR->CRS path is kept; the losing
// id goes into `discarded_ar_ids` so its remaining pairs are skipped too.
void ArCrsCombiner::GroupAllReducesById(HloModule* module) {
  absl::flat_hash_set<int64_t> discarded_ar_ids;
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      auto maybe_pair = MatchesArCrsPattern(instruction);
      if (maybe_pair) {
        auto pair = *maybe_pair;
        int64_t ar_id = *(instruction->channel_id());
        if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {
          // This channel id has already lost a conflict; skip its pairs.
          continue;
        }
        auto it = crs_reserved_map_.find(pair.crs);
        if (it != crs_reserved_map_.end()) {
          // The CRS is claimed by a different channel id: keep whichever
          // pair has the longer AR->CRS distance.
          auto prev_ar_id = it->second;
          CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());
          CHECK_NE(prev_ar_id, ar_id);
          auto prev_pair = all_reduce_map_[prev_ar_id].back();
          int64_t prev_distance = prev_pair.distance;
          if (prev_distance < pair.distance) {
            VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString()
                    << " with ArCrsPair: " << pair.ToString();
            all_reduce_map_.erase(prev_ar_id);
            discarded_ar_ids.insert(prev_ar_id);
            all_reduce_map_[ar_id].push_back(pair);
            crs_reserved_map_[pair.crs] = ar_id;
          } else {
            discarded_ar_ids.insert(ar_id);
          }
        } else {
          if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {
            int64_t prev_distance = all_reduce_map_[ar_id].back().distance;
            CHECK_EQ(prev_distance, pair.distance)
                << "All ARs with the same AR ID must have the same distance "
                   "from the corresponding CRSs. Found: "
                << prev_distance << " and " << pair.distance;
          }
          all_reduce_map_[ar_id].push_back(pair);
          crs_reserved_map_[pair.crs] = ar_id;
        }
      }
    }
  }
}
// MPMD mode: for each channel id, verifies that the chains hanging off all
// per-partition ARs compute the same value instruction-by-instruction up to
// the CRS; otherwise the whole group is dropped from all_reduce_map_.
//
// Bug fix: once the group is erased, `copy_it` is invalidated. The original
// code kept comparing the remaining pairs and could call erase(copy_it) a
// second time through the dangling iterator (UB whenever
// num_spatial_partitions_ > 2). We now stop comparing after the erase.
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    // Advance before a potential erase of the current entry.
    auto copy_it = it++;
    auto channel_id = copy_it->first;
    VLOG(2)
        << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
        << channel_id << "\n";
    // Deliberate copy: the vector must stay valid if the map entry is
    // erased below.
    auto pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);
    auto instr_0 = pairs_vec[0].ar;
    bool group_erased = false;
    for (int i = 1; !group_erased && i < pairs_vec.size(); ++i) {
      auto instr_i = pairs_vec[i].ar;
      auto next_0 = instr_0->users()[0];
      auto next_i = instr_i->users()[0];
      absl::flat_hash_map<int64_t, int64_t> visited_pairs;
      // Walk both single-user chains in lockstep until the CRS.
      while (true) {
        if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {
          all_reduce_map_.erase(copy_it);
          group_erased = true;  // copy_it is now invalid; stop comparing.
          VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
                     "channel id: "
                  << channel_id << "\n";
          break;
        }
        if (next_0->IsCrossReplicaAllReduce()) {
          break;
        }
        next_0 = next_0->users()[0];
        next_i = next_i->users()[0];
      }
    }
  }
  return absl::OkStatus();
}
// SPMD mode: for the single AR of each channel id, verifies via replication
// analysis that every instruction on the path to the CRS is replicated
// across partitions; otherwise drops the group from all_reduce_map_.
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(
    HloModule* module) {
  TF_ASSIGN_OR_RETURN(
      auto replication_analysis,
      HloReplicationAnalysis::Run(module, true));
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    // Advance before a potential erase of the current entry.
    auto copy_it = it++;
    auto channel_id = copy_it->first;
    VLOG(2)
        << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
        << channel_id << "\n";
    // Reference instead of copy: SPMD groups hold exactly one pair, and the
    // vector is never read after the erase below.
    const auto& pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == 1);
    auto instr = pairs_vec[0].ar;
    auto next = instr->users()[0];
    // Walk the single-user chain up to the CRS.
    while (true) {
      TF_RET_CHECK(next->shape().IsArray());
      if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {
        all_reduce_map_.erase(copy_it);
        VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
                   "channel id: "
                << channel_id << "\n";
        break;
      }
      if (next->IsCrossReplicaAllReduce()) {
        break;
      }
      next = next->users()[0];
    }
  }
  return absl::OkStatus();
}
// Performs the combine for every surviving AR->CRS pair: removes the
// cross-module all-reduce, compensates add/subtract operands along the path
// (the summed value now carries a factor of num_spatial_partitions_), and
// gives the cross-replica all-reduce the removed AR's channel id, turning
// it into a combined all-reduce. Returns true if anything was rewritten.
//
// Perf fix: iterate the map by const reference instead of copying each
// std::vector<ArCrsPair> (and each pair); the map is not mutated here.
absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() {
  if (all_reduce_map_.empty()) {
    return false;
  }
  for (const auto& it : all_reduce_map_) {
    const auto& pairs_vec = it.second;
    for (const auto& pair : pairs_vec) {
      auto all_reduce = pair.ar;
      auto parent_computation = all_reduce->parent();
      auto channel_id = all_reduce->channel_id();
      auto prev = all_reduce->mutable_operand(0);
      auto next = all_reduce->users()[0];
      // Splice the AR out: wire its operand directly to its single user.
      TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));
      TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));
      while (!next->IsCrossReplicaAllReduce()) {
        switch (next->opcode()) {
          // Ops the removed AR moves past unchanged.
          case HloOpcode::kBitcast:
          case HloOpcode::kTranspose:
          case HloOpcode::kReshape:
          case HloOpcode::kConvert:
          case HloOpcode::kMultiply:
            break;
          case HloOpcode::kAdd:
          case HloOpcode::kSubtract: {
            // The other operand will now be summed num_spatial_partitions_
            // times by the combined all-reduce. Compensate by stripping a
            // matching cross-module all-reduce, or by dividing it by the
            // partition count.
            auto other_operand = (next->operands()[0] == prev)
                                     ? next->operands()[1]
                                     : next->operands()[0];
            if (other_operand->IsCrossModuleAllReduce() &&
                other_operand->user_count() == 1) {
              TF_CHECK_OK(other_operand->ReplaceAllUsesWith(
                  other_operand->mutable_operand(0)));
            } else {
              auto shape = other_operand->shape();
              Literal lit(shape);
              lit.PopulateWithValue<float>(num_spatial_partitions_);
              auto divisor = parent_computation->AddInstruction(
                  HloInstruction::CreateConstant(lit.Clone()));
              auto division = parent_computation->AddInstruction(
                  HloInstruction::CreateBinary(shape, HloOpcode::kDivide,
                                               other_operand, divisor));
              TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));
            }
            break;
          }
          default:
            LOG(FATAL) << "Unexpected instruction: " << next->ToShortString();
        }
        prev = next;
        next = next->users()[0];
      }
      // Assigning the AR's channel id widens the CRS into a combined
      // all-reduce over replicas and partitions.
      next->set_channel_id(channel_id);
    }
  }
  return true;
}
// Pass entry point: groups cross-module all-reduces by channel id, prunes
// groups that are not provably equivalent, then rewrites the survivors.
// Returns true iff the module was modified.
absl::StatusOr<bool> ArCrsCombiner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Build the call graph and bucket candidate all-reduces by channel id.
  call_graph_ = CallGraph::Build(module);
  GroupAllReducesById(module);

  // Drop every group whose members cannot be proven to compute equal values.
  if (spmd_partition_) {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));
  } else {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());
  }

  TF_ASSIGN_OR_RETURN(bool changed, RewriteGraph());

  // In SPMD mode with multiple replicas, replicated all-reduces may also be
  // replaced.
  if (spmd_partition_ && module->config().replica_count() > 1) {
    TF_ASSIGN_OR_RETURN(
        bool replaced,
        ReplaceReplicatedAllReduce(module, num_spatial_partitions_));
    changed = changed || replaced;
  }
  return changed;
}
} | #include "xla/service/ar_crs_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
// Test fixture; inherits ParseAndReturnVerifiedModule etc. from HloTestBase.
class ArCrsCombinerTest : public HloTestBase {};
// Two identical constants compute the same value; a constant and a parameter
// do not.
TEST_F(ArCrsCombinerTest, SameValueTestBasecase) {
  const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}})
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  auto root_tuple = module->entry_computation()->root_instruction();
  auto i1 = root_tuple->operands()[0];  // %constant.f32.1
  auto i2 = root_tuple->operands()[1];  // %constant.f32.2
  // A constant is not the same value as the parameter.
  EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(
      i1, module->entry_computation()->parameter_instruction(0)));
  // Identical constants are the same value.
  EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
ROOT %tuple = (f32[], f32[]) tuple(%x, %x)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %tuple = (f32[], f32[]) tuple(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNumOperands) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple1 = (f32[2,2]) tuple(%constant.f32)
%tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile1) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile2) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile3) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0]->operands()[0];
auto i2 = body_tuple->operands()[1]->operands()[0];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
ROOT %t = pred[] constant(true)
}
%body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%gte.1, %constant.f32)
%add.2 = f32[2,2] add(%gte.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
%body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2)
ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition,
body=%body_inner
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition,
body=%body_outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto inner_while = root_while->while_body()->root_instruction();
auto i1 = inner_while->while_body()->root_instruction()->operands()[0];
auto i2 = inner_while->while_body()->root_instruction()->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
// Asserts that two lists of ReplicaGroups are element-wise identical
// (same length, and matching replica-id sequences at every position).
void CompareReplicaGroups(absl::Span<const ReplicaGroup> groups_before,
                          absl::Span<const ReplicaGroup> groups_after) {
  ASSERT_EQ(groups_before.size(), groups_after.size());
  for (size_t idx = 0; idx < groups_before.size(); ++idx) {
    const auto& before = groups_before[idx];
    const auto& after = groups_after[idx];
    std::vector<int64_t> before_ids(before.replica_ids().begin(),
                                    before.replica_ids().end());
    std::vector<int64_t> after_ids(after.replica_ids().begin(),
                                   after.replica_ids().end());
    EXPECT_EQ(before_ids, after_ids);
  }
}
TEST_F(ArCrsCombinerTest, RewriteArConvertCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%convert.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter())),
op::AllReduce(op::Convert(op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2, true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) {
const char* module_str = R"(
HloModule foobar
%sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] {
%a = f32[2,1] parameter(0)
%b = f32[2,1] parameter(1)
ROOT %add = f32[2,1] add(%a, %b)
}
%sum.2 (x: f32[2], y: f32[2]) -> f32[2] {
%x = f32[2] parameter(0)
%y = f32[2] parameter(1)
ROOT %add = f32[2] add(%x, %y)
}
ENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) {
%p = f32[2,1] parameter(0)
%all-reduce.ar.1 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=0}
%bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1)
%all-reduce.1 = f32[2]
all-reduce(%bitcast.1),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=1}
%bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2)
%all-reduce.2 = f32[2]
all-reduce(%bitcast.2),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=1}
ROOT %tuple = (f32[2], f32[2])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())),
op::AllReduce(op::Bitcast(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=0}
%multiply.1 = f32[]
multiply(%all-reduce.ar.1, %constant.f32),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%multiply.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=1}
%multiply.2 = f32[]
multiply(%all-reduce.ar.2, %constant.f32),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%multiply.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())),
op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.f32
%multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32)
%all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert())),
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0}
%add.1 = f32[] add(%constant.f32, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
ArCrsCombiner combiner(2,
true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()), op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%constant.f32.2 = f32[] constant(3)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32.1, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32.2, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
ArCrsCombiner combiner(2,
false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%add.1 = f32[] add(%p, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_group |
1,980 | cpp | tensorflow/tensorflow | hlo_execution_profile | third_party/xla/xla/service/hlo_execution_profile.cc | third_party/xla/xla/service/hlo_execution_profile_test.cc | #ifndef XLA_SERVICE_HLO_EXECUTION_PROFILE_H_
#define XLA_SERVICE_HLO_EXECUTION_PROFILE_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/map_util.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_execution_profile_data.pb.h"
#include "xla/service/hlo_profile_printer.h"
#include "xla/types.h"
namespace xla {
class HloInstruction;
// Maps a module's HLO instructions, computations, and named extra metrics to
// dense, non-overlapping indices into a single profile-counter array.
class HloProfileIndexMap {
 public:
  // Assigns an index to every instruction and computation in `module`.
  explicit HloProfileIndexMap(const HloModule& module)
      : HloProfileIndexMap(module, {}) {}
  // As above, but additionally reserves one slot per extra metric name.
  explicit HloProfileIndexMap(const HloModule& module,
                              absl::Span<const std::string> extra_metrics);

  HloProfileIndexMap(const HloProfileIndexMap&) = default;
  HloProfileIndexMap(HloProfileIndexMap&&) = default;

  HloProfileIndexMap& operator=(const HloProfileIndexMap&) = default;
  HloProfileIndexMap& operator=(HloProfileIndexMap&&) = default;

  // Lookup helpers; die (via FindOrDie) if the key was never registered.
  size_t GetProfileIndexFor(const HloInstruction& instruction) const {
    return FindOrDie(instruction_to_profile_idx(), &instruction);
  }
  size_t GetProfileIndexFor(const HloComputation& computation) const {
    return FindOrDie(computation_to_profile_idx(), &computation);
  }
  size_t GetProfileIndexFor(const std::string& key) const {
    return xla::FindOrDie(extra_metric_to_profile_idx(), key);
  }

  // Slot counts per category; total_count() sizes the counter buffer.
  size_t instruction_count() const {
    return instruction_to_profile_idx().size();
  }
  size_t computation_count() const {
    return computation_to_profile_idx().size();
  }
  size_t extra_metrics_count() const {
    return extra_metric_to_profile_idx().size();
  }
  size_t total_count() const {
    return instruction_count() + computation_count() + extra_metrics_count();
  }

  // Read-only access to the underlying maps.
  const absl::flat_hash_map<const HloInstruction*, int64_t>&
  instruction_to_profile_idx() const {
    return instruction_to_profile_idx_;
  }
  const absl::flat_hash_map<const HloComputation*, int64_t>&
  computation_to_profile_idx() const {
    return computation_to_profile_idx_;
  }
  const absl::flat_hash_map<std::string, int64_t>& extra_metric_to_profile_idx()
      const {
    return extra_metric_to_profile_idx_;
  }

 private:
  absl::flat_hash_map<const HloInstruction*, int64_t>
      instruction_to_profile_idx_;
  absl::flat_hash_map<const HloComputation*, int64_t>
      computation_to_profile_idx_;
  absl::flat_hash_map<std::string, int64_t> extra_metric_to_profile_idx_;
};
std::unique_ptr<HloProfilePrinterData> CreateHloProfilePrinterData(
const HloProfileIndexMap& hlo_profile_index_map,
const HloCostAnalysis& cost_analysis,
absl::string_view entry_computation_name);
// Holds per-instruction / per-computation cycle counters gathered while
// executing an HLO module, addressed through an HloProfileIndexMap, and can
// render them as human-readable text or serialize them to a proto.
//
// NOTE(review): both constructor arguments are held by reference, so they
// must outlive this object.
class HloExecutionProfile {
 public:
  HloExecutionProfile(const HloProfilePrinterData* hlo_profile_printer_data,
                      const HloProfileIndexMap* hlo_profile_index_map);

  // Records the number of cycles an instruction took, either by instruction
  // or directly by counter index.
  void SetCyclesTakenBy(const HloInstruction* hlo, uint64_t cycles_taken);
  void SetCyclesTakenBy(size_t index, uint64_t cycles_taken);

  // Reads back a recorded cycle count.
  uint64_t GetCyclesTakenBy(const HloInstruction& hlo) const;
  uint64_t GetCyclesTakenBy(size_t index) const;

  // Total cycles recorded for an entire computation.
  uint64_t total_cycles_executed(const HloComputation& computation) const {
    return profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(
        computation)];
  }

  void set_total_cycles_executed(const HloComputation& computation,
                                 uint64_t total_cycles_executed) {
    profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(computation)] =
        total_cycles_executed;
  }

  // Stores a value for one of the extra named metrics declared in the index
  // map.
  void set_extra_metrics(const std::string& metric, uint64_t value) {
    profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(metric)] =
        value;
  }

  // Renders the recorded counters as human-readable text, converting cycles
  // to time using `clock_rate_ghz`.
  std::string ToString(float clock_rate_ghz) const {
    return PrintHloProfile(hlo_profile_printer_data_, profile_counters_.data(),
                           clock_rate_ghz);
  }

  // Raw access to the flat counter buffer (indexed per the index map).
  std::vector<int64_t>* mutable_profile_counters() {
    return &profile_counters_;
  }
  const std::vector<int64_t>& profile_counters() const {
    return profile_counters_;
  }

  // Serializes counters together with the printer metadata.
  HloExecutionProfileData ToProto() const;

 private:
  const HloProfilePrinterData& hlo_profile_printer_data_;
  const HloProfileIndexMap& hlo_profile_index_map_;
  // One slot per instruction, computation, and extra metric.
  std::vector<int64_t> profile_counters_;
};
}
#endif
#include "xla/service/hlo_execution_profile.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_execution_profile_data.pb.h"
#include "xla/service/human_readable_profile_builder.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
// Assigns dense indices in deterministic post-order: each computation gets a
// slot, immediately followed by one slot per instruction it contains; the
// extra metrics take the trailing slots.
HloProfileIndexMap::HloProfileIndexMap(
    const HloModule& module, absl::Span<const std::string> extra_metrics) {
  size_t next_index = 0;
  for (xla::HloComputation* computation : module.MakeComputationPostOrder()) {
    InsertOrDie(&computation_to_profile_idx_, computation, next_index++);
    for (const HloInstruction* instruction : computation->instructions()) {
      InsertOrDie(&instruction_to_profile_idx_, instruction, next_index++);
    }
  }
  for (const std::string& metric : extra_metrics) {
    InsertOrDie(&extra_metric_to_profile_idx_, metric, next_index++);
  }
}
std::unique_ptr<HloProfilePrinterData> CreateHloProfilePrinterData(
const HloProfileIndexMap& hlo_profile_index_map,
const HloCostAnalysis& cost_analysis,
absl::string_view entry_computation_name) {
using HloComputationInfo = HloProfilePrinterData::HloComputationInfo;
using HloInstructionInfo = HloProfilePrinterData::HloInstructionInfo;
size_t profile_counters_size = hlo_profile_index_map.total_count();
std::unique_ptr<HloProfilePrinterData> profile_printer_data =
std::make_unique<HloProfilePrinterData>();
profile_printer_data->set_profile_counters_size(profile_counters_size);
profile_printer_data->mutable_computation_infos()->Reserve(
hlo_profile_index_map.computation_count());
const auto& computation_to_profile_idx_map =
hlo_profile_index_map.computation_to_profile_idx();
std::vector<std::pair<const HloComputation*, int64_t>>
computation_and_profile_idx_list(computation_to_profile_idx_map.begin(),
computation_to_profile_idx_map.end());
absl::c_sort(computation_and_profile_idx_list,
[](const std::pair<const HloComputation*, int64_t>& left,
const std::pair<const HloComputation*, int64_t>& right) {
return left.second < right.second;
});
for (const auto& pair : computation_and_profile_idx_list) {
CHECK_LT(pair.second, profile_counters_size);
const HloComputation* computation = pair.first;
HloComputationInfo* computation_info =
profile_printer_data->add_computation_infos();
*computation_info->mutable_name() = std::string(computation->name());
computation_info->set_profile_index(pair.second);
computation_info->mutable_instruction_infos()->Reserve(
computation->instruction_count());
for (const HloInstruction* hlo : computation->instructions()) {
HloInstructionInfo* instruction_info =
computation_info->add_instruction_infos();
instruction_info->set_long_name(hlo->ToString());
instruction_info->set_short_name(hlo->ToString(
HloPrintOptions().set_compact_operands(true).set_print_operand_names(
false)));
instruction_info->set_category(hlo->ToCategory());
instruction_info->set_flop_count(cost_analysis.flop_count(*hlo));
instruction_info->set_transcendental_count(
cost_analysis.transcendental_count(*hlo));
instruction_info->set_bytes_accessed(cost_analysis.bytes_accessed(*hlo));
instruction_info->set_optimal_seconds(
cost_analysis.optimal_seconds(*hlo));
instruction_info->set_profile_index(
hlo_profile_index_map.GetProfileIndexFor(*hlo));
}
}
for (const auto& pair : hlo_profile_index_map.extra_metric_to_profile_idx()) {
profile_printer_data->mutable_extra_metrics()->insert(
{pair.first, pair.second});
}
*profile_printer_data->mutable_entry_computation() =
std::string(entry_computation_name);
return profile_printer_data;
}
// Sizes the counter buffer from the index map (one zero-initialized slot per
// instruction, computation, and extra metric). Both arguments are captured by
// reference and must outlive this object.
HloExecutionProfile::HloExecutionProfile(
    const HloProfilePrinterData* hlo_profile_printer_data,
    const HloProfileIndexMap* hlo_profile_index_map)
    : hlo_profile_printer_data_(*hlo_profile_printer_data),
      hlo_profile_index_map_(*hlo_profile_index_map),
      profile_counters_(
          hlo_profile_index_map_.total_count(),
          0) {}
// Records the cycle count for `hlo`, resolving its counter slot via the
// index map.
void HloExecutionProfile::SetCyclesTakenBy(const HloInstruction* hlo,
                                           uint64_t cycles_taken) {
  SetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(*hlo),
                   cycles_taken);
}

// Records the cycle count directly at counter slot `index`.
void HloExecutionProfile::SetCyclesTakenBy(size_t index,
                                           uint64_t cycles_taken) {
  profile_counters_[index] = cycles_taken;
}

// Returns the cycle count recorded for `hlo`.
uint64_t HloExecutionProfile::GetCyclesTakenBy(
    const HloInstruction& hlo) const {
  return GetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(hlo));
}

// Returns the cycle count stored at counter slot `index`.
uint64_t HloExecutionProfile::GetCyclesTakenBy(size_t index) const {
  return profile_counters_[index];
}
// Serializes the counters together with a copy of the printer metadata so
// the profile can be rendered later without the original index map.
HloExecutionProfileData HloExecutionProfile::ToProto() const {
  HloExecutionProfileData proto;
  proto.mutable_profile_counters()->Reserve(profile_counters_.size());
  for (int64_t counter : profile_counters_) {
    proto.add_profile_counters(counter);
  }
  *proto.mutable_printer_data() = hlo_profile_printer_data_;
  return proto;
}
} | #include "xla/service/hlo_execution_profile.h"
#include "absl/strings/str_cat.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::AllOf;
using ::testing::ContainsRegex;
// Minimal fixture: inherits module parsing and backend() from HloTestBase.
class HloExecutionProfileTest : public HloTestBase {};

// End-to-end check: record cycle counts for two instructions and verify the
// human-readable dump mentions each count next to its instruction name.
TEST_F(HloExecutionProfileTest, Basic) {
  auto hlo_module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
lhs = f32[30,30]{1,0} parameter(0)
rhs = f32[30,30]{1,0} parameter(1)
add = f32[30,30]{1,0} add(lhs, rhs)
ROOT dot = f32[30,30]{1,0} dot(lhs, add), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
                        .value();
  const HloInstruction* dot_instruction =
      hlo_module->entry_computation()->root_instruction();
  const HloInstruction* add_instruction = dot_instruction->operand(1);
  Shape shape = ShapeUtil::MakeShape(F32, {30, 30});
  // Byte-size callback for the cost analysis; opaque shapes are counted as a
  // single pointer.
  auto shape_size_function = [&](const Shape& shape) {
    const int64_t pointer_size = 8;
    if (shape.IsOpaque()) {
      return pointer_size;
    }
    return ShapeUtil::ByteSizeOf(shape, pointer_size);
  };
  HloCostAnalysis cost_analysis(shape_size_function);
  HloProfileIndexMap profile_index_map(*hlo_module);
  std::unique_ptr<HloProfilePrinterData> profile_printer =
      CreateHloProfilePrinterData(profile_index_map, cost_analysis,
                                  hlo_module->entry_computation()->name());
  HloExecutionProfile execution_profile(profile_printer.get(),
                                        &profile_index_map);
  // Arbitrary distinct cycle counts so the two lines are distinguishable in
  // the rendered text.
  const int64_t add_cycles = 1000;
  const int64_t dot_cycles = 4000;
  execution_profile.SetCyclesTakenBy(add_instruction, add_cycles);
  execution_profile.SetCyclesTakenBy(dot_instruction, dot_cycles);
  float clock_rate_ghz = backend()
                             .default_stream_executor()
                             ->GetDeviceDescription()
                             .clock_rate_ghz();
  EXPECT_THAT(execution_profile.ToString(clock_rate_ghz),
              AllOf(ContainsRegex(StrCat(dot_cycles, " cycles.*%",
                                         dot_instruction->name())),
                    ContainsRegex(StrCat(add_cycles, " cycles.*%",
                                         add_instruction->name()))));
}
}
} |
1,981 | cpp | tensorflow/tensorflow | host_offload_legalize | third_party/xla/xla/service/host_offload_legalize.cc | third_party/xla/xla/service/host_offload_legalize_test.cc | #ifndef XLA_SERVICE_HOST_OFFLOAD_LEGALIZE_H_
#define XLA_SERVICE_HOST_OFFLOAD_LEGALIZE_H_
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class HloCostAnalysis;
// HLO pass that legalizes the graph for host-memory offloading:
//  * duplicates constant broadcasts so each use can be rewritten
//    independently, and
//  * when run after layout assignment (`after_layout` == true), moves
//    layout-changing copies off host-offloaded value chains so host-resident
//    buffers keep a single layout.
class HostOffloadLegalize : public HloModulePass {
 public:
  // `host_memory_space_color` is the layout memory-space value that marks
  // host memory; `after_layout` enables the copy-movement rewrites.
  explicit HostOffloadLegalize(int64_t host_memory_space_color,
                               bool after_layout)
      : kHostMemorySpaceColor(host_memory_space_color),
        after_layout_(after_layout) {}
  ~HostOffloadLegalize() override = default;

  absl::string_view name() const override { return "host-offload-legalize"; }

  using HloPassInterface::Run;
  // Returns true iff the module was changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  const int64_t kHostMemorySpaceColor;
  const bool after_layout_;
};
}
#endif
#include "xla/service/host_offload_legalize.h"
#include <array>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
constexpr std::array<HloOpcode, 2> kUsersOpcodes = {HloOpcode::kSlice,
HloOpcode::kDynamicSlice};
// Walks towards the producer through single-user bitcast/copy/reshape links
// until a MoveToHost custom-call is found. Returns nullptr if the chain is
// broken by another opcode or by an operand with multiple users.
HloInstruction* FindToHostAnnotationToUpdate(HloInstruction* instr) {
  while (!instr->IsCustomCall(
      host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
    const HloOpcode opcode = instr->opcode();
    const bool pass_through = opcode == HloOpcode::kBitcast ||
                              opcode == HloOpcode::kCopy ||
                              opcode == HloOpcode::kReshape;
    if (!pass_through || instr->mutable_operand(0)->user_count() != 1) {
      return nullptr;
    }
    instr = instr->mutable_operand(0);
  }
  return instr;
}
// Walks towards the consumer through single-user bitcast/reshape/copy/slice/
// dynamic-slice links until a MoveToDevice custom-call is found. Returns
// nullptr if the chain forks or hits any other opcode.
HloInstruction* FindToDeviceAnnotationToUpdate(HloInstruction* instr) {
  while (!instr->IsCustomCall(
      host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
    if (instr->user_count() != 1) {
      return nullptr;
    }
    const HloOpcode opcode = instr->opcode();
    const bool pass_through = opcode == HloOpcode::kBitcast ||
                              opcode == HloOpcode::kReshape ||
                              opcode == HloOpcode::kCopy ||
                              absl::c_linear_search(kUsersOpcodes, opcode);
    if (!pass_through) {
      return nullptr;
    }
    instr = instr->users()[0];
  }
  return instr;
}
// Follows single-user bitcast/reshape links towards the consumer looking for
// a dynamic-update-slice. Stops (and returns the instruction reached) as
// soon as the chain forks or hits any other opcode, so the result is a DUS
// only when one was actually found.
HloInstruction* FindDUSFromAnnotation(HloInstruction* instr) {
  while (instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
    const bool pass_through = instr->opcode() == HloOpcode::kBitcast ||
                              instr->opcode() == HloOpcode::kReshape;
    if (!pass_through || instr->user_count() != 1) {
      break;
    }
    instr = instr->users()[0];
  }
  return instr;
}
// A broadcast of a constant may feed several consumers; give every use past
// the first its own clone so that later copy-movement can adjust layouts
// per-use without affecting the others. Returns true iff at least one
// broadcast was duplicated.
//
// Fixes vs. previous version: removed the redundant re-application of the
// broadcast/constant filter inside the second loop (the `broadcasts` vector
// is built from exactly that filter and nothing in between mutates the
// candidates), and made the use-loop index unsigned to avoid the
// signed/unsigned comparison.
absl::StatusOr<bool> DuplicateBroadcastForEachUse(HloModule* module) {
  bool split_at_least_one = false;
  for (HloComputation* computation : module->computations()) {
    // Collect candidates first: cloning below adds instructions to the
    // computation, which would invalidate iteration over instructions().
    std::vector<HloInstruction*> broadcasts;
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kBroadcast &&
          instruction->HasConstantOperand()) {
        broadcasts.push_back(instruction);
      }
    }
    for (HloInstruction* instruction : broadcasts) {
      // Record all (user, operand-index) uses up front; replacing operands
      // while iterating the user list would invalidate it.
      absl::InlinedVector<HloUse, 8> uses;
      for (HloInstruction* user : instruction->users()) {
        for (int64_t i = 0; i < user->operand_count(); ++i) {
          if (user->operand(i) != instruction) {
            continue;
          }
          uses.push_back(HloUse{user, i, {}});
        }
      }
      if (uses.size() <= 1) {
        VLOG(5) << "Skipping broadcast " << instruction->ToString()
                << " which has " << uses.size() << " uses";
        continue;
      }
      VLOG(5) << "Splitting broadcast " << instruction->ToString()
              << " which has " << uses.size() << " uses";
      split_at_least_one = true;
      // The first use keeps the original broadcast; each remaining use gets
      // a fresh clone.
      for (size_t i = 1; i < uses.size(); ++i) {
        const HloUse& use = uses[i];
        HloInstruction* new_broadcast =
            instruction->parent()->AddInstruction(instruction->Clone());
        VLOG(5) << "New broadcast " << new_broadcast->ToString();
        TF_RETURN_IF_ERROR(use.instruction->ReplaceOperandWith(
            use.operand_number, new_broadcast));
      }
    }
  }
  return split_at_least_one;
}
// Takes one step towards the producer of the offloaded value described by
// `current_value` (an (instruction, tuple-index) pair; index -1 means "not a
// tuple element"). Returns the next pair on the producer side, or an
// InvalidArgument error when the producer is an opcode this pass cannot look
// through. Returning the input pair unchanged (the custom-call and broadcast
// cases) signals a fixed point: the source of the buffer has been reached.
absl::StatusOr<std::pair<HloInstruction*, int>> WalkUpMemoryOffload(
    std::pair<HloInstruction*, int> current_value,
    const CallGraph& call_graph) {
  auto& [instruction, index] = current_value;
  switch (instruction->opcode()) {
    case HloOpcode::kGetTupleElement: {
      CHECK_EQ(index, -1);
      // Step into the tuple operand, tracking which element we came from.
      return std::make_pair(instruction->mutable_operand(0),
                            instruction->tuple_index());
    }
    case HloOpcode::kBitcast:
    case HloOpcode::kReshape: {
      // Shape-only ops: pass straight through.
      return std::make_pair(instruction->mutable_operand(0), index);
    }
    case HloOpcode::kTuple: {
      // Step into the element we were tracking; no longer inside a tuple.
      return std::make_pair(instruction->mutable_operand(index), -1);
    }
    case HloOpcode::kOptimizationBarrier: {
      return std::make_pair(instruction->mutable_operand(0), index);
    }
    case HloOpcode::kWhile: {
      // The while's output element comes from the body root tuple.
      HloComputation* while_body = instruction->while_body();
      HloInstruction* root = while_body->root_instruction();
      CHECK_EQ(root->opcode(), HloOpcode::kTuple);
      return std::make_pair(root, index);
    }
    case HloOpcode::kParameter: {
      // Step out of the called computation into its (single) caller, which
      // must be a while loop.
      CHECK_NE(instruction->parent(),
               instruction->GetModule()->entry_computation());
      auto callers = call_graph.GetComputationCallers(instruction->parent());
      if (callers.size() != 1) {
        return absl::InvalidArgumentError(
            "Expected to be called only by one caller");
      }
      auto* caller = callers[0];
      if (caller->opcode() != HloOpcode::kWhile) {
        return absl::InvalidArgumentError(
            "Expected to be called by a while loop");
      }
      return std::make_pair(caller->mutable_operand(0), index);
    }
    case HloOpcode::kDynamicUpdateSlice: {
      // Follow the buffer being updated (operand 0).
      return std::make_pair(instruction->mutable_operand(0), index);
    }
    case HloOpcode::kCustomCall: {
      // Only buffer-allocation and MoveToHost custom-calls are valid chain
      // sources; both are fixed points.
      if (!instruction->IsCustomCall("AllocateBuffer") &&
          !instruction->IsCustomCall(
              host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
        return absl::InvalidArgumentError(
            "Expected AllocateBuffer or MoveToHost custom-call");
      }
      return std::make_pair(instruction, index);
    }
    case HloOpcode::kBroadcast: {
      // A scalar-constant broadcast is an acceptable source (fixed point).
      auto* broadcast_operand = instruction->mutable_operand(0);
      if (broadcast_operand->opcode() != HloOpcode::kConstant) {
        return absl::InvalidArgumentError("Expected a constant as operand");
      }
      if (!ShapeUtil::IsEffectiveScalar(broadcast_operand->shape())) {
        return absl::InvalidArgumentError("Expected a scalar broadcast");
      }
      return std::make_pair(instruction, index);
    }
    default: {
      return absl::InvalidArgumentError(
          absl::StrFormat("Invalid opcode %s", instruction->ToString()));
    }
  }
}
// Takes one step towards the consumers of the offloaded value described by
// `current_value` (an (instruction, tuple-index) pair; -1 means "not a tuple
// element"). Returns the set of next (instruction, index) pairs, one per
// consumer path, or an InvalidArgument error if a consumer cannot be looked
// through.
//
// Fix vs. previous version: in `add_gte_for_idx`, the duplicate-GTE guard
// compared against a `gte` local that was never assigned, so "only one gte
// per index" was never actually enforced; `gte` is now recorded when found.
absl::StatusOr<std::vector<std::pair<HloInstruction*, int>>>
WalkDownMemoryOffload(const std::pair<HloInstruction*, int64_t>& current_value,
                      const CallGraph& call_graph) {
  VLOG(5) << "Current value in progress: " << current_value.first->ToString()
          << " idx: " << current_value.second;
  std::vector<std::pair<HloInstruction*, int>> results;
  // Finds the unique get-tuple-element user of `instr` extracting `idx` and
  // appends it to `results`. Fails if any user is not a GTE or if more than
  // one GTE extracts the same index.
  auto add_gte_for_idx = [&results](HloInstruction* instr,
                                    int idx) -> absl::Status {
    HloInstruction* gte = nullptr;
    for (HloInstruction* user : instr->users()) {
      if (user->opcode() != HloOpcode::kGetTupleElement) {
        return absl::InvalidArgumentError(
            "Expected users to be only get-tuple-elements");
      }
      if (user->tuple_index() != idx) {
        continue;
      }
      if (gte != nullptr) {
        return absl::InvalidArgumentError(
            "Expected to find only one gte per index.");
      }
      gte = user;
      results.push_back(std::make_pair(user, -1));
    }
    return absl::OkStatus();
  };
  if (current_value.first->user_count() == 0) {
    // A userless root of a while body: the value continues at the GTEs of
    // the while instruction in the caller.
    if (current_value.first->parent()->root_instruction() ==
        current_value.first) {
      auto callers =
          call_graph.GetComputationCallers(current_value.first->parent());
      if (callers.size() != 1 || callers[0]->opcode() != HloOpcode::kWhile) {
        return absl::InvalidArgumentError(
            "Expected to be called only by one caller and caller be a While");
      }
      TF_RETURN_IF_ERROR(add_gte_for_idx(callers[0], current_value.second));
      return results;
    }
  }
  if (current_value.first->opcode() == HloOpcode::kParameter &&
      current_value.first->shape().IsTuple()) {
    // Tuple-shaped parameter: continue at the GTE extracting our element.
    TF_RETURN_IF_ERROR(
        add_gte_for_idx(current_value.first, current_value.second));
    return results;
  }
  for (HloInstruction* user : current_value.first->users()) {
    switch (user->opcode()) {
      case HloOpcode::kGetTupleElement: {
        CHECK_NE(user->tuple_index(), -1);
        // Only follow the GTE that extracts the element we are tracking.
        if (user->tuple_index() != current_value.second) {
          continue;
        }
        results.push_back(std::make_pair(user, -1));
        break;
      }
      case HloOpcode::kTuple: {
        // The value becomes a tuple element at the operand's position.
        auto output_indices = user->OperandIndices(current_value.first);
        if (output_indices.size() != 1) {
          return absl::InvalidArgumentError(
              "Expected operand to be used only once in the tuple.");
        }
        results.push_back(std::make_pair(user, output_indices[0]));
        break;
      }
      case HloOpcode::kOptimizationBarrier: {
        results.push_back(std::make_pair(user, current_value.second));
        break;
      }
      case HloOpcode::kWhile: {
        // Step into the loop body at its parameter.
        HloComputation* while_body = user->while_body();
        HloInstruction* parameter = while_body->parameter_instruction(0);
        results.push_back(std::make_pair(parameter, current_value.second));
        break;
      }
      case HloOpcode::kDynamicUpdateSlice: {
        // Only the updated buffer (operand 0) may flow through a DUS.
        if (user->OperandIndices(current_value.first)[0] != 0) {
          return absl::InvalidArgumentError(
              "Expected to be used by first operand of dynamic-update-slice");
        }
        results.push_back(std::make_pair(user, current_value.second));
        break;
      }
      case HloOpcode::kCustomCall: {
        // Only a MoveToDevice annotation terminates a path legally.
        if (user->IsCustomCall(host_memory_offload_annotations::
                                   kMoveToDeviceCustomCallTarget)) {
          results.push_back(std::make_pair(user, current_value.second));
          break;
        }
        return absl::InvalidArgumentError("Invalid custom-call found.");
      }
      case HloOpcode::kBitcast:
      case HloOpcode::kCopy:
      case HloOpcode::kDynamicSlice:
      case HloOpcode::kReshape:
      case HloOpcode::kSlice: {
        results.push_back(std::make_pair(user, current_value.second));
        break;
      }
      default: {
        return absl::InvalidArgumentError("Unrecognized user opcode");
      }
    }
  }
  return results;
}
// Tries to move layout-changing `copy` instructions off a host-offloaded
// value chain rooted at `instruction` (a MoveToHost custom-call or a
// host-memory entry parameter): walks up to the source of the offloaded
// buffer, walks every consumer path down to the matching MoveToDevice
// annotations collecting the copies in between, then re-anchors each copy
// after the MoveToDevice annotation and propagates the pre-copy layout along
// the chain. Returns true iff the graph was changed. Annotations made dead
// are appended to `to_remove` (removed by the caller); handled annotations
// are recorded in `processed_annotations`.
//
// Fixes vs. previous version:
//  * guard a possible nullptr result of FindToHostAnnotationToUpdate before
//    dereferencing it (previously a null deref);
//  * replaced `if (x == nullptr) { CHECK(false); return false; }` (dead code
//    after CHECK) with the equivalent CHECK_NE;
//  * dropped an always-true IsCustomCall(MoveToDevice) re-check that
//    immediately followed the guard returning false otherwise.
absl::StatusOr<bool> ProcessAnnotationForCopyMovement(
    HloInstruction* instruction, const CallGraph* call_graph,
    absl::flat_hash_set<HloInstruction*>& processed_annotations,
    std::vector<HloInstruction*>& to_remove) {
  auto is_entry_computation_parameter = [](HloInstruction* instruction) {
    return instruction->opcode() == HloOpcode::kParameter &&
           instruction->parent()->IsEntryComputation();
  };
  if (instruction->IsRoot()) {
    return false;
  }
  if (instruction->user_count() == 0) {
    return false;
  }
  // If the annotation feeds a dynamic-update-slice (possibly via bitcasts or
  // reshapes), anchor the analysis on the DUS instead.
  HloInstruction* starting_instr =
      FindDUSFromAnnotation(instruction->users().at(0));
  if (starting_instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
    starting_instr = instruction;
  }
  VLOG(3) << "Dus or Annotation: " << starting_instr->ToString();
  std::pair<HloInstruction*, int> current_value =
      std::make_pair(starting_instr, -1);
  processed_annotations.insert(current_value.first);
  if (!current_value.first->IsCustomCall(
          host_memory_offload_annotations::kMoveToHostCustomCallTarget) &&
      !is_entry_computation_parameter(current_value.first)) {
    CHECK_EQ(current_value.first->opcode(), HloOpcode::kDynamicUpdateSlice);
    // Walk up the producer chain until a fixed point (the buffer source).
    // Bail out without changes if the chain contains anything unsupported.
    while (true) {
      VLOG(10) << "Current value before: " << current_value.first->ToString();
      auto current_value_up = WalkUpMemoryOffload(current_value, *call_graph);
      if (!current_value_up.ok()) {
        return false;
      }
      if (current_value_up.value() == current_value) {
        break;
      }
      current_value = current_value_up.value();
      VLOG(10) << "Current value after: " << current_value.first->ToString();
      HloInstruction* annotation = current_value.first;
      if (annotation->opcode() == HloOpcode::kDynamicUpdateSlice) {
        HloInstruction* real_annotation =
            FindToHostAnnotationToUpdate(annotation->mutable_operand(1));
        // FindToHostAnnotationToUpdate may fail and return nullptr; treat
        // that as "unsupported chain" rather than dereferencing it.
        if (real_annotation == nullptr ||
            !real_annotation->IsCustomCall(
                host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
          return false;
        }
      }
    }
  }
  // Walk every consumer path, collecting the copies to relocate.
  std::vector<std::pair<HloInstruction*, int>> copies_to_move;
  std::vector<std::pair<HloInstruction*, int>> stack(1, current_value);
  while (!stack.empty()) {
    VLOG(5) << "Current value before down: " << stack.back().first->ToString();
    if (absl::c_linear_search(kUsersOpcodes, stack.back().first->opcode()) ||
        stack.back().first->IsCustomCall(
            host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
      HloInstruction* annotation =
          FindToDeviceAnnotationToUpdate(stack.back().first);
      if (!annotation ||
          !annotation->IsCustomCall(
              host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
        VLOG(5) << "Couldn't find annotation for consumer instruction in chain";
        return false;
      }
      // If the annotation merely carries a loop parameter element unchanged
      // back into the while root tuple, the carry is a no-op: forward the
      // parameter element directly.
      for (HloInstruction* user : annotation->users()) {
        HloInstruction* root_instruction =
            annotation->parent()->root_instruction();
        if (root_instruction == user &&
            root_instruction->opcode() == HloOpcode::kTuple) {
          auto callers = call_graph->GetComputationCallers(annotation->parent());
          if (callers.size() != 1 || callers[0]->opcode() != HloOpcode::kWhile) {
            return absl::InvalidArgumentError(
                "Expected to be called only by one caller and caller be a "
                "While");
          }
          for (int i = 0; i < user->operands().size(); i++) {
            if (user->operands()[i] == annotation &&
                annotation->operand(0)->opcode() ==
                    HloOpcode::kGetTupleElement &&
                annotation->operand(0)->operand(0)->opcode() ==
                    HloOpcode::kParameter &&
                annotation->operand(0)->tuple_index() == i) {
              // Unchanged loop carry; replacement can't fail structurally.
              user->ReplaceOperandWith(i, annotation->mutable_operand(0))
                  .IgnoreError();
            }
          }
        }
      }
      stack.pop_back();
      continue;
    }
    auto current_value_down = WalkDownMemoryOffload(stack.back(), *call_graph);
    if (!current_value_down.ok()) {
      VLOG(5) << "Current value down failed: " << current_value_down.status();
      break;
    }
    stack.pop_back();
    stack.insert(stack.end(), current_value_down.value().begin(),
                 current_value_down.value().end());
    for (auto& instruction : current_value_down.value()) {
      VLOG(5) << "Current value last down: " << stack.back().first->ToString();
      if (instruction.first->opcode() == HloOpcode::kCopy) {
        copies_to_move.push_back(instruction);
      }
    }
  }
  // Rewrites the layout of (instruction, tuple-index) to the copy operand's
  // layout; for a while loop also patches the body root and condition
  // parameter so all three shapes stay consistent.
  auto update_shape_layout =
      [&](const std::pair<HloInstruction*, int>& instruction,
          HloInstruction* copy_to_move) {
        VLOG(5) << "Update shape layout: " << instruction.first->ToString()
                << " " << instruction.second;
        if (instruction.second != -1) {
          *instruction.first->mutable_shape()
               ->mutable_tuple_shapes(instruction.second)
               ->mutable_layout() = copy_to_move->operand(0)->shape().layout();
        } else {
          *instruction.first->mutable_shape()->mutable_layout() =
              copy_to_move->operand(0)->shape().layout();
        }
        if (instruction.first->opcode() == HloOpcode::kWhile) {
          Shape new_shape = copy_to_move->operand(0)->shape();
          *instruction.first->while_body()
               ->root_instruction()
               ->mutable_shape()
               ->mutable_tuple_shapes(instruction.second)
               ->mutable_layout() = new_shape.layout();
          *instruction.first->while_condition()
               ->parameter_instruction(0)
               ->mutable_shape()
               ->mutable_tuple_shapes(instruction.second)
               ->mutable_layout() = new_shape.layout();
        }
      };
  // Relocate each collected copy: re-walk its consumer chain, propagate the
  // pre-copy layout, re-anchor the copy after the MoveToDevice annotation,
  // then delete the original copy.
  while (!copies_to_move.empty()) {
    auto& copy_to_move = copies_to_move.back();
    VLOG(5) << "Copy to move: " << copy_to_move.first->ToString();
    stack.clear();
    stack.push_back(copy_to_move);
    while (!stack.empty()) {
      VLOG(5) << "Current value before down: " << stack.back().first->ToString()
              << " " << stack.back().second;
      auto current_value_down =
          WalkDownMemoryOffload(stack.back(), *call_graph);
      if (!current_value_down.ok()) {
        VLOG(5) << "Current value down failed: " << current_value_down.status();
        break;
      }
      for (auto& instruction : current_value_down.value()) {
        update_shape_layout(instruction, copy_to_move.first);
        if (instruction.first->opcode() == HloOpcode::kParameter) {
          // Keep the caller's corresponding operand/result shape in sync.
          auto callers =
              call_graph->GetComputationCallers(instruction.first->parent());
          if (callers.size() != 1) {
            return absl::InvalidArgumentError(
                "Expected to be called only by one caller");
          }
          auto* caller = callers[0];
          update_shape_layout(std::make_pair(caller, instruction.second),
                              copy_to_move.first);
        }
      }
      stack.pop_back();
      for (auto& instruction : current_value_down.value()) {
        VLOG(5) << "Current value last down: " << instruction.first->ToString();
        CHECK_NE(instruction.first->opcode(), HloOpcode::kCopy)
            << "Copies should be processed in order";
        if (absl::c_linear_search(kUsersOpcodes, instruction.first->opcode()) ||
            instruction.first->IsCustomCall(
                host_memory_offload_annotations::
                    kMoveToDeviceCustomCallTarget)) {
          HloInstruction* annotation =
              FindToDeviceAnnotationToUpdate(instruction.first);
          CHECK_NE(annotation, nullptr)
              << "We already verified we could find an annotation here. "
                 "Something went wrong.";
          HloInstruction* new_annotation = nullptr;
          if (instruction.first->opcode() == HloOpcode::kCustomCall) {
            new_annotation = annotation;
          } else {
            new_annotation = instruction.first->AddInstruction(
                annotation->CloneWithNewOperands(instruction.first->shape(),
                                                 {instruction.first}));
          }
          update_shape_layout(std::make_pair(new_annotation, -1),
                              copy_to_move.first);
          // Re-create the moved copy right after the annotation, restoring
          // the original post-copy layout for downstream consumers.
          Shape new_copy_shape = new_annotation->shape();
          *new_copy_shape.mutable_layout() =
              copy_to_move.first->shape().layout();
          HloInstruction* new_copy = instruction.first->AddInstruction(
              copy_to_move.first->CloneWithNewOperands(new_copy_shape,
                                                       {new_annotation}));
          std::vector<HloInstruction*> users = instruction.first->users();
          for (auto* use : users) {
            if (use == new_copy || use == new_annotation) {
              continue;
            }
            TF_RETURN_IF_ERROR(
                instruction.first->ReplaceUseWithDifferentShape(use, new_copy));
          }
          if (new_annotation != annotation) {
            TF_RETURN_IF_ERROR(annotation->ReplaceAllUsesWithDifferentShape(
                annotation->mutable_operand(0)));
            to_remove.push_back(annotation);
          }
          continue;
        }
        if (instruction.first->opcode() == HloOpcode::kDynamicUpdateSlice) {
          // Move the MoveToHost annotation directly onto the DUS update
          // operand so the update is annotated at the insertion point.
          HloInstruction* annotation = FindToHostAnnotationToUpdate(
              instruction.first->mutable_operand(1));
          CHECK_NE(annotation, nullptr);
          CHECK_EQ(annotation->opcode(), HloOpcode::kCustomCall);
          HloInstruction* new_annotation = instruction.first->AddInstruction(
              annotation->CloneWithNewOperands(
                  instruction.first->operand(1)->shape(),
                  {instruction.first->mutable_operand(1)}));
          TF_RETURN_IF_ERROR(
              instruction.first->ReplaceOperandWith(1, new_annotation));
          TF_RETURN_IF_ERROR(
              annotation->ReplaceAllUsesWith(annotation->mutable_operand(0)));
          processed_annotations.insert(annotation);
          processed_annotations.insert(new_annotation);
          to_remove.push_back(annotation);
        }
        stack.push_back(instruction);
      }
    }
    VLOG(5) << "MOVED: " << copy_to_move.first->ToString();
    TF_RETURN_IF_ERROR(copy_to_move.first->ReplaceAllUsesWithDifferentShape(
        copy_to_move.first->mutable_operand(0)));
    TF_RETURN_IF_ERROR(
        copy_to_move.first->parent()->RemoveInstruction(copy_to_move.first));
    copies_to_move.pop_back();
  }
  return true;
}
// Runs copy movement for every MoveToHost annotation (and host-memory entry
// parameter), skipping ones already handled while processing an earlier
// chain, then deletes annotations that became dead. Returns true iff any
// chain was changed.
absl::StatusOr<bool> FixupInterveningCopies(
    const std::vector<HloInstruction*>& copy_to_host_annotations,
    const CallGraph* call_graph) {
  absl::flat_hash_set<HloInstruction*> processed_annotations;
  std::vector<HloInstruction*> annotations_to_remove;
  bool any_changed = false;
  for (HloInstruction* annotation : copy_to_host_annotations) {
    if (processed_annotations.contains(annotation)) {
      continue;
    }
    TF_ASSIGN_OR_RETURN(
        bool moved_copies,
        ProcessAnnotationForCopyMovement(annotation, call_graph,
                                         processed_annotations,
                                         annotations_to_remove));
    any_changed |= moved_copies;
  }
  for (HloInstruction* dead_annotation : annotations_to_remove) {
    TF_RETURN_IF_ERROR(
        dead_annotation->parent()->RemoveInstruction(dead_annotation));
  }
  return any_changed;
}
}
// Pass entry point. Always splits shared constant broadcasts; after layout
// assignment it additionally collects every host-offload entry point
// (MoveToHost annotations plus entry parameters placed in host memory) and
// moves intervening copies off those chains.
absl::StatusOr<bool> HostOffloadLegalize::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(bool changed, DuplicateBroadcastForEachUse(module));
  // Copy movement only makes sense once layouts have been assigned.
  if (!after_layout_) {
    return changed;
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  std::vector<HloInstruction*> copy_to_host_annotations;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    const bool is_entry = computation->IsEntryComputation();
    for (HloInstruction* instruction : computation->instructions()) {
      if (is_entry && instruction->opcode() == HloOpcode::kParameter) {
        // An entry parameter placed in host memory acts like a MoveToHost
        // annotation.
        const Shape& param_shape =
            module->entry_computation_layout()
                .parameter_layout(instruction->parameter_number())
                .shape();
        if (param_shape.has_layout() &&
            param_shape.layout().memory_space() == kHostMemorySpaceColor) {
          copy_to_host_annotations.push_back(instruction);
          continue;
        }
      }
      if (instruction->IsCustomCall(
              host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
        copy_to_host_annotations.push_back(instruction);
      }
    }
  }
  TF_ASSIGN_OR_RETURN(
      bool fixed_up_copies,
      FixupInterveningCopies(copy_to_host_annotations, call_graph.get()));
  changed |= fixed_up_copies;
  return changed;
}
} | #include "xla/service/host_offload_legalize.h"
#include <cstdint>
#include <stack>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace {
// Test fixture: runs HostOffloadLegalize in after-layout mode with a fixed
// host memory space color, plus helpers for checking layouts and leftover
// offload annotations.
class HostOffloadLegalizeTest : public HloTestBase {
 protected:
  static constexpr int64_t kHostMemorySpaceColor{5};

  // Verifies the module, requires it to be unscheduled, then runs the pass.
  absl::StatusOr<bool> RunHostOffloadLegalize(HloModule* module) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostOffloadLegalize host_offload_legalize(kHostMemorySpaceColor,
                                              /*after_layout=*/true);
    return host_offload_legalize.Run(module);
  }

  // Asserts `shape` has a layout and checks its memory-space annotation.
  void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
    ASSERT_TRUE(shape.has_layout());
    EXPECT_EQ(shape.layout().memory_space(), memory_space);
  }

  // Returns true if any MoveToHost/MoveToDevice custom-call remains in the
  // module after the pass.
  bool HaveRemainingOffloadAnnotations(const HloModule* module) {
    for (const HloComputation* computation : module->computations()) {
      for (const HloInstruction* instruction : computation->instructions()) {
        if (instruction->IsCustomCall(
                {host_memory_offload_annotations::kMoveToHostCustomCallTarget,
                 host_memory_offload_annotations::
                     kMoveToDeviceCustomCallTarget})) {
          return true;
        }
      }
    }
    return false;
  }
};
// After legalization, the MoveToDevice custom-call feeding a {1,0}-layout
// consumer through the opt-barrier (custom-call.18) should itself produce
// layout {0,1}, with a copy to {1,0} inserted after it — i.e. the
// layout-changing copy ends up on the device side of the offload boundary.
TEST_F(HostOffloadLegalizeTest, NoCopyWithOptBarrierMoreElaborate) {
  const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
sine.6 = f32[16,256]{0,1} sine(sine.3)
cosine.9 = f32[16,256]{0,1} cosine(sine.6)
custom-call.10 = f32[16,256]{0,1} custom-call(cosine.9), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) tuple(custom-call.5, cp, custom-call.10, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{0,1} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  // The MoveToDevice annotation now keeps the host-side layout {0,1} and is
  // followed by a copy that converts to the device layout {1,0}.
  HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
  EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
  EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
  EXPECT_EQ(custom_call->users()[0]->shape().layout(),
            LayoutUtil::MakeLayout({1, 0}));
}
// Like NoCopyWithOptBarrierMoreElaborate, but parameter 1 is streamed from
// host memory (S(5) in the entry layout) with a tiled layout. Both
// MoveToDevice annotations should keep their host-side {0,1} layouts — the
// streamed one including its T(8,128) tiling — with the layout-changing copy
// to {1,0} inserted after each annotation.
TEST_F(HostOffloadLegalizeTest, XposeCopyOnParameterStreaming) {
  const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1},f32[16,256]{0,1:T(8,128)S(5)})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
Arg_0.2 = f32[16,256]{0,1:T(8,128)} parameter(1)
cp0 = f32[16,256]{1,0} copy(Arg_0.2)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp1 = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) tuple(custom-call.5, cp1, cp0, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{1,0} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  // The annotation fed through the opt-barrier keeps layout {0,1} and gains
  // a copy to the device layout {1,0}.
  HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
  EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
  EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
  EXPECT_EQ(custom_call->users()[0]->shape().layout(),
            LayoutUtil::MakeLayout({1, 0}));
  // The annotation on the streamed parameter keeps the host-side tiled
  // layout {0,1:T(8,128)} and is likewise followed by a copy to {1,0}.
  custom_call = FindInstruction(module.get(), "custom-call.19");
  EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
  EXPECT_EQ(custom_call->shape().layout(),
            LayoutUtil::MakeLayout({0, 1}, {}, {}, {}, {Tile{{8, 128}}}));
  EXPECT_EQ(custom_call->users()[0]->shape().layout(),
            LayoutUtil::MakeLayout({1, 0}));
}
// A producer while-loop writes activations to host; a consumer while-loop
// (with a different layout) reads them back, and the entry computation has a
// layout-changing copy between the two loops. After legalization that copy
// should be moved inside the consuming while body so the host buffer itself
// is not copied on the boundary.
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleConsumers) {
  const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
  EXPECT_TRUE(changed);
  // The (single remaining) copy must now live inside the consuming while
  // body rather than in the entry computation.
  HloInstruction* copy = FindInstruction(module.get(), HloOpcode::kCopy);
  HloInstruction* consuming_while =
      FindInstruction(module.get(), "consuming_while");
  EXPECT_NE(copy, nullptr);
  EXPECT_NE(consuming_while, nullptr);
  EXPECT_EQ(copy->parent(), consuming_while->while_body());
  XLA_VLOG_LINES(1, module->ToString());
}
// Same producer/consumer while-loop setup as the previous test, but with two
// chained layout-changing copies (cp -> cp1) between the loops. After
// legalization the rewritten copies (cp.2, cp1.2) should remain in the entry
// computation, still chained one after the other.
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleCopies) {
  const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
cp1 = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(cp)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp1)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
  EXPECT_TRUE(changed);
  // The rewritten copies stay in the entry computation, with cp1.2 consuming
  // cp.2 directly.
  HloInstruction* copy_0 = FindInstruction(module.get(), "cp.2");
  HloInstruction* copy_1 = FindInstruction(module.get(), "cp1.2");
  HloInstruction* consuming_while =
      FindInstruction(module.get(), "consuming_while");
  EXPECT_NE(copy_0, nullptr);
  EXPECT_NE(copy_1, nullptr);
  EXPECT_NE(consuming_while, nullptr);
  EXPECT_EQ(copy_0->parent(), module->entry_computation());
  EXPECT_EQ(copy_1->operand(0), copy_0);
  XLA_VLOG_LINES(1, module->ToString());
}
}
} |
1,982 | cpp | tensorflow/tensorflow | hlo_cost_analysis | third_party/xla/xla/service/hlo_cost_analysis.cc | third_party/xla/xla/service/hlo_cost_analysis_test.cc | #ifndef XLA_SERVICE_HLO_COST_ANALYSIS_H_
#define XLA_SERVICE_HLO_COST_ANALYSIS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
class HloCostAnalysis : public ConstDfsHloVisitor {
public:
static inline constexpr absl::string_view kFlopsKey = "flops";
static inline constexpr absl::string_view kTranscendentalsKey =
"transcendentals";
static inline constexpr absl::string_view kBytesAccessedKey =
"bytes accessed";
static inline constexpr absl::string_view kOptimalSecondsKey =
"optimal_seconds";
static inline constexpr absl::string_view kUtilizationKey = "utilization";
static inline constexpr absl::string_view kReserved0Key = "reserved0";
static inline constexpr absl::string_view kReserved1Key = "reserved1";
// A bag of named float-valued cost properties (flops, bytes accessed, etc.).
// A fixed set of frequently-used properties is stored in dedicated fields;
// all other properties — including per-operand/per-output keys for non-empty
// shape indices and operand numbers >= 2 — live in the `named_props_` map
// keyed by the strings produced by Get*Key() below.
class Properties {
 public:
  Properties()
      : flops_(0),
        transcendentals_(0),
        bytes_accessed_(0),
        optimal_seconds_(0),
        utilization_(0),
        operand0_utilization_(0),
        operand1_utilization_(0),
        operand0_bytes_accessed_(0),
        operand1_bytes_accessed_(0),
        output_root_bytes_accessed_(0),
        reserved0_(0),
        reserved1_(0) {
    // The private key constants must stay in sync with the key-building
    // helpers; these checks catch any drift.
    DCHECK_EQ(kOperand0UtilizationKey, GetOperandUtilizationKey(0, {}));
    DCHECK_EQ(kOperand1UtilizationKey, GetOperandUtilizationKey(1, {}));
    DCHECK_EQ(kOperand0BytesAccessedKey, GetOperandBytesAccessedKey(0, {}));
    DCHECK_EQ(kOperand1BytesAccessedKey, GetOperandBytesAccessedKey(1, {}));
    DCHECK_EQ(kOutputRootBytesAccessedKey, GetOutputBytesAccessedKey({}));
  }

  // Mutable lookup. Well-known keys map to their dedicated fields; any other
  // key is created in `named_props_` (initialized to 0) on first access.
  float& operator[](absl::string_view property) {
    if (property == kFlopsKey) {
      return flops_;
    }
    if (property == kTranscendentalsKey) {
      return transcendentals_;
    }
    if (property == kBytesAccessedKey) {
      return bytes_accessed_;
    }
    if (property == kOptimalSecondsKey) {
      return optimal_seconds_;
    }
    if (property == kUtilizationKey) {
      return utilization_;
    }
    if (property == kOperand0UtilizationKey) {
      return operand0_utilization_;
    }
    if (property == kOperand1UtilizationKey) {
      return operand1_utilization_;
    }
    if (property == kOperand0BytesAccessedKey) {
      return operand0_bytes_accessed_;
    }
    if (property == kOperand1BytesAccessedKey) {
      return operand1_bytes_accessed_;
    }
    if (property == kOutputRootBytesAccessedKey) {
      return output_root_bytes_accessed_;
    }
    if (property == kReserved0Key) {
      return reserved0_;
    }
    if (property == kReserved1Key) {
      return reserved1_;
    }
    // lazy_emplace avoids constructing the std::string key unless the entry
    // is actually missing.
    auto it = named_props_.lazy_emplace(property, [&](const auto& ctor) {
      ctor(std::string(property), 0.f);
    });
    return it->second;
  }

  // Read-only lookup; unknown keys read as 0 without inserting anything.
  float operator[](absl::string_view property) const {
    if (property == kFlopsKey) {
      return flops_;
    }
    if (property == kTranscendentalsKey) {
      return transcendentals_;
    }
    if (property == kBytesAccessedKey) {
      return bytes_accessed_;
    }
    if (property == kOptimalSecondsKey) {
      return optimal_seconds_;
    }
    if (property == kUtilizationKey) {
      return utilization_;
    }
    if (property == kOperand0UtilizationKey) {
      return operand0_utilization_;
    }
    if (property == kOperand1UtilizationKey) {
      return operand1_utilization_;
    }
    if (property == kOperand0BytesAccessedKey) {
      return operand0_bytes_accessed_;
    }
    if (property == kOperand1BytesAccessedKey) {
      return operand1_bytes_accessed_;
    }
    if (property == kOutputRootBytesAccessedKey) {
      return output_root_bytes_accessed_;
    }
    if (property == kReserved0Key) {
      return reserved0_;
    }
    if (property == kReserved1Key) {
      return reserved1_;
    }
    auto it = named_props_.find(property);
    if (it != named_props_.end()) {
      return it->second;
    }
    return 0;
  }

  // Invokes fn(key, value) for every property whose value is non-zero —
  // first the dedicated fields, then the entries of `named_props_`.
  template <typename Fn>
  void ForEach(Fn&& fn) const {
    if (flops_ != 0) {
      fn(kFlopsKey, flops_);
    }
    if (transcendentals_ != 0) {
      fn(kTranscendentalsKey, transcendentals_);
    }
    if (bytes_accessed_ != 0) {
      fn(kBytesAccessedKey, bytes_accessed_);
    }
    if (optimal_seconds_ != 0) {
      fn(kOptimalSecondsKey, optimal_seconds_);
    }
    if (utilization_ != 0) {
      fn(kUtilizationKey, utilization_);
    }
    if (operand0_utilization_ != 0) {
      fn(kOperand0UtilizationKey, operand0_utilization_);
    }
    if (operand1_utilization_ != 0) {
      fn(kOperand1UtilizationKey, operand1_utilization_);
    }
    if (operand0_bytes_accessed_ != 0) {
      fn(kOperand0BytesAccessedKey, operand0_bytes_accessed_);
    }
    if (operand1_bytes_accessed_ != 0) {
      fn(kOperand1BytesAccessedKey, operand1_bytes_accessed_);
    }
    if (output_root_bytes_accessed_ != 0) {
      fn(kOutputRootBytesAccessedKey, output_root_bytes_accessed_);
    }
    if (reserved0_ != 0) {
      fn(kReserved0Key, reserved0_);
    }
    if (reserved1_ != 0) {
      fn(kReserved1Key, reserved1_);
    }
    for (const auto& [k, v] : named_props_) {
      if (v != 0) {
        fn(k, v);
      }
    }
  }

  // Returns the utilization of `operand` at `shape_index` (0 if unset).
  // Operands 0/1 at the root shape index use the dedicated fields.
  float operand_utilization(int64_t operand,
                            const ShapeIndex& shape_index = {}) {
    if (operand == 0 && shape_index.empty()) {
      return operand0_utilization_;
    }
    if (operand == 1 && shape_index.empty()) {
      return operand1_utilization_;
    }
    auto it =
        named_props_.find(GetOperandUtilizationKey(operand, shape_index));
    if (it != named_props_.end()) {
      return it->second;
    }
    return 0;
  }

  // Sets the utilization of `operand` at the root shape index.
  void set_operand_utilization(int64_t operand, float value) {
    set_operand_utilization(operand, {}, value);
  }
  // Sets the utilization of `operand` at `shape_index`.
  void set_operand_utilization(int64_t operand, const ShapeIndex& shape_index,
                               float value) {
    if (operand == 0 && shape_index.empty()) {
      operand0_utilization_ = value;
    } else if (operand == 1 && shape_index.empty()) {
      operand1_utilization_ = value;
    } else {
      named_props_[GetOperandUtilizationKey(operand, shape_index)] = value;
    }
  }

  // Returns the bytes accessed for `operand` at `shape_index` (0 if unset).
  float operand_bytes_accessed(int64_t operand,
                               const ShapeIndex& shape_index = {}) {
    if (operand == 0 && shape_index.empty()) {
      return operand0_bytes_accessed_;
    }
    if (operand == 1 && shape_index.empty()) {
      return operand1_bytes_accessed_;
    }
    auto it =
        named_props_.find(GetOperandBytesAccessedKey(operand, shape_index));
    if (it != named_props_.end()) {
      return it->second;
    }
    return 0;
  }

  // Sets the bytes accessed for `operand` at the root shape index.
  void set_operand_bytes_accessed(int64_t operand, float value) {
    set_operand_bytes_accessed(operand, {}, value);
  }
  // Sets the bytes accessed for `operand` at `shape_index`.
  void set_operand_bytes_accessed(int64_t operand,
                                  const ShapeIndex& shape_index,
                                  float value) {
    if (operand == 0 && shape_index.empty()) {
      operand0_bytes_accessed_ = value;
    } else if (operand == 1 && shape_index.empty()) {
      operand1_bytes_accessed_ = value;
    } else {
      named_props_[GetOperandBytesAccessedKey(operand, shape_index)] = value;
    }
  }

  // Returns the bytes accessed for the output at `shape_index` (0 if unset).
  float output_bytes_accessed(const ShapeIndex& shape_index = {}) {
    if (shape_index.empty()) {
      return output_root_bytes_accessed_;
    }
    auto it = named_props_.find(GetOutputBytesAccessedKey(shape_index));
    if (it != named_props_.end()) {
      return it->second;
    }
    return 0;
  }

  // Sets the bytes accessed for the output at the root shape index.
  void set_output_bytes_accessed(float value) {
    set_output_bytes_accessed({}, value);
  }
  // Sets the bytes accessed for the output at `shape_index`.
  void set_output_bytes_accessed(const ShapeIndex& shape_index, float value) {
    if (shape_index.empty()) {
      output_root_bytes_accessed_ = value;
    } else {
      named_props_[GetOutputBytesAccessedKey(shape_index)] = value;
    }
  }

  // Human-readable dump of the dedicated fields (entries in `named_props_`
  // are not included).
  std::string ToString() const {
    return absl::StrFormat(
        "HloCostAnalysis::Properties{\n"
        " flops: %f,\n"
        " transcendentals: %f\n"
        " bytes_accessed: %f\n"
        " optimal_seconds: %f\n"
        " utilization: %f\n"
        " operand0_utilization: %f\n"
        " operand1_utilization: %f\n"
        " operand0_bytes_accessed: %f\n"
        " operand1_bytes_accessed: %f\n"
        " output_root_bytes_accessed: %f\n"
        " reserved0: %f\n"
        " reserved1: %f\n"
        "}",
        flops_, transcendentals_, bytes_accessed_, optimal_seconds_,
        utilization_, operand0_utilization_, operand1_utilization_,
        operand0_bytes_accessed_, operand1_bytes_accessed_,
        output_root_bytes_accessed_, reserved0_, reserved1_);
  }

 private:
  // Precomputed key strings for the dedicated fields; the constructor
  // DCHECKs that they match the Get*Key() helpers.
  static inline constexpr absl::string_view kOperand0UtilizationKey =
      "utilization0{}";
  static inline constexpr absl::string_view kOperand1UtilizationKey =
      "utilization1{}";
  static inline constexpr absl::string_view kOperand0BytesAccessedKey =
      "bytes accessed0{}";
  static inline constexpr absl::string_view kOperand1BytesAccessedKey =
      "bytes accessed1{}";
  static inline constexpr absl::string_view kOutputRootBytesAccessedKey =
      "bytes accessedout{}";
  // Dedicated storage for the hot, well-known properties.
  float flops_;
  float transcendentals_;
  float bytes_accessed_;
  float optimal_seconds_;
  float utilization_;
  float operand0_utilization_;
  float operand1_utilization_;
  float operand0_bytes_accessed_;
  float operand1_bytes_accessed_;
  float output_root_bytes_accessed_;
  float reserved0_;
  float reserved1_;
  // Everything else, keyed by property-name string.
  absl::flat_hash_map<std::string, float> named_props_;
};
using ShapeSizeFunction = std::function<int64_t(const Shape&)>;
// Configuration for HloCostAnalysis.
struct Options {
  // Backend-supplied function returning the size in bytes of a Shape.
  ShapeSizeFunction shape_size;
  // Hardware throughput rates ("per second") used when converting raw counts
  // into optimal-seconds estimates.
  Properties per_second_rates = {};
  // NOTE(review): presumably controls whether repeated reads of the same
  // input operand are each counted toward bytes accessed — confirm against
  // the implementation in the .cc file.
  bool count_multiple_input_accesses = false;

  // Convenience setters for the standard per-second rates.
  void set_flops_per_second(float value) {
    per_second_rates[kFlopsKey] = value;
  }
  void set_transcendentals_per_second(float value) {
    per_second_rates[kTranscendentalsKey] = value;
  }
  void set_bytes_per_second(float value) {
    per_second_rates[kBytesAccessedKey] = value;
  }

  // Returns the configured rate for `key` (0 if unset).
  float per_second_rate(absl::string_view key) const {
    return per_second_rates[key];
  }

  // Human-readable dump of the options for debugging.
  std::string ToString() const {
    return absl::StrFormat(
        "HloCostAnalysis::Options{\n"
        " per_second_rates: %s\n"
        " count_multiple_input_accesses: %d\n"
        "}",
        per_second_rates.ToString(), count_multiple_input_accesses);
  }
};
explicit HloCostAnalysis(const Options& options);
explicit HloCostAnalysis(ShapeSizeFunction shape_size,
const Properties& per_second_rates = {});
absl::Status HandleElementwiseUnary(const HloInstruction* hlo) override;
absl::Status HandleElementwiseBinary(const HloInstruction* hlo) override;
absl::Status HandleConstant(const HloInstruction* constant) override;
absl::Status HandleIota(const HloInstruction* iota) override;
absl::Status HandleGetTupleElement(
const HloInstruction* get_tuple_element) override;
absl::Status HandleSelect(const HloInstruction* hlo) override;
absl::Status HandleCompare(const HloInstruction* compare) override;
absl::Status HandleClamp(const HloInstruction* clamp) override;
absl::Status HandleReducePrecision(const HloInstruction* hlo) override;
absl::Status HandleConcatenate(const HloInstruction* concatenate) override;
absl::Status HandleAsyncStart(const HloInstruction* async_start) override;
absl::Status HandleAsyncUpdate(const HloInstruction* async_update) override;
absl::Status HandleAsyncDone(const HloInstruction* async_done) override;
absl::Status HandleCopyStart(const HloInstruction* send) override;
absl::Status HandleCopyDone(const HloInstruction* send_done) override;
absl::Status HandleSend(const HloInstruction* send) override;
absl::Status HandleSendDone(const HloInstruction* send_done) override;
absl::Status HandleRecv(const HloInstruction* recv) override;
absl::Status HandleRecvDone(const HloInstruction* recv_done) override;
absl::Status HandleConvert(const HloInstruction* convert) override;
absl::Status HandleCopy(const HloInstruction* copy) override;
absl::Status HandleDomain(const HloInstruction* domain) override;
absl::Status HandleDot(const HloInstruction* dot) override;
absl::Status HandleConvolution(const HloInstruction* convolution) override;
absl::Status HandleFft(const HloInstruction* fft) override;
absl::Status HandleTriangularSolve(const HloInstruction* hlo) override;
absl::Status HandleCholesky(const HloInstruction* hlo) override;
absl::Status HandleOptimizationBarrier(const HloInstruction* hlo) override;
absl::Status HandleAllGather(const HloInstruction* hlo) override;
absl::Status HandleAllGatherStart(const HloInstruction* hlo) override;
absl::Status HandleAllGatherDone(const HloInstruction* hlo) override;
absl::Status HandleAllReduce(const HloInstruction* crs) override;
absl::Status HandleReduceScatter(const HloInstruction* hlo) override;
absl::Status HandleAllReduceStart(const HloInstruction* hlo) override;
absl::Status HandleAllReduceDone(const HloInstruction* hlo) override;
absl::Status HandleAllToAll(const HloInstruction* hlo) override;
absl::Status HandleCollectiveBroadcast(const HloInstruction* hlo) override;
absl::Status HandleCollectivePermute(const HloInstruction* hlo) override;
absl::Status HandleCollectivePermuteStart(const HloInstruction* hlo) override;
absl::Status HandleCollectivePermuteDone(const HloInstruction* hlo) override;
absl::Status HandleReplicaId(const HloInstruction* hlo) override;
absl::Status HandlePartitionId(const HloInstruction* hlo) override;
absl::Status HandleInfeed(const HloInstruction* infeed) override;
absl::Status HandleOutfeed(const HloInstruction* outfeed) override;
absl::Status HandleRng(const HloInstruction* random) override;
absl::Status HandleRngBitGenerator(const HloInstruction* random) override;
absl::Status HandleRngGetAndUpdateState(
const HloInstruction* random) override;
absl::Status HandleReverse(const HloInstruction* reverse) override;
absl::Status HandleSort(const HloInstruction* sort) override;
absl::Status HandleParameter(const HloInstruction* parameter) override;
absl::Status HandleReduce(const HloInstruction* reduce) override;
absl::Status HandleBatchNormTraining(
const HloInstruction* batch_norm_training) override;
absl::Status HandleBatchNormInference(
const HloInstruction* batch_norm_inference) override;
absl::Status HandleBatchNormGrad(
const HloInstruction* batch_norm_grad) override;
absl::Status HandleFusion(const HloInstruction* fusion) override;
absl::Status HandleCall(const HloInstruction* call) override;
absl::Status HandleCustomCall(const HloInstruction* custom_call) override;
absl::Status HandleSlice(const HloInstruction* slice) override;
absl::Status HandleDynamicSlice(const HloInstruction* dynamic_slice) override;
absl::Status HandleDynamicUpdateSlice(
const HloInstruction* dynamic_update_slice) override;
absl::Status HandleTuple(const HloInstruction* tuple) override;
absl::Status HandleMap(const HloInstruction* map) override;
absl::Status HandleReduceWindow(const HloInstruction* reduce_window) override;
absl::Status HandleSelectAndScatter(
const HloInstruction* instruction) override;
absl::Status HandleBitcast(const HloInstruction* bitcast) override;
absl::Status HandleBroadcast(const HloInstruction* broadcast) override;
absl::Status HandlePad(const HloInstruction* pad) override;
absl::Status HandleReshape(const HloInstruction* reshape) override;
absl::Status HandleDynamicReshape(const HloInstruction* reshape) override;
absl::Status HandleAddDependency(
const HloInstruction* add_dependency) override;
absl::Status HandleAfterAll(const HloInstruction* token) override;
absl::Status HandleTranspose(const HloInstruction* transpose) override;
absl::Status HandleWhile(const HloInstruction* xla_while) override;
absl::Status HandleConditional(const HloInstruction* conditional) override;
absl::Status HandleGather(const HloInstruction* gather) override;
absl::Status HandleScatter(const HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(const HloInstruction* get_size) override;
absl::Status HandleSetDimensionSize(const HloInstruction* set_size) override;
absl::Status HandleTopK(const HloInstruction* topk) override;
absl::Status FinishVisit(const HloInstruction* root) override;
absl::Status Preprocess(const HloInstruction* hlo) override;
absl::Status Postprocess(const HloInstruction* hlo) override;
absl::Status RemoveInstruction(HloInstruction* instruction);
absl::Status RevisitInstruction(HloInstruction* instruction);
int64_t GetShapeSize(const Shape& shape) const;
float flop_count() const;
float transcendental_count() const;
float bytes_accessed() const;
float optimal_seconds() const;
Properties properties(const HloInstruction& hlo) const;
int64_t flop_count(const HloInstruction& hlo) const;
int64_t transcendental_count(const HloInstruction& hlo) const;
int64_t bytes_accessed(const HloInstruction& hlo) const;
int64_t operand_bytes_accessed(const HloInstruction& hlo, int64_t operand_num,
ShapeIndex index = {}) const;
float operand_utilization(const HloInstruction& hlo, int64_t operand_num,
ShapeIndex index = {}) const;
int64_t output_bytes_accessed(const HloInstruction& hlo,
ShapeIndex index = {}) const;
float optimal_seconds(const HloInstruction& hlo) const;
int64_t GetBytesRead(
const HloInstruction& hlo,
std::optional<int64_t> memory_space = std::nullopt) const;
int64_t GetBytesWritten(
const HloInstruction& hlo,
std::optional<int64_t> memory_space = std::nullopt) const;
const Properties& properties() const { return properties_sum_; }
float property(absl::string_view key) { return properties_sum_[key]; }
float per_second_rate(absl::string_view key) const {
return options_.per_second_rate(key);
}
static std::string GetOperandBytesAccessedKey(int64_t operand_num,
const ShapeIndex& index = {});
static std::string GetOperandUtilizationKey(int64_t operand_num,
const ShapeIndex& index = {});
static std::string GetOutputBytesAccessedKey(const ShapeIndex& index = {});
virtual int64_t GetConvolutionFlops(const HloInstruction* convolution);
static int64_t GetConvolutionFlops(const HloInstruction* convolutions,
const Shape& lhs_shape,
const Shape& rhs_shape,
const Shape& result_shape);
static int64_t GetDotFlops(const Shape& lhs_shape, const Shape& result_shape,
const DotDimensionNumbers& dnums);
protected:
virtual absl::Status FusionProcessOutputBytesAccessed(
const HloInstruction* fusion);
virtual absl::Status FusionProcessOperandBytesRead(
const HloInstruction* fusion);
virtual absl::Status FusionCountConstantsMemoryAccess(
const HloInstruction* fusion);
virtual bool ShouldFilterFusionInput(const HloInstruction* fusion,
int64_t input_index) {
return false;
}
virtual bool ShouldFilterFusionInstruction(
const HloInstruction* fusion, const HloInstruction* instruction) {
return false;
}
virtual bool ShouldFilterFusionOutputIndex(const HloInstruction* fusion,
const ShapeIndex& output_index) {
return false;
}
typedef absl::flat_hash_map<const HloInstruction*, Properties>
HloToProperties;
static constexpr int64_t kFmaFlops = 2;
virtual size_t immediate_constant_max_elements() const { return 1; }
virtual std::unique_ptr<HloCostAnalysis> CreateNestedCostAnalysis();
virtual absl::StatusOr<Properties> ProcessSubcomputation(
HloComputation* computation);
absl::Status HandleElementwiseOp(const HloInstruction* hlo_instruction);
static float GetPropertyForHlo(const HloInstruction& hlo,
absl::string_view key,
const HloToProperties& hlo_to_properties);
virtual int64_t FusionParameterReadBytes(const HloInstruction* hlo) const;
virtual absl::Status FusionCalculateUtilizations(
const HloInstruction* fusion);
HloToProperties hlo_properties_;
bool current_should_compute_bottleneck_time_;
Properties current_properties_;
Properties properties_sum_;
Options options_;
virtual bool KeyToCopyFromSubcomputation(absl::string_view key) const;
HloCostAnalysis(const HloCostAnalysis&) = delete;
HloCostAnalysis& operator=(const HloCostAnalysis&) = delete;
};
}
#endif
#include "xla/service/hlo_cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/lib/gtl/map_util.h"
#include "tsl/platform/errors.h"
namespace xla {
// Constructs the analysis with an explicit Options struct.
HloCostAnalysis::HloCostAnalysis(const Options& options) : options_(options) {}
// Legacy convenience constructor: wraps the shape-size callback and the
// per-second rates into an Options struct and delegates to the ctor above.
HloCostAnalysis::HloCostAnalysis(ShapeSizeFunction shape_size,
                                 const Properties& per_second_rates)
    : HloCostAnalysis(Options{shape_size, per_second_rates}) {}
// Resets per-instruction state before a Handle* visitor runs, and seeds the
// default cost model: the op writes its whole output exactly once and reads
// every operand in full at utilization 1.0. Individual Handle* visitors
// overwrite these defaults where an op deviates from the common case.
// Change vs. original: each shape size is computed once and reused instead of
// calling GetShapeSize() twice per shape (it invokes the user-supplied
// shape_size callback); behavior is identical.
absl::Status HloCostAnalysis::Preprocess(const HloInstruction* hlo) {
  current_properties_ = Properties();
  current_should_compute_bottleneck_time_ = true;
  const int64_t output_bytes = GetShapeSize(hlo->shape());
  // Accumulated in float to match the Properties value type.
  float bytes_accessed = output_bytes;
  current_properties_.set_output_bytes_accessed(output_bytes);
  for (int64_t i = 0; i < hlo->operand_count(); ++i) {
    const int64_t operand_bytes = GetShapeSize(hlo->operand(i)->shape());
    bytes_accessed += operand_bytes;
    current_properties_.set_operand_bytes_accessed(i, operand_bytes);
    current_properties_.set_operand_utilization(i, 1.0);
  }
  current_properties_[kBytesAccessedKey] = bytes_accessed;
  return absl::OkStatus();
}
absl::Status HloCostAnalysis::Postprocess(const HloInstruc | #include "xla/service/hlo_cost_analysis.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/client.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/client/padding.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/local_service.h"
#include "xla/service/service.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
// Pointer width assumed by the tests when sizing tuple/opaque shapes.
constexpr int64_t kPointerSize = 8;
// Shape-size callback handed to every HloCostAnalysis built in these tests.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
// Fixture that pre-builds small reusable XlaComputations (add, add+exp,
// sigmoid, max, gt) which individual tests embed as subcomputations of
// Map/Reduce/SelectAndScatter instructions.
class HloCostAnalysisTest : public ::testing::Test {
 protected:
  HloCostAnalysisTest()
      : client_(ClientLibrary::LocalClientOrDie()),
        service_(static_cast<Service*>(ClientLibrary::GetXlaService(
            static_cast<LocalClient*>(client_)->platform()))) {
    // x -> exp(x + 0.5)
    {
      XlaBuilder builder("add_and_exp");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto half = ConstantR0<float>(&builder, 0.5);
      Exp(Add(x, half));
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      add_and_exp_ = std::move(computation_status).value();
    }
    // (x, y) -> x + y
    {
      XlaBuilder builder("add");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Add(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      add_ = std::move(computation_status).value();
    }
    // x -> 1 / (1 + exp(-x))
    {
      XlaBuilder builder("sigmoid");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto one = ConstantR0<float>(&builder, 1.0);
      Div(one, Add(one, Exp(Neg(x))));
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      sigmoid_ = std::move(computation_status).value();
    }
    // (x, y) -> max(x, y)
    {
      XlaBuilder builder("max");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Max(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      max_ = std::move(computation_status).value();
    }
    // (x, y) -> x > y
    {
      XlaBuilder builder("gt");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Gt(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      gt_ = std::move(computation_status).value();
    }
  }
  // Builds an HloModule from the computation currently held by `builder`.
  std::unique_ptr<HloModule> BuildHloGraph(XlaBuilder* builder) {
    auto computation_status = builder->Build();
    TF_CHECK_OK(computation_status.status());
    auto computation = std::move(computation_status).value();
    auto config = HloModule::CreateModuleConfigFromProto(computation.proto(),
                                                         DebugOptions())
                      .value();
    return HloModule::CreateFromProto(computation.proto(), config).value();
  }
  Client* client_;
  Service* service_;
  // Pre-built computations; see the constructor above.
  XlaComputation add_;
  XlaComputation add_and_exp_;
  XlaComputation sigmoid_;
  XlaComputation max_;
  XlaComputation gt_;
};
// Dot([10,5], [5,30]): expect 2*M*N*K flops and full operand/output traffic.
TEST_F(HloCostAnalysisTest, MatrixMultiply) {
  XlaBuilder builder("matrix_multiply");
  auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
  auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
  Dot(lhs, rhs);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 + 5 * 30 + 10 * 30));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}
// DotGeneral with two contracting dimensions on each operand.
TEST_F(HloCostAnalysisTest, DotGeneral) {
  XlaBuilder builder("matrix_multiply");
  auto lhs =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
  auto rhs =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
  DotDimensionNumbers dnums;
  dnums.add_lhs_contracting_dimensions(1);
  dnums.add_lhs_contracting_dimensions(2);
  dnums.add_rhs_contracting_dimensions(0);
  dnums.add_rhs_contracting_dimensions(1);
  DotGeneral(lhs, rhs, dnums);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 10 * 30));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 5 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 5 * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}
// DotGeneral with one contracting and one batch dimension per operand.
TEST_F(HloCostAnalysisTest, DotGeneral2) {
  XlaBuilder builder("matrix_multiply");
  auto lhs =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
  auto rhs =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
  DotDimensionNumbers dnums;
  dnums.add_lhs_contracting_dimensions(1);
  dnums.add_lhs_batch_dimensions(2);
  dnums.add_rhs_contracting_dimensions(0);
  dnums.add_rhs_batch_dimensions(1);
  DotGeneral(lhs, rhs, dnums);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 5 * 10 * 30));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 5 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 5 * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 5 * 10 * 30);
}
// DotGeneral with no contracting or batch dims (full outer product).
TEST_F(HloCostAnalysisTest, DotGeneral3) {
  XlaBuilder builder("matrix_multiply");
  auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
  auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
  DotDimensionNumbers dnums;
  DotGeneral(lhs, rhs, dnums);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 + 5 * 30 + 5 * 5 * 10 * 30));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 5 * 5 * 10 * 30);
}
// Map over a [10] vector with add_and_exp: one add + one exp per element.
TEST_F(HloCostAnalysisTest, Map) {
  XlaBuilder builder("map");
  auto input = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10}), "in");
  Map(&builder, {input}, add_and_exp_, {0});
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 10);
  EXPECT_EQ(analysis.transcendental_count(), 10);
  EXPECT_EQ(analysis.bytes_accessed(), 80);
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}
// Valid-padded 3x3 conv over 10x20: 8x18 outputs, 2 flops per tap.
TEST_F(HloCostAnalysisTest, Convolution) {
  XlaBuilder builder("convolution");
  auto input = Parameter(
      &builder, 0,
      ShapeUtil::MakeShape(F32, {1, 1, 10,
                                 20}),
      "input");
  auto kernel = Parameter(
      &builder, 1,
      ShapeUtil::MakeShape(F32, {1, 1, 3,
                                 3}),
      "kernel");
  Conv(input, kernel, {1, 1}, Padding::kValid);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 8 * 18 * 2 * 3 * 3);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 20 + 3 * 3 + 8 * 18));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 3 * 3);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 8 * 18);
}
// Same-padded 3x3 conv over 3x3: flops counted per in-bounds tap only
// (see the per-position sum in the flop_count expectation).
TEST_F(HloCostAnalysisTest, ConvolutionSame) {
  XlaBuilder builder("convolution_same");
  const int iw = 3;
  const int ih = 3;
  const int kw = 3;
  const int kh = 3;
  const int ow = iw;
  const int oh = ih;
  const int sx = 1;
  const int sy = 1;
  auto input = Parameter(
      &builder, 0,
      ShapeUtil::MakeShape(F32, {1, 1, ih,
                                 iw}),
      "input");
  auto kernel = Parameter(
      &builder, 1,
      ShapeUtil::MakeShape(F32, {1, 1, kh,
                                 kw}),
      "kernel");
  Conv(input, kernel, {sx, sy}, Padding::kSame);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * (4 + 6 + 4 + 6 + 9 + 6 + 4 + 6 + 4));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (iw * ih + kw * kh + ow * oh));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * iw * ih);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * kw * kh);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * ow * oh);
}
// Extreme lhs dilation/stride: flop count must stay proportional to kLarge.
TEST_F(HloCostAnalysisTest, ConvolutionExtreme) {
  XlaBuilder builder("convolution");
  constexpr int64_t kLarge = 512 * 1024;
  auto input = Parameter(
      &builder, 0,
      ShapeUtil::MakeShape(F32, {1, 1, kLarge}),
      "input");
  auto kernel = Parameter(
      &builder, 1,
      ShapeUtil::MakeShape(F32, {1, 1, kLarge}),
      "kernel");
  ConvGeneralDilated(input, kernel, {kLarge - 1}, {{0, 0}}, {kLarge}, {1},
                     XlaBuilder::CreateDefaultConvDimensionNumbers(1));
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}
// Extreme symmetric padding: same guard against overcounting flops.
TEST_F(HloCostAnalysisTest, ConvolutionExtreme2) {
  XlaBuilder builder("convolution");
  constexpr int64_t kLarge = 512 * 1024;
  auto input = Parameter(
      &builder, 0,
      ShapeUtil::MakeShape(F32, {1, 1, 1}),
      "input");
  auto kernel = Parameter(
      &builder, 1,
      ShapeUtil::MakeShape(F32, {1, 1, kLarge}),
      "kernel");
  ConvGeneralDilated(input, kernel, {1}, {{kLarge - 1, kLarge - 1}}, {1}, {1},
                     XlaBuilder::CreateDefaultConvDimensionNumbers(1));
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}
// Grouped conv (feature_group_count = 120): all costs scale by group count.
TEST_F(HloCostAnalysisTest, ConvolutionWithFeatureGroup) {
  XlaBuilder builder("convolution");
  auto input = Parameter(
      &builder, 0,
      ShapeUtil::MakeShape(F32, {1, 120, 10,
                                 20}),
      "input");
  auto kernel = Parameter(
      &builder, 1,
      ShapeUtil::MakeShape(F32, {120, 1, 3,
                                 3}),
      "kernel");
  Conv(input, kernel, {1, 1}, Padding::kValid, 120);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 120 * 8 * 18 * 2 * 3 * 3);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (120 * 10 * 20 + 120 * 3 * 3 + 120 * 8 * 18));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 120 * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 120 * 3 * 3);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 120 * 8 * 18);
}
// Row reduction [10,20] -> [10]: (20 - 1) adds per output element.
TEST_F(HloCostAnalysisTest, Reduce) {
  XlaBuilder builder("reduce");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  Reduce(input, ConstantR0<float>(&builder, 0.0f), add_, {1});
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 10 * 20 - 10);
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 10));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}
// Non-overlapping 4x5 windows over [10,20]: 2x4 outputs.
TEST_F(HloCostAnalysisTest, ReduceWindow) {
  XlaBuilder builder("reduce_window");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {4, 5},
               Padding::kValid);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 2 * 4));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 4);
}
// Overlapping windows (stride smaller than window): flops are still
// counted per output element, not per input element.
TEST_F(HloCostAnalysisTest, ReduceWindowWithOverlaps) {
  XlaBuilder builder("reduce_window");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {8, 8}), "input");
  ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {2, 1},
               Padding::kValid);
  auto hlo_module = BuildHloGraph(&builder);
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  int n_output_elements = 3 * 4;
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(root->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), n_output_elements * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (8 * 8 + 1 + n_output_elements));
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 8 * 8);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * n_output_elements);
}
// reduce-window that is effectively a single-dim reduce + broadcast gets
// the cheaper flop estimate in the expectation below.
TEST_F(HloCostAnalysisTest, ReduceWindowSingleDimReduceBroadcast) {
  absl::string_view hlo_text = R"(
HloModule fusion.50
region_0.868 {
Arg_1.870 = f32[] parameter(1)
Arg_0.869 = f32[] parameter(0)
ROOT maximum.871 = f32[] maximum(Arg_0.869, Arg_1.870)
}
ENTRY fusion.50 {
constant.367 = f32[] constant(-inf)
param0 = f32[2,3,1024,1024]{2,3,1,0} parameter(0)
ROOT reduce-window.159 = f32[2,3,1024,1024]{2,3,1,0} reduce-window(param0, constant.367), window={size=1x1x1x2047 pad=0_0x0_0x0_0x1023_1023}, to_apply=region_0.868
}
)";
  auto hlo_module = ParseAndReturnUnverifiedModule(hlo_text).value();
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), (2 * 3 * 1024) + (1024 - 1));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 2 * 3 * 1024 * 1024);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 2 * 3 * 1024 * 1024);
}
// Variadic reduce-window over two inputs with a tuple-producing reducer.
TEST_F(HloCostAnalysisTest, ReduceWindowVariadic) {
  XlaBuilder builder("reduce_window_variadic");
  auto elem_shape = ShapeUtil::MakeShape(F32, {});
  auto p2 = Parameter(&builder, 0, elem_shape, "x0");
  auto p3 = Parameter(&builder, 1, elem_shape, "x1");
  auto p4 = Parameter(&builder, 2, elem_shape, "y0");
  auto p5 = Parameter(&builder, 3, elem_shape, "y1");
  absl::InlinedVector<XlaOp, 2> compute_vec = {Min(p2, p4), Min(p3, p5)};
  Tuple(&builder, compute_vec);
  TF_ASSERT_OK_AND_ASSIGN(auto compute_tuple, builder.Build());
  auto input1 =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input1");
  auto input2 =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {10, 20}), "input2");
  auto init = ConstantR0<float>(&builder, 0);
  ReduceWindow({input1, input2}, {init, init}, compute_tuple, {4, 5}, {4, 5},
               Padding::kValid);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 4 * 2 * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 * 2 + 2 * 3));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 4);
}
// SelectAndScatter: select flops per window plus scatter flops per source
// element; the full operand is both read and rewritten.
TEST_F(HloCostAnalysisTest, SelectAndScatter) {
  XlaBuilder builder("select_and_scatter");
  auto operand =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  auto source =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 4}), "source");
  SelectAndScatter(operand, gt_, {4, 5}, {4, 5}, Padding::kValid, source,
                   ConstantR0<float>(&builder, 0), add_);
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1 + 1));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 20 + 2 * 4 + 1 + 10 * 20));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 2 * 4);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 20);
}
// Default broadcast accounting: the scalar operand is charged once.
TEST_F(HloCostAnalysisTest, Broadcast) {
  XlaBuilder b("broadcast");
  Broadcast(ConstantR0<float>(&b, 42), {10, 7});
  auto hlo_module = BuildHloGraph(&b);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}
// With count_multiple_input_accesses the operand is charged once per
// output element instead of once overall.
TEST_F(HloCostAnalysisTest, BroadcastCountMultipleInputAccesses) {
  XlaBuilder b("broadcast");
  Broadcast(ConstantR0<float>(&b, 42), {10, 7});
  auto hlo_module = BuildHloGraph(&b);
  HloCostAnalysis analysis(HloCostAnalysis::Options{
      .shape_size = ShapeSize, .count_multiple_input_accesses = true});
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 7);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}
// Dot + bias add + sigmoid map: flop and transcendental counts accumulate
// over the whole graph.
TEST_F(HloCostAnalysisTest, FullyConnectedForward) {
  XlaBuilder builder("fully_connected_forward");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "input");
  auto weight =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 20}), "weight");
  auto bias = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {20}), "bias");
  Map(&builder, {Add(Dot(input, weight), bias, {1})}, sigmoid_, {0, 1});
  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 2 * 1000 + 200 + 3 * 200);
  EXPECT_EQ(analysis.transcendental_count(), 200);
}
// A 1x1 conv over 64x64 and a 64x64 matmul must report identical flops.
TEST_F(HloCostAnalysisTest, MatmulAndConvolutionCanBeTheSameComputation) {
  HloCostAnalysis conv_analysis(ShapeSize);
  {
    XlaBuilder builder("conv_looking_matmul");
    auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
                         "input");
    auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
                         "weights");
    Conv(lhs, rhs, {1, 1}, Padding::kSame);
    auto hlo_module = BuildHloGraph(&builder);
    ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
        &conv_analysis));
  }
  HloCostAnalysis matmul_analysis(ShapeSize);
  {
    XlaBuilder builder("matmul");
    auto lhs =
        Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64}), "input");
    auto rhs =
        Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64}), "weights");
    Dot(lhs, rhs);
    auto hlo_module = BuildHloGraph(&builder);
    ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
        &matmul_analysis));
  }
  EXPECT_EQ(conv_analysis.flop_count(), matmul_analysis.flop_count());
}
// Fusion tests only need the standard HLO test harness.
using FusionCostAnalysis = HloTestBase;
// A fused dynamic-slice + dynamic-update-slice must be charged like a bare
// dynamic-update-slice: the updated operand is written in place.
TEST_F(FusionCostAnalysis, LoopFusionDynUpdateSlice) {
  const char* hlo_fusion_module_str = R"(
HloModule module
_.1 {
tmp_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
tmp_1 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
tmp_2 = s32[]{:T(128)} parameter(1)
tmp_3 = s32[]{:T(128)} constant(0)
tmp_4 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)S(3)} dynamic-slice(tmp_1, tmp_2, tmp_3, tmp_3, tmp_3), dynamic_slice_sizes={1,32,256,1152}
tmp_11 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(tmp_0, tmp_4, tmp_2, tmp_3, tmp_3, tmp_3)
ROOT tmp_20 = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) tuple(tmp_11)
}
ENTRY _ {
_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
_1 = s32[]{:T(128)} parameter(1)
_4 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
ROOT _ = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) fusion(_0, _1, _4), kind=kLoop, calls=_.1
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_fusion_module_str));
  HloCostAnalysis fusion_analysis(ShapeSize);
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  ASSERT_IS_OK(fusion->Accept(&fusion_analysis));
  const char* hlo_dus_module_str = R"(
HloModule module
ENTRY _ {
_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
_1 = s32[]{:T(128)} parameter(1)
_2 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
ROOT _ = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(_0, _2, _1, _1, _1, _1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto dus_module,
                          ParseAndReturnVerifiedModule(hlo_dus_module_str));
  HloCostAnalysis dus_analysis(ShapeSize);
  auto dus = dus_module->entry_computation()->root_instruction();
  ASSERT_IS_OK(dus->Accept(&dus_analysis));
  // In-place update: the destination operand contributes zero read bytes.
  EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), 0);
  EXPECT_EQ(fusion_analysis.bytes_accessed(), dus_analysis.bytes_accessed());
  EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
            dus_analysis.operand_bytes_accessed(*dus, 0));
  EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
            dus_analysis.operand_bytes_accessed(*dus, 2));
  EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2),
            dus_analysis.operand_bytes_accessed(*dus, 1));
  EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
            dus_analysis.output_bytes_accessed(*dus));
}
TEST_F(FusionCostAnalysis, LoopFusion) {
for (int i = 0; i < 4; ++i) {
Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2});
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
0.0f, 1.0f, 2, 2)));
auto c2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
1.0f, 2.0f, 2, 2)));
auto c3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace(
2.0f, 3.0f, 2, 2)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c2));
auto clamp = builder.AddInstruction(
HloInstruction::CreateTernary(r2f32, HloOpcode::kClamp, c2, add, add));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r2f32, HloOpcode::kExp, add));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, exp, c3));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kSubtract, mul, clamp));
auto tuple = HloInstruction::CreateTuple({sub, sub, mul, c1});
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop); |
1,983 | cpp | tensorflow/tensorflow | broadcast_canonicalizer | third_party/xla/xla/service/broadcast_canonicalizer.cc | third_party/xla/xla/service/broadcast_canonicalizer_test.cc | #ifndef XLA_SERVICE_BROADCAST_CANONICALIZER_H_
#define XLA_SERVICE_BROADCAST_CANONICALIZER_H_
#include <optional>
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites broadcasts whose `dimensions` attribute is not
// sorted into an equivalent broadcast with sorted dimensions followed by a
// transpose, so downstream passes only see canonical broadcasts.
class BroadcastCanonicalizer : public HloModulePass {
 public:
  explicit BroadcastCanonicalizer();
  absl::string_view name() const override { return "broadcast_canonicalizer"; }
  using HloPassInterface::Run;
  // Returns true iff at least one broadcast in `module` was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
#endif
#include "xla/service/broadcast_canonicalizer.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
BroadcastCanonicalizer::BroadcastCanonicalizer() {}
// Replaces every broadcast with unsorted `dimensions` by a sorted-dims
// broadcast plus a transpose that restores the original output layout.
// Returns true iff anything changed.
absl::StatusOr<bool> BroadcastCanonicalizer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (const auto& computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
      // Only broadcasts with unsorted dims need rewriting.
      if (hlo->opcode() != HloOpcode::kBroadcast) {
        continue;
      }
      if (absl::c_is_sorted(hlo->dimensions())) {
        continue;
      }
      std::vector<int64_t> new_dims(hlo->dimensions().begin(),
                                    hlo->dimensions().end());
      std::vector<int64_t> original_dims(hlo->dimensions().begin(),
                                         hlo->dimensions().end());
      // Output extents of the replacement broadcast; start from the old
      // output shape and patch the broadcasted positions below.
      std::vector<int64_t> new_broadcast_dims(hlo->shape().dimensions().begin(),
                                              hlo->shape().dimensions().end());
      absl::c_sort(new_dims);
      const int64_t rank = hlo->shape().rank();
      // With sorted dims, operand dimension i lands in output dimension
      // new_dims[i], so that output extent must match the operand extent.
      for (int i = 0; i < new_dims.size(); ++i) {
        new_broadcast_dims[new_dims[i]] =
            hlo->operand(0)->shape().dimensions(i);
      }
      auto new_broadcast = MakeBroadcastHlo(hlo->mutable_operand(0), new_dims,
                                            new_broadcast_dims);
      // Permutation mapping the sorted-dims broadcast back to the original
      // output; non-broadcast dimensions keep the identity from the iota.
      std::vector<int64_t> transpose_dims(rank);
      absl::c_iota(transpose_dims, 0);
      for (int i = 0; i < new_dims.size(); ++i) {
        transpose_dims[new_dims[i]] = new_dims[std::distance(
            original_dims.begin(), absl::c_find(original_dims, new_dims[i]))];
      }
      TF_ASSIGN_OR_RETURN(new_broadcast,
                          MakeTransposeHlo(new_broadcast, transpose_dims));
      TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, new_broadcast));
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/broadcast_canonicalizer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class BroadcastCanonicalizerTest : public HloTestBase {};
// Verifies the pass runs cleanly on a broadcast with unsorted dimensions
// ({1,0,3}) feeding a reshape; the empty FileCheck pattern only checks the
// rewritten module is produced without error.
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast) {
  const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
  parameter.2 = f32[2,3,2]{2,1,0} parameter(0)
  %broadcast.399 = f32[3,2,8,2]{3,2,1,0} broadcast(%parameter.2), dimensions={1,0,3}
  ROOT %reshape.43 = f32[3,16,1,2]{3,2,1,0} reshape(f32[3,2,8,2]{3,2,1,0} %broadcast.399)
}
)";
  RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
// Same as ReshapeBroadcast but with a rank-5 output and unsorted dimensions
// {3,4,1}, exercising the permutation construction on a larger rank.
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast22) {
  const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
  parameter.2 = f32[5,6,7]{2,1,0} parameter(0)
  %broadcast.399 = f32[8,7,9,5,6]{4,3,2,1,0} broadcast(%parameter.2), dimensions={3,4,1}
  ROOT %reshape.43 = f32[8,7,45,1,6]{4,3,2,1,0} reshape(%broadcast.399)
}
)";
  RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
}
} |
1,984 | cpp | tensorflow/tensorflow | while_loop_expensive_invariant_code_motion | third_party/xla/xla/service/while_loop_expensive_invariant_code_motion.cc | third_party/xla/xla/service/while_loop_expensive_invariant_code_motion_test.cc | #ifndef XLA_SERVICE_WHILE_LOOP_EXPENSIVE_INVARIANT_CODE_MOTION_H_
#define XLA_SERVICE_WHILE_LOOP_EXPENSIVE_INVARIANT_CODE_MOTION_H_
#include <functional>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
// HLO pass that hoists loop-invariant instructions out of while-loop bodies,
// but only instructions the caller marks as individually worth hoisting
// (e.g. dots/convolutions), and only when hoisting does not inflate live
// memory (see the output-size vs. transitive-input-size check in the .cc).
class WhileLoopExpensiveInvariantCodeMotion : public HloModulePass {
 public:
  // Returns the byte size of a shape; used for the inflation check.
  using ShapeSizeFunction = std::function<int64_t(const Shape&)>;
  // `worth_hoisting_individually` decides which instructions are candidates;
  // `shape_size_function` measures shapes (defaults to element byte size).
  explicit WhileLoopExpensiveInvariantCodeMotion(
      HloPredicate worth_hoisting_individually,
      ShapeSizeFunction shape_size_function = ShapeUtil::ByteSizeOfElements)
      : shape_size_function_(std::move(shape_size_function)),
        worth_hoisting_individually_(std::move(worth_hoisting_individually)) {}
  ~WhileLoopExpensiveInvariantCodeMotion() override = default;
  absl::string_view name() const override {
    return "while-loop-expensive-invariant-code-motion";
  }
  using HloPassInterface::Run;
  // Returns true iff anything was hoisted anywhere in the module.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  // Attempts hoisting for a single kWhile instruction.
  absl::StatusOr<bool> TryHoistingInvariantInstructionsFromWhileBody(
      HloInstruction* while_instr);
  ShapeSizeFunction shape_size_function_;
  HloPredicate worth_hoisting_individually_;
};
}
#endif
#include "xla/service/while_loop_expensive_invariant_code_motion.h"
#include <iterator>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/service/while_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace {
using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::InlinedVector;
// Per-invariant-instruction bookkeeping used during hoisting.
struct InvariantInfo {
  explicit InvariantInfo(int64_t user_count)
      : remaining_user_count(user_count) {}
  // Sum of shape sizes of this instruction's transitive invariant inputs;
  // compared against its output size to reject inflating hoists.
  int64_t transitive_input_size = 0;
  // Number of in-loop users not yet accounted for; when it reaches zero the
  // instructions blocked on this one may become hoistable.
  int64_t remaining_user_count;
  // The clone created outside the loop, or null if not hoisted yet.
  HloInstruction* hoisted_copy = nullptr;
  // Instructions whose hoisting is deferred because this operand still has
  // remaining in-loop users.
  InlinedVector<HloInstruction*, 2> blocked_users;
};
// Clones `to_hoist` — and, transitively, any of its invariant operands that
// have not been hoisted yet — into the computation containing `while_instr`,
// recording each clone in the matching InvariantInfo::hoisted_copy. Uses of
// the while body's parameter are rewired to the while's init operand.
static void CreateLoopInvariantCopy(
    flat_hash_map<HloInstruction*, InvariantInfo>* invariant_instructions,
    HloInstruction* while_instr, HloInstruction* to_hoist) {
  HloComputation* parent_of_while = while_instr->parent();
  HloComputation* while_body = while_instr->while_body();
  // Explicit DFS frame: the instruction and the index of the next operand to
  // visit (avoids recursion on deep operand chains).
  struct DFSFrame {
    HloInstruction* instruction;
    int64_t operand_index;
  };
  InlinedVector<DFSFrame, 8> dfs_stack;
  dfs_stack.push_back({to_hoist, 0});
  HloInstruction* while_body_param = while_body->parameter_instruction(0);
  HloInstruction* while_operand = while_instr->mutable_operand(0);
  do {
    DFSFrame* frame = &dfs_stack.back();
    // All operands visited: every operand now has a hoisted copy, so this
    // instruction itself can be cloned outside the loop (post-order clone).
    if (frame->operand_index == frame->instruction->operand_count()) {
      HloInstruction* old_instruction = frame->instruction;
      InvariantInfo& info = FindOrDie(*invariant_instructions, old_instruction);
      if (info.hoisted_copy == nullptr) {
        // Map each in-loop operand to its out-of-loop counterpart: the while
        // parameter becomes the while's init value, anything else its clone.
        auto get_new_operand = [&](HloInstruction* old_operand) {
          return old_operand == while_body_param
                     ? while_operand
                     : FindOrDie(*invariant_instructions, old_operand)
                           .hoisted_copy;
        };
        InlinedVector<HloInstruction*, 4> new_operands;
        absl::c_transform(old_instruction->operands(),
                          std::back_inserter(new_operands), get_new_operand);
        HloInstruction* new_instruction = parent_of_while->AddInstruction(
            old_instruction->CloneWithNewOperands(old_instruction->shape(),
                                                  new_operands));
        info.hoisted_copy = new_instruction;
      }
      dfs_stack.pop_back();
      continue;
    }
    HloInstruction* next_operand =
        frame->instruction->mutable_operand(frame->operand_index++);
    // Skip operands that need no clone: the parameter itself, or operands
    // already cloned on an earlier traversal.
    if (next_operand == while_body_param ||
        FindOrDie(*invariant_instructions, next_operand).hoisted_copy !=
            nullptr) {
      continue;
    }
    dfs_stack.push_back({next_operand, 0});
  } while (!dfs_stack.empty());
}
}
// Attempts to hoist loop-invariant instructions out of `while_instr`'s body
// into the parent computation. An instruction is hoisted only if (a) all its
// operands are invariant, (b) its output size does not exceed the transitive
// size of its invariant inputs (no memory inflation), and (c) the caller's
// `worth_hoisting_individually_` predicate approves it. Returns true iff
// anything was hoisted.
absl::StatusOr<bool> WhileLoopExpensiveInvariantCodeMotion::
    TryHoistingInvariantInstructionsFromWhileBody(HloInstruction* while_instr) {
  auto print_no_metadata = HloPrintOptions{}.set_print_metadata(false);
  // Only tuple-shaped while loops are handled.
  if (!while_instr->shape().IsTuple()) {
    return false;
  }
  std::string while_instr_name = while_instr->ToString(print_no_metadata);
  VLOG(2) << "Trying to hoist from " << while_instr_name;
  // A loop that runs at most once gains nothing from hoisting.
  auto maybe_upper_bound = ComputeWhileLoopTripCountUpperBound(while_instr);
  if (maybe_upper_bound && *maybe_upper_bound <= 1) {
    VLOG(2) << "Loop has a trip count of at most 1, skipping.";
    return false;
  }
  HloComputation* while_body = while_instr->while_body();
  // Bookkeeping for every instruction currently known to be invariant.
  flat_hash_map<HloInstruction*, InvariantInfo> invariant_instructions;
  // Candidates deferred until their blocking operands lose all in-loop users;
  // the value is the number of still-blocking operands.
  flat_hash_map<HloInstruction*, int64_t> to_hoist_when_ready;
  // Seed with array-shaped GTEs that pass through the loop unchanged. The
  // root-tuple user is excluded from the user count up front.
  for (auto* instr : WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
    if (instr->shape().IsArray()) {
      auto emplace_result = invariant_instructions.emplace(
          instr, InvariantInfo(instr->user_count() - 1));
      CHECK(emplace_result.second);
      InvariantInfo& info = emplace_result.first->second;
      info.transitive_input_size = shape_size_function_(instr->shape());
    }
  }
  // Bail out entirely on bodies containing domains or SPMD shard-shape
  // custom-calls; hoisting across those is not supported.
  for (auto* instruction : while_body->MakeInstructionPostOrder()) {
    if (instruction->opcode() == HloOpcode::kDomain ||
        instruction->IsCustomCall("SPMDFullToShardShape") ||
        instruction->IsCustomCall("SPMDShardShapeToFull")) {
      return false;
    }
  }
  std::vector<HloInstruction*> instructions_to_replace;
  std::vector<HloInstruction*> replacement_instructions;
  auto hoist = [&](HloInstruction* instruction, const InvariantInfo& info) {
    // Already cloned as part of another hoisted instruction's operand chain.
    if (info.hoisted_copy) {
      return;
    }
    VLOG(2) << "Hoisting " << instruction->ToString(print_no_metadata);
    CreateLoopInvariantCopy(&invariant_instructions, while_instr, instruction);
    instructions_to_replace.push_back(instruction);
    replacement_instructions.push_back(info.hoisted_copy);
  };
  // Operands already accounted for while processing the current instruction,
  // so duplicate operands are not double-counted.
  flat_hash_set<HloInstruction*> checked_operands;
  for (auto* instruction : while_body->MakeInstructionPostOrder()) {
    // Side-effecting ops, parameters, control-dependent ops, and the root
    // cannot be hoisted.
    if (instruction->HasSideEffect() ||
        instruction->opcode() == HloOpcode::kParameter ||
        !instruction->control_predecessors().empty() ||
        !instruction->control_successors().empty() ||
        instruction == while_body->root_instruction()) {
      continue;
    }
    auto is_invariant = [&](HloInstruction* op) {
      return invariant_instructions.find(op) != invariant_instructions.end();
    };
    // An instruction is invariant iff all of its operands are.
    if (!absl::c_all_of(instruction->operands(), is_invariant)) {
      continue;
    }
    auto emplace_result = invariant_instructions.emplace(
        instruction, InvariantInfo(instruction->user_count()));
    CHECK(emplace_result.second);
    InvariantInfo& instr_info = emplace_result.first->second;
    // The root-tuple user does not count as a "real" in-loop user.
    for (auto* user : instruction->users()) {
      if (user == while_body->root_instruction()) {
        --instr_info.remaining_user_count;
        break;
      }
    }
    int64_t num_blocking_operands = 0;
    int64_t output_size = 0;
    for (auto* operand : instruction->operands()) {
      auto& operand_info = invariant_instructions.at(operand);
      // Account each distinct operand exactly once per instruction.
      if (!checked_operands.contains(operand)) {
        instr_info.transitive_input_size += operand_info.transitive_input_size;
        --operand_info.remaining_user_count;
        checked_operands.insert(operand);
      }
      if (operand_info.remaining_user_count == 0) {
        // This operand's last in-loop use is gone: unblock anything that was
        // deferred on it, hoisting those whose last blocker this was.
        for (auto* user : operand_info.blocked_users) {
          auto it = to_hoist_when_ready.find(user);
          if (it != to_hoist_when_ready.end()) {
            auto& num_blocking = it->second;
            CHECK_GT(num_blocking, 0);
            --num_blocking;
            if (num_blocking == 0) {
              hoist(user, invariant_instructions.at(user));
              to_hoist_when_ready.erase(it);
            }
          }
        }
        operand_info.blocked_users.clear();
      } else if (operand_info.remaining_user_count > 0) {
        // Operand still used inside the loop: this instruction must wait.
        ++num_blocking_operands;
        if (operand_info.blocked_users.empty() ||
            operand_info.blocked_users.back() != instruction) {
          operand_info.blocked_users.push_back(instruction);
        }
      } else {
        LOG(FATAL)
            << "An instruction should not have number of negative users.";
      }
    }
    checked_operands.erase(checked_operands.begin(), checked_operands.end());
    // Total array output size across all subshapes of this instruction.
    ShapeUtil::ForEachSubshape(
        instruction->shape(),
        [&output_size, this](const Shape& subshape,
                             const ShapeIndex& /*index*/) {
          if (subshape.IsArray()) {
            output_size += shape_size_function_(subshape);
          }
        });
    // Reject hoists that would inflate live memory.
    if (output_size > instr_info.transitive_input_size) {
      continue;
    }
    if (!worth_hoisting_individually_(instruction)) {
      continue;
    }
    // Defer until all blocking operands are free; otherwise hoist now.
    if (num_blocking_operands > 0) {
      to_hoist_when_ready.emplace(instruction, num_blocking_operands);
      continue;
    }
    hoist(instruction, instr_info);
  }
  if (instructions_to_replace.empty()) {
    return false;
  }
  // Thread the hoisted values into the loop as extra live-in tuple elements
  // and replace the in-body originals with the corresponding GTEs.
  TF_ASSIGN_OR_RETURN(
      WhileUtil::MakeInstructionsLiveInResult live_in_instructions_result,
      WhileUtil::MakeInstructionsLiveIn(while_instr, replacement_instructions));
  HloComputation* new_while_body =
      live_in_instructions_result.new_while_instr->while_body();
  for (int i = 0; i < instructions_to_replace.size(); i++) {
    HloInstruction* instruction_to_replace_in_new_while =
        FindOrDie(live_in_instructions_result.while_body_instruction_map,
                  instructions_to_replace[i]);
    TF_RETURN_IF_ERROR(new_while_body->ReplaceInstruction(
        instruction_to_replace_in_new_while,
        live_in_instructions_result.while_body_live_in_values[i]));
  }
  VLOG(1) << "Hoisted " << instructions_to_replace.size()
          << " instructions from " << while_instr_name;
  return true;
}
// Runs the pass over every kWhile in the module. The while instructions are
// collected up front because hoisting mutates the module while we iterate.
absl::StatusOr<bool> WhileLoopExpensiveInvariantCodeMotion::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(2) << "HLO module before WhileLoopExpensiveInvariantCodeMotion:";
  XLA_VLOG_LINES(2, module->ToString());
  // First pass: gather every while instruction in the filtered computations.
  std::vector<HloInstruction*> candidate_whiles;
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kWhile) {
        candidate_whiles.push_back(instruction);
      }
    }
  }
  // Second pass: try to hoist from each candidate, accumulating the result.
  bool any_hoisted = false;
  for (HloInstruction* candidate : candidate_whiles) {
    TF_ASSIGN_OR_RETURN(
        bool hoisted, TryHoistingInvariantInstructionsFromWhileBody(candidate));
    any_hoisted |= hoisted;
  }
  if (any_hoisted) {
    VLOG(2) << "HLO module after WhileLoopExpensiveInvariantCodeMotion:";
    XLA_VLOG_LINES(2, module->ToString());
  } else {
    VLOG(2)
        << "HLO module unchanged after WhileLoopExpensiveInvariantCodeMotion";
  }
  return any_hoisted;
}
} | #include "xla/service/while_loop_expensive_invariant_code_motion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using WhileLoopExpensiveInvariantCodeMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
constexpr char kModuleWithNonInflatingInvariantDot[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[], f32[16, 8]) parameter(0)
b = get-tuple-element(p_body), index=1
const = f32[] constant(1.0)
lhs = f32[8, 16] broadcast(const), dimensions={}
dot = dot(lhs, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
reduced = reduce(dot, const), dimensions={0, 1}, to_apply=mul
a = get-tuple-element(p_body), index=0
add = add(reduced, a)
ROOT root = tuple(add, b)
}
condition {
p_cond = (f32[], f32[16, 8]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[] parameter(0)
param1 = f32[16, 8] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfAllowedNonInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Contains(op::Reduce()));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfAllNonInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot,
HloOpcode::kReduce>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
DoesNotHoistsUnallowedInstructions) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithNonInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateFalse)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
constexpr char kModuleWithInflatingInvariantDot[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[], f32[16, 4]) parameter(0)
b = get-tuple-element(p_body), index=1
const = f32[] constant(1.0)
lhs = f32[4, 16] broadcast(const), dimensions={}
dot = dot(lhs, b), lhs_contracting_dims={0}, rhs_contracting_dims={1}
reduced = reduce(dot, const), dimensions={0, 1}, to_apply=mul
a = get-tuple-element(p_body), index=0
add = add(reduced, a)
ROOT root = tuple(add, b)
}
condition {
p_cond = (f32[], f32[16, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[] parameter(0)
param1 = f32[16, 4] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest, DoesNotHoistsInflating) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsGroupOfNonInflatingWithInflatingIntermediate) {
auto m =
ParseAndReturnVerifiedModule(kModuleWithInflatingInvariantDot).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot,
HloOpcode::kReduce>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Reduce())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
HoistsOpWithDuplicateOperands) {
constexpr char kModuleWithDuplicateOperands[] = R"(
HloModule ModuleWithWhile
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
body {
p_body = (f32[4, 4], f32[4, 4]) parameter(0)
a = get-tuple-element(p_body), index=0
dot = dot(a, a), lhs_contracting_dims={0}, rhs_contracting_dims={1}
b = get-tuple-element(p_body), index=1
add = add(b, dot)
ROOT root = tuple(a, add)
}
condition {
p_cond = (f32[4, 4], f32[4, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[4, 4] parameter(0)
param1 = f32[4, 4] parameter(1)
while_init = tuple(param0, param1)
ROOT while = while(while_init), condition=condition, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(kModuleWithDuplicateOperands).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_TRUE(simplified_loop);
HloComputation* while_body = m->GetComputationWithName("wide.body");
ASSERT_NE(while_body, nullptr);
EXPECT_THAT(while_body->instructions(), Not(Contains(op::Dot())));
}
TEST_F(WhileLoopExpensiveInvariantCodeMotionTest,
DoesNotHoistShardingCustomCalls) {
constexpr char kModuleWithShardingCustomCalls[] = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[4, 4], f32[4, 4]) parameter(0)
a = f32[4, 4] get-tuple-element(p_body), index=0
custom-call.1 = f32[4, 4] custom-call(a), custom_call_target="Sharding", sharding={devices=[4,1]0,1,2,3}
custom-call.2 = f32[4, 4] custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
dot = f32[4, 4] dot(a, a), lhs_contracting_dims={0}, rhs_contracting_dims={1}
b = f32[4, 4] get-tuple-element(p_body), index=1
add = f32[4, 4] add(b, dot)
custom-call.3 = f32[4, 4] custom-call(add), custom_call_target="Sharding", sharding={manual}
custom-call.4 = f32[4, 4] custom-call(custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1]0,1,2,3}
ROOT root = (f32[4, 4], f32[4, 4]) tuple(a, custom-call.4)
}
condition {
p_cond = (f32[4, 4], f32[4, 4]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
param0 = f32[4, 4] parameter(0)
param1 = f32[4, 4] parameter(1)
while_init = (f32[4, 4], f32[4, 4]) tuple(param0, param1)
ROOT while = (f32[4, 4], f32[4, 4]) while(while_init), condition=condition, body=body
}
)";
auto m = ParseAndReturnVerifiedModule(kModuleWithShardingCustomCalls).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
WhileLoopExpensiveInvariantCodeMotion(
HloPredicateIsOp<HloOpcode::kDot>)
.Run(m.get()));
EXPECT_FALSE(simplified_loop);
}
}
} |
1,985 | cpp | tensorflow/tensorflow | hlo_proto_util | third_party/xla/xla/service/hlo_proto_util.cc | third_party/xla/xla/service/hlo_proto_util_test.cc | #ifndef XLA_SERVICE_HLO_PROTO_UTIL_H_
#define XLA_SERVICE_HLO_PROTO_UTIL_H_
#include <string>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/hlo.pb.h"
namespace xla {
HloProto MakeHloProto(const HloModule& module,
const BufferAssignment& assignment);
HloProto MakeHloProto(const HloModule& module);
absl::StatusOr<std::unique_ptr<HloModule>> CreateModuleFromProto(
const HloModuleProto& proto, const HloModuleConfig& module_config,
bool is_module_post_optimizations = false);
absl::StatusOr<std::vector<const ShapeProto*>> EntryComputationParameterShapes(
const HloProto& hlo_proto);
absl::StatusOr<const ShapeProto*> EntryComputationOutputShape(
const HloProto& hlo_proto);
}
#endif
#include "xla/service/hlo_proto_util.h"
#include <memory>
#include <string>
#include <vector>
#include "xla/service/hlo_verifier.h"
#include "xla/util.h"
namespace xla {
// Builds an HloProto containing both the module and its buffer assignment.
HloProto MakeHloProto(const HloModule& module,
                      const BufferAssignment& assignment) {
  // Start from the module-only proto, then attach the assignment.
  HloProto proto = MakeHloProto(module);
  BufferAssignmentProto assignment_proto = assignment.ToProto();
  proto.mutable_buffer_assignment()->Swap(&assignment_proto);
  return proto;
}
// Builds an HloProto that wraps just the module's serialized form.
HloProto MakeHloProto(const HloModule& module) {
  HloProto proto;
  HloModuleProto module_proto = module.ToProto();
  proto.mutable_hlo_module()->Swap(&module_proto);
  return proto;
}
// Deserializes `proto` into an HloModule under `module_config` and verifies
// it before returning. Returns a verifier error if the module is invalid.
absl::StatusOr<std::unique_ptr<HloModule>> CreateModuleFromProto(
    const HloModuleProto& proto, const HloModuleConfig& module_config,
    bool is_module_post_optimizations) {
  VLOG(4) << proto.ShortDebugString();
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
                      HloModule::CreateFromProto(proto, module_config));
  // NOTE(review): first HloVerifier arg is presumably layout_sensitive=false;
  // the second enables post-optimization checks — confirm against HloVerifier.
  TF_RETURN_IF_ERROR(
      HloVerifier(false,
                  is_module_post_optimizations)
          .Run(module.get())
          .status());
  return module;
}
// Returns pointers into `hlo_proto` to the entry computation's parameter
// shapes; errors if the module or its program shape is missing.
absl::StatusOr<std::vector<const ShapeProto*>> EntryComputationParameterShapes(
    const HloProto& hlo_proto) {
  if (!hlo_proto.has_hlo_module()) {
    return NotFound("HloProto missing HloModuleProto.");
  }
  if (!hlo_proto.hlo_module().has_host_program_shape()) {
    return NotFound("HloProto missing program shape.");
  }
  const auto& program_shape = hlo_proto.hlo_module().host_program_shape();
  std::vector<const ShapeProto*> parameter_shapes;
  parameter_shapes.reserve(program_shape.parameters_size());
  for (int i = 0; i < program_shape.parameters_size(); ++i) {
    parameter_shapes.push_back(&program_shape.parameters(i));
  }
  return parameter_shapes;
}
// Returns a pointer into `hlo_proto` to the entry computation's result shape;
// errors if the module, program shape, or result is missing.
absl::StatusOr<const ShapeProto*> EntryComputationOutputShape(
    const HloProto& hlo_proto) {
  if (!hlo_proto.has_hlo_module()) {
    return NotFound("HloProto missing HloModuleProto.");
  }
  const auto& module_proto = hlo_proto.hlo_module();
  if (!module_proto.has_host_program_shape()) {
    return NotFound("HloProto missing program shape.");
  }
  const auto& program_shape = module_proto.host_program_shape();
  if (!program_shape.has_result()) {
    return NotFound("HloProto missing result in its program shape");
  }
  return &program_shape.result();
}
} | #include "xla/service/hlo_proto_util.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloProtoUtilTest : public ::testing::Test {};
// An HloProto without an embedded module must yield a NotFound-style error
// mentioning the missing HloModuleProto.
TEST_F(HloProtoUtilTest, ParamsAndOutputShapeMissingModule) {
  HloProto hlo_proto;
  auto status = EntryComputationParameterShapes(hlo_proto).status();
  ASSERT_FALSE(status.ok());
  ASSERT_THAT(status.message(), ::testing::HasSubstr("missing HloModuleProto"));
}
// A module proto without a host program shape must yield an error mentioning
// the missing program shape.
TEST_F(HloProtoUtilTest, MissingProgramShape) {
  HloProto hlo_proto;
  HloModuleProto* module = hlo_proto.mutable_hlo_module();
  module->set_name("entry");
  auto status = EntryComputationParameterShapes(hlo_proto).status();
  ASSERT_FALSE(status.ok());
  ASSERT_THAT(status.message(), ::testing::HasSubstr("missing program shape"));
}
}
} |
1,986 | cpp | tensorflow/tensorflow | hlo_value_semantics_analysis | third_party/xla/xla/service/hlo_value_semantics_analysis.cc | third_party/xla/xla/service/hlo_value_semantics_analysis_test.cc | #ifndef XLA_SERVICE_HLO_VALUE_SEMANTICS_ANALYSIS_H_
#define XLA_SERVICE_HLO_VALUE_SEMANTICS_ANALYSIS_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
namespace xla {
// A matched pair of send/recv instructions belonging to one rendezvous.
struct SendRecvGroup {
  HloInstruction* send;
  HloInstruction* recv;
};
// Pairs up host-transfer send and recv instructions across a module, keyed
// by rendezvous name, and resolves one side of a pair to the other.
class SendRecvGroupMap {
 public:
  explicit SendRecvGroupMap(const HloModule& hlo_module);
  SendRecvGroupMap(SendRecvGroupMap&& other) = default;
  SendRecvGroupMap(const SendRecvGroupMap& other) = default;
  virtual ~SendRecvGroupMap() = default;
  // Given a send returns its recv, and vice versa (behavior inferred from the
  // name — confirm against the .cc implementation).
  virtual absl::StatusOr<HloInstruction*> GetMatchingSendOrRecv(
      HloInstruction* send_or_recv) const;

 private:
  // Keyed by rendezvous identifier string.
  absl::flat_hash_map<std::string, SendRecvGroup> host_transfer_rendezvous_map_;
};
// Drives a pre-order depth-first traversal of a computation, invoking the
// given visitor on each instruction.
class HloPreOrderDFS {
 public:
  HloPreOrderDFS() = default;
  ~HloPreOrderDFS() = default;
  absl::Status Run(const HloComputation& computation,
                   DfsHloVisitorBase<HloInstruction*>* visitor);

 private:
  // Whether `instruction` can be visited now — presumably all predecessors
  // are already visited; confirm against the .cc implementation.
  bool IsReady(const HloInstruction* instruction) const;
  // Pending instructions and the set already visited.
  std::vector<HloInstruction*> stack_;
  absl::flat_hash_set<HloInstruction*> visited_;
};
using EinsumDepthMap =
absl::node_hash_map<const HloInstruction*, ShapeTree<int>>;
class EinsumDepthAnalysis : public DfsHloVisitorWithDefault {
public:
static absl::StatusOr<std::unique_ptr<EinsumDepthAnalysis>> Run(
const HloComputation& computation,
const SendRecvGroupMap& send_recv_group_map);
~EinsumDepthAnalysis() override = default;
absl::Status DefaultAction(HloInstruction* instruction) override;
absl::Status HandleTuple(HloInstruction* tuple) override;
absl::Status HandleGetTupleElement(
HloInstruction* get_tuple_element) override;
absl::Status HandleDot(HloInstruction* dot) override;
absl::Status HandleConvolution(HloInstruction* convolution) override;
absl::Status HandleCall(HloInstruction* call) override;
absl::Status HandleFusion(HloInstruction* fusion) override;
absl::Status HandleWhile(HloInstruction* xla_while) override;
absl::Status HandleConditional(HloInstruction* conditional) override;
absl::Status HandleAfterAll(HloInstruction* after_all) override;
absl::Status HandleSend(HloInstruction* send) override;
absl::Status HandleRecv(HloInstruction* recv) override;
absl::Status HandleSendDone(HloInstruction* send_done) override;
absl::Status HandleRecvDone(HloInstruction* recv_done) override;
absl::Status HandleAllReduce(HloInstruction* all_reduce) override;
absl::Status HandleAsyncStart(HloInstruction* async_start) override;
absl::Status HandleAsyncDone(HloInstruction* async_done) override;
const EinsumDepthMap& GetEinsumDepthMap() const { return einsum_depth_map_; }
private:
explicit EinsumDepthAnalysis(const SendRecvGroupMap& send_recv_group_map)
: send_recv_group_map_(&send_recv_group_map) {}
absl::Status RunInternal(const HloComputation& computation,
const std::optional<ShapeTree<int>>& root_depth);
ShapeTree<int>& GetOrCreateDepthTree(const HloInstruction* instruction);
ShapeTree<int>& GetDepthTreeOrDie(const HloInstruction* instruction);
absl::Status SetInstructionDepth(const HloInstruction* instruction,
int depth);
absl::Status SetInstructionDepth(const HloInstruction* instruction,
const ShapeTree<int>& depth);
absl::Status SetInstructionDepthFromTupleDepth(
const HloInstruction* instruction, const ShapeTree<int>& tuple_depth_tree,
int tuple_index);
absl::Status HandleDepthIncrementInstruction(HloInstruction* instruction);
absl::Status HandleCalledComputation(
const HloComputation& called_computation,
const ShapeTree<int>& root_depth,
absl::Span<HloInstruction* const> operands);
absl::Status HandleTupleLike(HloInstruction* tuple_like);
EinsumDepthMap einsum_depth_map_;
const SendRecvGroupMap* const send_recv_group_map_;
};
using EinsumHeightMap =
absl::node_hash_map<const HloInstruction*, ShapeTree<int>>;
class EinsumHeightAnalysis : public DfsHloVisitorWithDefault {
public:
static absl::StatusOr<std::unique_ptr<EinsumHeightAnalysis>> Run(
const HloComputation& computation,
const SendRecvGroupMap& send_recv_group_map);
~EinsumHeightAnalysis() override = default;
absl::Status DefaultAction(HloInstruction* instruction) override;
absl::Status HandleTuple(HloInstruction* tuple) override;
absl::Status HandleGetTupleElement(
HloInstruction* get_tuple_element) override;
absl::Status HandleDot(HloInstruction* dot) override;
absl::Status HandleConvolution(HloInstruction* convolution) override;
absl::Status HandleCall(HloInstruction* call) override;
absl::Status HandleFusion(HloInstruction* fusion) override;
absl::Status HandleWhile(HloInstruction* xla_while) override;
absl::Status HandleConditional(HloInstruction* conditional) override;
absl::Status HandleSend(HloInstruction* send) override;
absl::Status HandleRecv(HloInstruction* recv) override;
absl::Status HandleSendDone(HloInstruction* send_done) override;
absl::Status HandleRecvDone(HloInstruction* recv_done) override;
absl::Status HandleAllReduce(HloInstruction* all_reduce) override;
absl::Status HandleAsyncStart(HloInstruction* async_start) override;
absl::Status HandleAsyncDone(HloInstruction* async_done) override;
const EinsumHeightMap& GetEinsumHeightMap() const {
return einsum_height_map_;
}
private:
explicit EinsumHeightAnalysis(const SendRecvGroupMap& send_recv_group_map)
: send_recv_group_map_(&send_recv_group_map) {}
absl::Status RunInternal(const HloComputation& computation,
absl::Span<HloInstruction* const> operands);
ShapeTree<int>& GetOrCreateHeightTree(const HloInstruction* instruction);
ShapeTree<int>& GetHeightTreeOrDie(const HloInstruction* instruction);
bool HasHeightFor(const HloInstruction* instruction) const;
absl::Status SetInstructionHeight(const HloInstruction* instruction,
int height);
absl::Status SetInstructionHeight(const HloInstruction* instruction,
const ShapeTree<int>& height);
absl::Status HandleHeightIncrementInstruction(HloInstruction* instruction);
absl::Status HandleCalledComputation(
const HloComputation& computation,
absl::Span<HloInstruction* const> operands);
absl::Status HandleTupleLike(HloInstruction* tuple_like);
EinsumHeightMap einsum_height_map_;
const SendRecvGroupMap* const send_recv_group_map_;
};
// Semantic classification of an HLO value. The labels' precise meanings are
// assigned by HloValueSemanticsAnalysis; names suggest: compile-time static,
// random, model weight, activation, gradients thereof, and tuple/token
// containers — confirm against the analysis implementation.
enum class HloValueSemanticLabel {
  kStatic,
  kRandom,
  kWeight,
  kActivation,
  kActivationGradient,
  kWeightGradient,
  kTupleOrToken,
};
// Human-readable name for a label, for debugging/printing.
std::string HloValueSemanticLabelToString(HloValueSemanticLabel label);
// An immutable (id, label, origin) record describing the semantics of one
// HLO value; `origin` is the position where the value was produced.
class HloValueSemantics {
 public:
  using Id = int64_t;
  HloValueSemantics(HloValueSemanticLabel label, const HloPosition& origin);
  HloValueSemantics(Id id, HloValueSemanticLabel label,
                    const HloPosition& origin);
  HloValueSemantics(const HloValueSemantics& other) = default;
  HloValueSemantics(HloValueSemantics&& other) = default;
  HloValueSemantics& operator=(const HloValueSemantics& other) = default;
  Id id() const { return id_; }
  HloValueSemanticLabel label() const { return label_; }
  const HloPosition& origin() const { return origin_; }
  std::string ToString() const;

 private:
  // All members are const: instances are value-identified and never mutated
  // after construction (copy-assign is defaulted but unusable on const
  // members in practice).
  const Id id_;
  const HloValueSemanticLabel label_;
  const HloPosition origin_;
};
std::string HloValueSemanticsTreeToString(
const ShapeTree<const HloValueSemantics*>& tree);
using HloValueSemanticsMap =
absl::node_hash_map<const HloInstruction*,
ShapeTree<const HloValueSemantics*>>;
class HloValueSemanticsPropagation;
// Whole-module analysis that assigns an HloValueSemantics label (static,
// random, weight, activation, gradient, ...) to every leaf of every
// instruction's output shape, plus einsum depth/height maps and send/recv
// pairing. Construct via the static Run() factory.
class HloValueSemanticsAnalysis {
 public:
  static absl::StatusOr<std::unique_ptr<HloValueSemanticsAnalysis>> Run(
      const HloModule& module,
      const absl::flat_hash_set<std::string_view>& execution_threads = {});
  virtual ~HloValueSemanticsAnalysis() = default;
  bool HasSemanticsFor(const HloInstruction* instruction) const;
  // Returns the semantics at `index` of `instruction`'s output shape.
  const HloValueSemantics* GetSemantics(const HloInstruction* instruction,
                                        const ShapeIndex& index = {}) const;
  const HloValueSemanticsMap& GetSemanticsMap() const {
    return value_semantics_;
  }
  const EinsumDepthMap& GetEinsumDepthMap() const { return einsum_depth_map_; }
  const EinsumHeightMap& GetEinsumHeightMap() const {
    return einsum_height_map_;
  }
  int GetDepth(const HloInstruction* instruction,
               const ShapeIndex& index = {}) const;
  int GetHeight(const HloInstruction* instruction,
                const ShapeIndex& index = {}) const;
  const SendRecvGroupMap& GetSendRecvGroupMap() const {
    return *send_recv_group_map_;
  }
  absl::StatusOr<HloInstruction*> GetMatchingSendOrRecv(
      HloInstruction* send_or_recv) const;

 protected:
  friend class HloValueSemanticsPropagation;
  explicit HloValueSemanticsAnalysis(
      const HloModule& module,
      const absl::flat_hash_set<std::string_view>& execution_threads);
  virtual absl::Status InitializeEinsumDepth();
  virtual absl::Status InitializeEinsumHeight();
  virtual void InitializeSendRecvGroups();
  void AnnotateWeights();
  absl::Status RunOnComputation(
      const HloComputation& computation,
      absl::Span<const HloInstruction* const> operands);
  virtual absl::Status RunOnComputation(const HloComputation& computation);
  // Allocation/bookkeeping helpers for HloValueSemantics objects; the
  // analysis owns them via value_semantics_map_.
  HloValueSemantics::Id NextId();
  const HloValueSemantics* NewHloValueSemantics(HloValueSemanticLabel label,
                                                const HloPosition& origin);
  const ShapeTree<const HloValueSemantics*>& GetInstructionSemantics(
      const HloInstruction* instruction) const;
  void DeepCopyHloValueSemantics(
      ShapeTree<const HloValueSemantics*>& copy_to,
      const ShapeTree<const HloValueSemantics*>& copy_from,
      const ShapeIndex& source_index, const ShapeIndex& destination_index);
  void DeepCopyHloValueSemantics(
      const HloInstruction* target,
      const ShapeTree<const HloValueSemantics*>& copy_from,
      const ShapeIndex& source_index = {});
  void SetHloValueSemantics(
      const HloInstruction* target,
      const ShapeTree<const HloValueSemantics*>& semantics);
  void DeleteHloValueSemantics(
      const ShapeTree<const HloValueSemantics*>& to_delete);
  void DeleteHloValueSemantics(const HloValueSemantics* to_delete);
  const HloModule& module_;
  // NOTE(review): reference member — callers of Run() pass (possibly a
  // defaulted temporary) set; presumably only read during Run. Verify it is
  // not dereferenced after construction completes.
  const absl::flat_hash_set<absl::string_view>& execution_threads_;
  HloValueSemanticsMap value_semantics_;
  absl::flat_hash_map<HloValueSemantics::Id, std::unique_ptr<HloValueSemantics>>
      value_semantics_map_;
  HloValueSemantics::Id next_id_;
  EinsumDepthMap einsum_depth_map_;
  EinsumHeightMap einsum_height_map_;
  std::unique_ptr<SendRecvGroupMap> send_recv_group_map_;
};
// DFS visitor that propagates HloValueSemantics from operands to results,
// writing its conclusions back into the owning HloValueSemanticsAnalysis.
class HloValueSemanticsPropagation : public DfsHloVisitorWithDefault {
 public:
  explicit HloValueSemanticsPropagation(HloValueSemanticsAnalysis* analysis);
  absl::Status Run(const HloComputation& computation);
  // Infer the output semantics of an instruction from the semantics of its
  // operands.
  absl::Status DefaultAction(HloInstruction* instruction) override;
  absl::Status HandleParameter(HloInstruction* parameter) override;
  absl::Status HandleConstant(HloInstruction* constant) override;
  absl::Status HandleIota(HloInstruction* iota) override;
  absl::Status HandlePartitionId(HloInstruction* partition_id) override;
  absl::Status HandleReplicaId(HloInstruction* replica_id) override;
  absl::Status HandleClamp(HloInstruction* clamp) override;
  absl::Status HandleTuple(HloInstruction* tuple) override;
  absl::Status HandleGetTupleElement(
      HloInstruction* get_tuple_element) override;
  absl::Status HandleCall(HloInstruction* call) override;
  absl::Status HandleFusion(HloInstruction* fusion) override;
  absl::Status HandleCustomCall(HloInstruction* custom_call) override;
  absl::Status HandleWhile(HloInstruction* xla_while) override;
  absl::Status HandleConditional(HloInstruction* conditional) override;
  absl::Status HandleSelect(HloInstruction* select) override;
  absl::Status HandleConcatenate(HloInstruction* concatenate) override;
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override;
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override;
  absl::Status HandleCopyStart(HloInstruction* copy_start) override;
  absl::Status HandleCopyDone(HloInstruction* copy_done) override;
  absl::Status HandleAllGatherStart(HloInstruction* all_gather_start) override;
  absl::Status HandleAllGatherDone(HloInstruction* all_gather_done) override;
  absl::Status HandleCollectivePermuteStart(
      HloInstruction* collective_permute_start) override;
  absl::Status HandleCollectivePermuteDone(
      HloInstruction* collective_permute_done) override;
  absl::Status HandleGather(HloInstruction* gather) override;
  absl::Status HandleScatter(HloInstruction* scatter) override;
  absl::Status HandleAfterAll(HloInstruction* after_all) override;
  absl::Status HandleAllReduce(HloInstruction* all_reduce) override;
  absl::Status HandleAsyncStart(HloInstruction* async_start) override;
  absl::Status HandleAsyncDone(HloInstruction* async_done) override;
  absl::Status HandleInfeed(HloInstruction* infeed) override;
  absl::Status HandleOutfeed(HloInstruction* outfeed) override;
  absl::Status HandleDomain(HloInstruction* domain) override;
  absl::Status HandleOptimizationBarrier(HloInstruction* opt_barrier) override;
  absl::Status HandleRngBitGenerator(
      HloInstruction* rng_bit_generator) override;
  absl::Status HandleSend(HloInstruction* send) override;
  absl::Status HandleRecv(HloInstruction* recv) override;
  absl::Status HandleSendDone(HloInstruction* send_done) override;
  absl::Status HandleRecvDone(HloInstruction* recv_done) override;

 protected:
  HloValueSemantics CopySemantics(const HloValueSemantics& semantics) const;
  HloValueSemantics CopySemanticsWithNewOrigin(
      const HloValueSemantics& semantics, HloInstruction* new_origin,
      const ShapeIndex& index = {}) const;
  const HloValueSemantics* AddSemantics(const HloValueSemantics& semantics);
  // Identifies an einsum (dot) together with which of its operands depends on
  // a given origin.
  struct EinsumAndOperandIndex {
    HloInstruction* einsum;
    int64_t operand_index;
  };
  std::vector<EinsumAndOperandIndex> FindEinsumsWhereOriginDependsOnOther(
      const HloValueSemantics& semantics, const HloPosition& origin_dependence,
      bool recursive = false) const;
  bool OriginDependsOn(const HloValueSemantics& semantics,
                       const HloPosition& origin_dependence,
                       bool recursive = false) const;
  absl::StatusOr<HloValueSemantics> MaybeCreateGradientSemantics(
      HloInstruction* gradient_candidate,
      HloValueSemanticLabel fallback_label) const;
  // Pairwise combination rules: given one operand's label, derive the result
  // semantics from the other operand's semantics.
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromStaticAndOther(
      const HloValueSemantics& static_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromRandomAndOther(
      const HloValueSemantics& random_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromWeightAndOther(
      const HloValueSemantics& weight_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromActivationAndOther(
      const HloValueSemantics& activation_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics>
  ComputeSemanticsFromActivationGradientAndOther(
      const HloValueSemantics& activation_gradient_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromWeightGradientAndOther(
      const HloValueSemantics& weight_gradient_semantics,
      const HloValueSemantics& other_semantics,
      HloInstruction* instruction) const;
  absl::StatusOr<HloValueSemantics> MergeSemanticsForAnInstruction(
      HloInstruction* instruction,
      std::vector<HloValueSemantics>& semantics_vec) const;
  absl::StatusOr<HloValueSemantics> ComputeSemanticsFromOperands(
      HloInstruction* instruction, absl::Span<const int64_t> operand_indices,
      absl::Span<const ShapeIndex> operand_shape_indices = {}) const;
  // Shared implementations for groups of opcodes with identical handling.
  absl::Status HandleTupleLike(HloInstruction* tuple_like);
  absl::Status HandleCollectiveOrCopyStart(HloInstruction* op_start);
  absl::Status HandleCollectiveOrCopyDone(HloInstruction* op_done);
  HloValueSemanticsAnalysis* analysis_;
};
}
#endif
#include "xla/service/hlo_value_semantics_analysis.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Groups host-transfer Send/Recv instructions that share a rendezvous name so
// that either side of a pair can later locate its partner.
SendRecvGroupMap::SendRecvGroupMap(const HloModule& hlo_module) {
  for (HloComputation* computation : hlo_module.computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      const HloOpcode opcode = instruction->opcode();
      if (opcode != HloOpcode::kSend && opcode != HloOpcode::kRecv) {
        continue;
      }
      std::string rendezvous = instruction->frontend_attributes().map().at(
          kXlaHostTransferRendezvousNameAttr);
      // insert() is a no-op when the rendezvous is already present, so this
      // creates an empty group exactly once per rendezvous name.
      auto group_iter =
          host_transfer_rendezvous_map_
              .insert({rendezvous, SendRecvGroup{nullptr, nullptr}})
              .first;
      if (opcode == HloOpcode::kSend) {
        group_iter->second.send = instruction;
      } else {
        group_iter->second.recv = instruction;
      }
    }
  }
}
// Returns the Recv paired with a Send (or vice versa) via their shared
// host-transfer rendezvous name.
absl::StatusOr<HloInstruction*> SendRecvGroupMap::GetMatchingSendOrRecv(
    HloInstruction* send_or_recv) const {
  const HloOpcode opcode = send_or_recv->opcode();
  if (opcode != HloOpcode::kSend && opcode != HloOpcode::kRecv) {
    return InvalidArgument("Expecting only send or recv");
  }
  std::string rendezvous = send_or_recv->frontend_attributes().map().at(
      kXlaHostTransferRendezvousNameAttr);
  auto group_iter = host_transfer_rendezvous_map_.find(rendezvous);
  if (group_iter == host_transfer_rendezvous_map_.end()) {
    return Internal("Missing send or recv from send recv group.");
  }
  const SendRecvGroup& group = group_iter->second;
  // Return the opposite side of the pair.
  return opcode == HloOpcode::kSend ? group.recv : group.send;
}
bool HloPreOrderDFS::IsReady(const HloInstruction* instruction) const {
for (HloInstruction* user : instruction->users()) {
if (!visited_.contains(user)) {
return false;
}
}
return true;
}
namespace {
std::vector<HloInstruction*> GetAllInstructionsWithZeroUsers(
const HloComputation& computation) {
std::vector<HloInstruction*> results;
for (HloInstruction* instruction : computation.instructions()) {
if (instruction->users().empty()) {
results.push_back(instruction);
}
}
return results;
}
}
// Visits the computation users-first: traversal starts at the sinks and an
// operand is only pushed once all of its users have been visited.
absl::Status HloPreOrderDFS::Run(const HloComputation& computation,
                                 DfsHloVisitorBase<HloInstruction*>* visitor) {
  // Reset state so this object can be reused across computations.
  stack_.clear();
  visited_.clear();
  for (HloInstruction* root : GetAllInstructionsWithZeroUsers(computation)) {
    stack_.push_back(root);
  }
  while (!stack_.empty()) {
    HloInstruction* current = stack_.back();
    stack_.pop_back();
    if (!visited_.insert(current).second) {
      continue;  // Already visited via another user path.
    }
    for (HloInstruction* operand : current->mutable_operands()) {
      if (IsReady(operand)) {
        stack_.push_back(operand);
      }
    }
    TF_RETURN_IF_ERROR(visitor->Preprocess(current));
    TF_RETURN_IF_ERROR(current->Visit(visitor));
    TF_RETURN_IF_ERROR(visitor->Postprocess(current));
  }
  return absl::OkStatus();
}
namespace {
// Stringifies a tree element; the generic overload covers StrCat-compatible
// types (e.g. int).
template <typename T>
std::string ToString(T element) {
  return absl::StrCat(element);
}
// Specialization for semantics pointers: delegate to the semantics object.
template <>
std::string ToString(const HloValueSemantics* element) {
  return element->ToString();
}
// Renders a whole ShapeTree as "<index>, <subshape>: <element>" lines.
template <typename T>
std::string ToString(const ShapeTree<T>& tree) {
  std::string str;
  tree.ForEachElement([&str, &tree](const ShapeIndex& shape_index, T element) {
    auto subshape = ShapeUtil::GetSubshape(tree.shape(), (shape_index));
    absl::StrAppend(&str, shape_index.ToString(), ", ", subshape.ToString(),
                    ": ", ToString(element), "\n");
  });
  return str;
}
}
// Seeds depths at the computation's sinks, then propagates them toward
// operands with a users-first pre-order DFS.
absl::Status EinsumDepthAnalysis::RunInternal(
    const HloComputation& computation,
    const std::optional<ShapeTree<int>>& root_depth) {
  for (HloInstruction* root : GetAllInstructionsWithZeroUsers(computation)) {
    if (root != computation.root_instruction()) {
      // Non-root sinks get an (all -1) tree created lazily; their depth is
      // unknown until propagation reaches them.
      GetOrCreateDepthTree(root);
      continue;
    }
    // The root starts at the caller-provided depth, or 0 for an entry call.
    if (root_depth.has_value()) {
      TF_RETURN_IF_ERROR(SetInstructionDepth(root, *root_depth));
    } else {
      TF_RETURN_IF_ERROR(SetInstructionDepth(root, 0));
    }
  }
  HloPreOrderDFS dfs;
  return dfs.Run(computation, this);
}
// Factory: builds the analysis and runs depth propagation over `computation`.
absl::StatusOr<std::unique_ptr<EinsumDepthAnalysis>> EinsumDepthAnalysis::Run(
    const HloComputation& computation,
    const SendRecvGroupMap& send_recv_group_map) {
  // absl::WrapUnique (not make_unique) because the constructor is not
  // publicly accessible; this also removes the previous exception-unsafe
  // raw-pointer local that held the allocation before ownership was taken.
  std::unique_ptr<EinsumDepthAnalysis> analysis =
      absl::WrapUnique(new EinsumDepthAnalysis(send_recv_group_map));
  TF_RETURN_IF_ERROR(analysis->RunInternal(computation, std::nullopt));
  return analysis;
}
namespace {
// Combines a freshly-computed depth with an existing one. Non-negative
// (known) depths dominate via max; when both values are still negative
// (unknown markers) the more negative marker wins; a negative new depth
// never overrides a known non-negative one.
int MergeDepth(int original_depth, int new_depth) {
  if (new_depth >= 0) {
    return std::max(original_depth, new_depth);
  }
  return original_depth < 0 ? std::min(original_depth, new_depth)
                            : original_depth;
}
// Merges the scalar `depth` into every leaf of `depth_tree`; interior
// (tuple) nodes are left untouched.
void SetDepth(ShapeTree<int>& depth_tree, int depth) {
  depth_tree.ForEachMutableElement(
      [depth, &depth_tree](const ShapeIndex& index, int* element) {
        if (!depth_tree.IsLeaf(index)) {
          return;
        }
        *element = MergeDepth(*element, depth);
      });
}
// Merges `source` into `depth_tree` leaf-by-leaf at matching shape indices.
void SetDepth(ShapeTree<int>& depth_tree, const ShapeTree<int>& source) {
  depth_tree.ForEachMutableElement(
      [&depth_tree, &source](const ShapeIndex& index, int* element) {
        if (!depth_tree.IsLeaf(index)) {
          return;
        }
        *element = MergeDepth(*element, source.element(index));
      });
}
// Returns the largest non-negative depth in the tree; if every depth is
// negative (still unknown), returns the most negative marker instead.
int GetMaxDepth(const ShapeTree<int>& depth_tree) {
  int result = -1;
  depth_tree.ForEachElement(
      [&result](const ShapeIndex& index, int depth) {
        result = std::max(result, depth);
        return absl::OkStatus();
      });
  if (result >= 0) {
    return result;
  }
  // All depths negative: take the minimum (most negative) one.
  depth_tree.ForEachElement(
      [&result](const ShapeIndex& index, int depth) {
        result = std::min(result, depth);
        return absl::OkStatus();
      });
  return result;
}
// Merges subtree {tuple_index} of `tuple_depth_tree` into `depth_tree`,
// i.e. projects a tuple's per-element depths onto one operand.
void SetDepthFromTupleDepth(ShapeTree<int>& depth_tree,
                            const ShapeTree<int>& tuple_depth_tree,
                            int tuple_index) {
  depth_tree.ForEachMutableElement(
      [&depth_tree, &tuple_depth_tree, tuple_index](const ShapeIndex& index,
                                                    int* element) {
        if (!depth_tree.IsLeaf(index)) {
          return;
        }
        // Prepend the tuple index to address the matching leaf in the tuple.
        ShapeIndex tuple_element_index = index;
        tuple_element_index.push_front(tuple_index);
        *element =
            MergeDepth(*element, tuple_depth_tree.element(tuple_element_index));
      });
}
}
// Returns the instruction's depth tree, lazily creating one with every
// element set to -1 ("depth unknown") on first access.
ShapeTree<int>& EinsumDepthAnalysis::GetOrCreateDepthTree(
    const HloInstruction* instruction) {
  auto iter = einsum_depth_map_.find(instruction);
  if (iter != einsum_depth_map_.end()) {
    return iter->second;
  }
  auto emplaced = einsum_depth_map_.insert(
      std::make_pair(instruction, ShapeTree<int>(instruction->shape(), -1)));
  return emplaced.first->second;
}
// Like GetOrCreateDepthTree, but a missing entry is a logic error and aborts.
ShapeTree<int>& EinsumDepthAnalysis::GetDepthTreeOrDie(
    const HloInstruction* instruction) {
  auto iter = einsum_depth_map_.find(instruction);
  CHECK(iter != einsum_depth_map_.end())
      << "No depth tree found for instruction: " << instruction->ToString();
  return iter->second;
}
// Merges a scalar depth into every leaf of the instruction's depth tree.
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
    const HloInstruction* instruction, int depth) {
  SetDepth(GetOrCreateDepthTree(instruction), depth);
  return absl::OkStatus();
}
// Merges a full depth tree into the instruction's depth tree, leaf-by-leaf.
absl::Status EinsumDepthAnalysis::SetInstructionDepth(
    const HloInstruction* instruction, const ShapeTree<int>& depth) {
  SetDepth(GetOrCreateDepthTree(instruction), depth);
  return absl::OkStatus();
}
// Merges subtree {tuple_index} of `tuple_depth_tree` into the instruction's
// depth tree.
absl::Status EinsumDepthAnalysis::SetInstructionDepthFromTupleDepth(
    const HloInstruction* instruction, const ShapeTree<int>& tuple_depth_tree,
    int tuple_index) {
  SetDepthFromTupleDepth(GetOrCreateDepthTree(instruction), tuple_depth_tree,
                         tuple_index);
  return absl::OkStatus();
}
// Default rule: every operand inherits the maximum depth recorded for this
// instruction's output.
absl::Status EinsumDepthAnalysis::DefaultAction(HloInstruction* instruction) {
  const int max_depth = GetMaxDepth(GetDepthTreeOrDie(instruction));
  for (int i = 0; i < instruction->operand_count(); ++i) {
    TF_RETURN_IF_ERROR(SetInstructionDepth(instruction->operand(i), max_depth));
  }
  return absl::OkStatus();
}
// Tuples forward per-element depths to the corresponding operands.
absl::Status EinsumDepthAnalysis::HandleTuple(HloInstruction* tuple) {
  return HandleTupleLike(tuple);
}
// Array-shaped all-reduce behaves like an ordinary op; tuple-shaped
// (variadic) all-reduce distributes depths per element like a tuple.
absl::Status EinsumDepthAnalysis::HandleAllReduce(HloInstruction* all_reduce) {
  if (!all_reduce->shape().IsArray()) {
    return HandleTupleLike(all_reduce);
  }
  return DefaultAction(all_reduce);
}
// Projects the tuple-like instruction's per-element depths onto each operand.
// NOTE: holds a reference into einsum_depth_map_ across GetOrCreateDepthTree
// insertions (as the original did) — assumes the map type provides reference
// stability; verify if the map type ever changes.
absl::Status EinsumDepthAnalysis::HandleTupleLike(HloInstruction* tuple_like) {
  const ShapeTree<int>& tuple_depths = GetDepthTreeOrDie(tuple_like);
  for (int i = 0; i < tuple_like->operand_count(); ++i) {
    SetDepthFromTupleDepth(GetOrCreateDepthTree(tuple_like->mutable_operand(i)),
                           tuple_depths, i);
  }
  return absl::OkStatus();
}
absl::Status EinsumDepthAnalysis::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
const ShapeTree<int>& depth_tree = GetDepthTreeOrDie(get_tuple_element);
HloInstruction* operand = get_tuple_element->mutable_operand(0);
int tuple_index = get_tuple_element->tuple_index();
ShapeTree<int>& operand_depth = GetOrCreateDepthTree(operand);
operand_depth.ForEachMutableElement(
[&operand_depth, &depth_tree, tuple_index](const ShapeIndex& shape_index,
int* depth_ptr) {
if (shape_index.empty() || shape_index.front() != tuple_index) {
return;
} | #include "xla/service/hlo_value_semantics_analysis.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Three-layer MLP MNIST training loop fed via infeed: forward pass, softmax
// cross-entropy, backward pass, and SGD parameter updates inside a while
// loop. Serves as a large end-to-end input for the semantics analysis tests.
const char kMnistHlo[] = R"(
HloModule MnistTrainingLoopWithInfeed.140, entry_computation_layout={(f32[784,128]{1,0:T(8,128)},f32[128]{0:T(256)},f32[128,32]{1,0:T(8,128)},f32[32]{0:T(256)},f32[32,10]{1,0:T(8,128)},f32[10]{0:T(256)})->(f32[784,128]{1,0:T(8,128)}, f32[128]{0:T(256)}, f32[128,32]{1,0:T(8,128)}, f32[32]{0:T(256)}, f32[32,10]{1,0:T(8,128)}, f32[10]{0:T(256)})}
relu.9 {
  x.10 = f32[] parameter(0)
  constant.11 = f32[] constant(0)
  ROOT maximum.12 = f32[] maximum(x.10, constant.11)
}
max_F32.17 {
  lhs.18 = f32[] parameter(0)
  rhs.19 = f32[] parameter(1)
  ROOT maximum.20 = f32[] maximum(lhs.18, rhs.19)
}
add_F32.1 {
  lhs.22 = f32[] parameter(0)
  rhs.23 = f32[] parameter(1)
  ROOT add.24 = f32[] add(lhs.22, rhs.23)
}
relu_gradients.29 {
  activation.30 = f32[] parameter(0)
  constant.32 = f32[] constant(0)
  compare.33 = pred[] compare(activation.30, constant.32), direction=GT
  backprop.31 = f32[] parameter(1)
  ROOT select.34 = f32[] select(compare.33, backprop.31, constant.32)
}
body.49 {
  after-all.51 = token[] after-all()
  infeed.52 = ((f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]), token[]) infeed(after-all.51)
  get.53 = (f32[100,784]{1,0}, f32[100,10]{1,0}, pred[]) get-tuple-element(infeed.52), index=0
  get.54 = f32[100,784]{1,0} get-tuple-element(get.53), index=0
  prev.50 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  get.57 = f32[784,128]{1,0} get-tuple-element(prev.50), index=0
  dot.63 = f32[100,128]{1,0} dot(get.54, get.57), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.58 = f32[128]{0} get-tuple-element(prev.50), index=1
  broadcast.64 = f32[100,128]{1,0} broadcast(get.58), dimensions={1}
  add.65 = f32[100,128]{1,0} add(dot.63, broadcast.64)
  map.66 = f32[100,128]{1,0} map(add.65), dimensions={0,1}, to_apply=relu.9
  get.59 = f32[128,32]{1,0} get-tuple-element(prev.50), index=2
  dot.67 = f32[100,32]{1,0} dot(map.66, get.59), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.60 = f32[32]{0} get-tuple-element(prev.50), index=3
  broadcast.68 = f32[100,32]{1,0} broadcast(get.60), dimensions={1}
  add.69 = f32[100,32]{1,0} add(dot.67, broadcast.68)
  map.70 = f32[100,32]{1,0} map(add.69), dimensions={0,1}, to_apply=relu.9
  get.61 = f32[32,10]{1,0} get-tuple-element(prev.50), index=4
  dot.71 = f32[100,10]{1,0} dot(map.70, get.61), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  get.62 = f32[10]{0} get-tuple-element(prev.50), index=5
  broadcast.72 = f32[100,10]{1,0} broadcast(get.62), dimensions={1}
  add.73 = f32[100,10]{1,0} add(dot.71, broadcast.72)
  constant.74 = f32[] constant(-inf)
  reduce.75 = f32[100]{0} reduce(add.73, constant.74), dimensions={1}, to_apply=max_F32.17
  broadcast.76 = f32[100,10]{1,0} broadcast(reduce.75), dimensions={0}
  subtract.77 = f32[100,10]{1,0} subtract(add.73, broadcast.76)
  exponential.78 = f32[100,10]{1,0} exponential(subtract.77)
  constant.79 = f32[] constant(0)
  reduce.80 = f32[100]{0} reduce(exponential.78, constant.79), dimensions={1}, to_apply=add_F32.1
  broadcast.81 = f32[100,10]{1,0} broadcast(reduce.80), dimensions={0}
  divide.82 = f32[100,10]{1,0} divide(exponential.78, broadcast.81)
  get.55 = f32[100,10]{1,0} get-tuple-element(get.53), index=1
  subtract.83 = f32[100,10]{1,0} subtract(divide.82, get.55)
  transpose.88 = f32[10,32]{0,1} transpose(get.61), dimensions={1,0}
  dot.89 = f32[100,32]{1,0} dot(subtract.83, transpose.88), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.90 = f32[100,32]{1,0} map(map.70, dot.89), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.95 = f32[32,128]{0,1} transpose(get.59), dimensions={1,0}
  dot.96 = f32[100,128]{1,0} dot(map.90, transpose.95), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  map.97 = f32[100,128]{1,0} map(map.66, dot.96), dimensions={0,1}, to_apply=relu_gradients.29
  transpose.98 = f32[784,100]{0,1} transpose(get.54), dimensions={1,0}
  dot.99 = f32[784,128]{1,0} dot(transpose.98, map.97), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  constant.104 = f32[] constant(0.01)
  broadcast.105 = f32[784,128]{1,0} broadcast(constant.104), dimensions={}
  multiply.106 = f32[784,128]{1,0} multiply(dot.99, broadcast.105)
  subtract.107 = f32[784,128]{1,0} subtract(get.57, multiply.106)
  reduce.101 = f32[128]{0} reduce(map.97, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.109 = f32[128]{0} broadcast(constant.104), dimensions={}
  multiply.110 = f32[128]{0} multiply(reduce.101, broadcast.109)
  subtract.111 = f32[128]{0} subtract(get.58, multiply.110)
  transpose.91 = f32[128,100]{0,1} transpose(map.66), dimensions={1,0}
  dot.92 = f32[128,32]{1,0} dot(transpose.91, map.90), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.113 = f32[128,32]{1,0} broadcast(constant.104), dimensions={}
  multiply.114 = f32[128,32]{1,0} multiply(dot.92, broadcast.113)
  subtract.115 = f32[128,32]{1,0} subtract(get.59, multiply.114)
  reduce.94 = f32[32]{0} reduce(map.90, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.117 = f32[32]{0} broadcast(constant.104), dimensions={}
  multiply.118 = f32[32]{0} multiply(reduce.94, broadcast.117)
  subtract.119 = f32[32]{0} subtract(get.60, multiply.118)
  transpose.84 = f32[32,100]{0,1} transpose(map.70), dimensions={1,0}
  dot.85 = f32[32,10]{1,0} dot(transpose.84, subtract.83), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  broadcast.121 = f32[32,10]{1,0} broadcast(constant.104), dimensions={}
  multiply.122 = f32[32,10]{1,0} multiply(dot.85, broadcast.121)
  subtract.123 = f32[32,10]{1,0} subtract(get.61, multiply.122)
  reduce.87 = f32[10]{0} reduce(subtract.83, constant.79), dimensions={0}, to_apply=add_F32.1
  broadcast.125 = f32[10]{0} broadcast(constant.104), dimensions={}
  multiply.126 = f32[10]{0} multiply(reduce.87, broadcast.125)
  subtract.127 = f32[10]{0} subtract(get.62, multiply.126)
  get.56 = pred[] get-tuple-element(get.53), index=2
  ROOT tuple.128 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(subtract.107, subtract.111, subtract.115, subtract.119, subtract.123, subtract.127, get.56)
}
condition.129 {
  prev.130 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) parameter(0)
  ROOT get.131 = pred[] get-tuple-element(prev.130), index=6
}
ENTRY MnistTrainingLoopWithInfeed.140 {
  layer1_weights.1 = f32[784,128]{1,0} parameter(0)
  layer1_biases.2 = f32[128]{0} parameter(1)
  layer2_weights.3 = f32[128,32]{1,0} parameter(2)
  layer2_biases.4 = f32[32]{0} parameter(3)
  layer3_weights.5 = f32[32,10]{1,0} parameter(4)
  layer3_biases.6 = f32[10]{0} parameter(5)
  constant.7 = pred[] constant(true)
  tuple.8 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) tuple(layer1_weights.1, layer1_biases.2, layer2_weights.3, layer2_biases.4, layer3_weights.5, layer3_biases.6, constant.7)
  while.132 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}, pred[]) while(tuple.8), condition=condition.129, body=body.49
  get.133 = f32[784,128]{1,0} get-tuple-element(while.132), index=0
  get.134 = f32[128]{0} get-tuple-element(while.132), index=1
  get.135 = f32[128,32]{1,0} get-tuple-element(while.132), index=2
  get.136 = f32[32]{0} get-tuple-element(while.132), index=3
  get.137 = f32[32,10]{1,0} get-tuple-element(while.132), index=4
  get.138 = f32[10]{0} get-tuple-element(while.132), index=5
  ROOT tuple.139 = (f32[784,128]{1,0}, f32[128]{0}, f32[128,32]{1,0}, f32[32]{0}, f32[32,10]{1,0}, f32[10]{0}) tuple(get.133, get.134, get.135, get.136, get.137, get.138)
}
)";
// Test fixture with helpers that check the semantic label the analysis
// assigned to a named instruction (at the root shape index). HasLabel logs
// the instruction and its semantics to ease debugging of failures.
class HloValueSemanticsAnalysisTest : public HloTestBase {
 public:
  // Returns true iff the semantics of `instruction_name` carry
  // `expected_label`.
  bool HasLabel(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name,
                const HloValueSemanticLabel& expected_label) {
    HloInstruction* instruction = FindInstruction(module, instruction_name);
    const HloValueSemantics* semantics =
        hlo_value_semantics_analysis.GetSemantics(instruction);
    LOG(INFO) << "instruction: " << instruction->ToString()
              << semantics->ToString();
    return semantics->label() == expected_label;
  }
  // Per-label convenience wrappers around HasLabel.
  bool IsStatic(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kStatic);
  }
  bool IsWeight(const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
                HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kWeight);
  }
  bool IsActivation(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kActivation);
  }
  bool IsActivationGradient(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kActivationGradient);
  }
  bool IsWeightGradient(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kWeightGradient);
  }
  bool IsTupleOrToken(
      const HloValueSemanticsAnalysis& hlo_value_semantics_analysis,
      HloModule* module, absl::string_view instruction_name) {
    return HasLabel(hlo_value_semantics_analysis, module, instruction_name,
                    HloValueSemanticLabel::kTupleOrToken);
  }
};
// Single-layer training step: the parameters (and the parameter-gradient
// matmul dot.2) are labeled Weight, the forward matmul dot.0 is an
// Activation, and the ReLU mask select.35 is Static.
TEST_F(HloValueSemanticsAnalysisTest, OneMatmul) {
  const std::string module_str = R"(
HloModule OneMatmul

region_0.39 {
  Arg_0.40 = f32[] parameter(0)
  Arg_1.41 = f32[] parameter(1)
  ROOT add.42 = f32[] add(Arg_0.40, Arg_1.41)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_7.8 = f32[4,32]{1,0} parameter(1), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_7.8), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.2 = f32[4,128]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[2,1]0,1}
  maximum.33 = f32[4,128]{1,0} maximum(dot.0, broadcast.2), sharding={devices=[2,1]0,1}
  compare.34 = pred[4,128]{1,0} compare(dot.0, maximum.33), direction=EQ, sharding={devices=[2,1]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.1 = f32[4,128]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[2,1]0,1}
  select.35 = f32[4,128]{1,0} select(compare.34, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
  dot.2 = f32[32,128]{0,1} dot(copy, select.35), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.11 = f32[] constant(-0.01), sharding={replicated}
  broadcast.12 = f32[32,128]{1,0} broadcast(constant.11), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.52 = f32[32,128]{0,1} multiply(dot.2, broadcast.12), sharding={devices=[2,1]0,1}
  add.93 = f32[32,128]{1,0} add(Arg_1.2, multiply.52), sharding={devices=[2,1]0,1}
  reduce.43 = f32[] reduce(maximum.33, constant.5), dimensions={0,1}, to_apply=region_0.39, sharding={replicated}
  ROOT tuple.109 = (f32[32,128]{1,0}, f32[]) tuple(add.93, reduce.43), sharding={{devices=[2,1]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, 1,
                                                2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.35"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
}
// A conditional whose branches include an async abs computation: the
// conditional's tuple-shaped result is labeled TupleOrToken.
TEST_F(HloValueSemanticsAnalysisTest, HandleConditional) {
  const std::string module_str = R"(
HloModule Module

branch0 {
  tparam = f32[4] parameter(0)
  tgte1 = f32[4] ceil(tparam)
  ROOT tuple = (f32[4], f32[4]) tuple(tparam, tgte1)
}

branch1 {
  fparam = f32[4] parameter(0)
  %async-start = ((f32[4]), f32[4], s32[]) abs-start(f32[4] fparam), async_execution_thread="parallel_thread"
  %async-done = f32[4] abs-done(((f32[4]), f32[4], s32[]) %async-start)
  ROOT tuple = (f32[4], f32[4]) tuple(fparam, %async-done)
}

ENTRY entry {
  p0 = f32[4] parameter(0)
  b0 = s32[] parameter(1)
  ROOT conditional = (f32[4], f32[4]) conditional(b0, p0, p0),
    branch_computations={branch0, branch1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, 1,
                                                2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_TRUE(IsTupleOrToken(*hlo_value_semantics_analysis, module.get(),
                             "conditional"));
}
// Two stacked matmul layers: both forward dots (dot.0, dot.1) and both
// weight-gradient dots (dot.5, dot.6) are Activations, the backprop dot.2 is
// a Weight, the ReLU mask select.40 is Static, and parameters are not
// Activations.
TEST_F(HloValueSemanticsAnalysisTest, TwoMatmuls) {
  const std::string module_str = R"(
HloModule TwoMatmuls

region_0.44 {
  Arg_0.45 = f32[] parameter(0)
  Arg_1.46 = f32[] parameter(1)
  ROOT add.47 = f32[] add(Arg_0.45, Arg_1.46)
}

ENTRY entry {
  Arg_1.2 = f32[32,128]{1,0} parameter(0), sharding={devices=[2,1]0,1}
  Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
  copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
  dot.0 = f32[4,128]{1,0} dot(copy, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  Arg_2.3 = f32[128,8]{1,0} parameter(1), sharding={devices=[1,2]0,1}
  dot.1 = f32[4,8]{1,0} dot(dot.0, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  constant.5 = f32[] constant(0), sharding={replicated}
  broadcast.1 = f32[4,8]{1,0} broadcast(constant.5), dimensions={}, sharding={devices=[1,2]0,1}
  maximum.38 = f32[4,8]{1,0} maximum(dot.1, broadcast.1), sharding={devices=[1,2]0,1}
  compare.39 = pred[4,8]{1,0} compare(dot.1, maximum.38), direction=EQ, sharding={devices=[1,2]0,1}
  constant.4 = f32[] constant(1), sharding={replicated}
  broadcast.0 = f32[4,8]{1,0} broadcast(constant.4), dimensions={}, sharding={devices=[1,2]0,1}
  select.40 = f32[4,8]{1,0} select(compare.39, broadcast.0, broadcast.1), sharding={devices=[1,2]0,1}
  dot.2 = f32[4,128]{1,0} dot(select.40, Arg_2.3), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
  dot.5 = f32[32,128]{0,1} dot(copy, dot.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
  constant.12 = f32[] constant(-0.01), sharding={replicated}
  broadcast.13 = f32[32,128]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[2,1]0,1}
  multiply.68 = f32[32,128]{0,1} multiply(dot.5, broadcast.13), sharding={devices=[2,1]0,1}
  add.79 = f32[32,128]{1,0} add(Arg_1.2, multiply.68), sharding={devices=[2,1]0,1}
  dot.6 = f32[128,8]{0,1} dot(dot.0, select.40), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
  broadcast.11 = f32[128,8]{1,0} broadcast(constant.12), dimensions={}, sharding={devices=[1,2]0,1}
  multiply.69 = f32[128,8]{0,1} multiply(dot.6, broadcast.11), sharding={devices=[1,2]0,1}
  add.80 = f32[128,8]{1,0} add(Arg_2.3, multiply.69), sharding={devices=[1,2]0,1}
  reduce.48 = f32[] reduce(maximum.38, constant.5), dimensions={0,1}, to_apply=region_0.44, sharding={replicated}
  ROOT tuple.95 = (f32[32,128]{1,0}, f32[128,8]{1,0}, f32[]) tuple(add.79, add.80, reduce.48), sharding={{devices=[2,1]0,1}, {devices=[1,2]0,1}, {replicated}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(module_str, 1,
                                                2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloValueSemanticsAnalysis> hlo_value_semantics_analysis,
      HloValueSemanticsAnalysis::Run(*module));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "copy"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_1.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.0"));
  EXPECT_FALSE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "Arg_2.3"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.1"));
  EXPECT_TRUE(
      IsStatic(*hlo_value_semantics_analysis, module.get(), "select.40"));
  EXPECT_TRUE(IsWeight(*hlo_value_semantics_analysis, module.get(), "dot.2"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.5"));
  EXPECT_TRUE(
      IsActivation(*hlo_value_semantics_analysis, module.get(), "dot.6"));
}
TEST_F(HloValueSemanticsAnalysisTest, RepeatWhile) {
const std::string module_str = R"(
HloModule RepeatWhile
region_0.52 {
arg_tuple.53 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.54 = s32[] get-tuple-element(arg_tuple.53), index=0, sharding={replicated}
constant.61 = s32[] constant(1), sharding={replicated}
add.105 = s32[] add(get-tuple-element.54, constant.61), sharding={replicated}
get-tuple-element.55 = f32[4,32]{1,0} get-tuple-element(arg_tuple.53), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.59 = f32[3,32,128]{2,1,0} get-tuple-element(arg_tuple.53), index=5, sharding={devices=[1,2,1]0,1}
constant.69 = s32[] constant(0), sharding={replicated}
compare.70 = pred[] compare(get-tuple-element.54, constant.69), direction=LT, sharding={replicated}
constant.68 = s32[] constant(3), sharding={replicated}
add.71 = s32[] add(get-tuple-element.54, constant.68), sharding={replicated}
select.72 = s32[] select(compare.70, add.71, get-tuple-element.54), sharding={replicated}
dynamic-slice.73 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.59, select.72, constant.69, constant.69), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.74 = f32[32,128]{1,0} reshape(dynamic-slice.73), sharding={devices=[2,1]0,1}
dot.0 = f32[4,128]{1,0} dot(get-tuple-element.55, reshape.74), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
get-tuple-element.60 = f32[3,128,32]{2,1,0} get-tuple-element(arg_tuple.53), index=6, sharding={devices=[1,1,2]0,1}
dynamic-slice.78 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.60, select.72, constant.69, constant.69), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.79 = f32[128,32]{1,0} reshape(dynamic-slice.78), sharding={devices=[1,2]0,1}
dot.1 = f32[4,32]{1,0} dot(dot.0, reshape.79), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
constant.43 = f32[] constant(0), sharding={replicated}
broadcast.2 = f32[4,32]{1,0} broadcast(constant.43), dimensions={}, sharding={devices=[2,1]0,1}
maximum.84 = f32[4,32]{1,0} maximum(dot.1, broadcast.2), sharding={devices=[2,1]0,1}
get-tuple-element.56 = f32[3,4,128]{2,1,0} get-tuple-element(arg_tuple.53), index=2, sharding={devices=[1,2,1]0,1}
reshape.90 = f32[1,4,128]{2,1,0} reshape(dot.0), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.94 = f32[3,4,128]{2,1,0} dynamic-update-slice(get-tuple-element.56, reshape.90, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.57 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=3, sharding={devices=[1,2,1]0,1}
compare.85 = pred[4,32]{1,0} compare(dot.1, maximum.84), direction=EQ, sharding={devices=[2,1]0,1}
constant.42 = f32[] constant(1), sharding={replicated}
broadcast.1 = f32[4,32]{1,0} broadcast(constant.42), dimensions={}, sharding={devices=[2,1]0,1}
select.86 = f32[4,32]{1,0} select(compare.85, broadcast.1, broadcast.2), sharding={devices=[2,1]0,1}
reshape.95 = f32[1,4,32]{2,1,0} reshape(select.86), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.99 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.57, reshape.95, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
get-tuple-element.58 = f32[3,4,32]{2,1,0} get-tuple-element(arg_tuple.53), index=4, sharding={devices=[1,2,1]0,1}
reshape.100 = f32[1,4,32]{2,1,0} reshape(get-tuple-element.55), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.104 = f32[3,4,32]{2,1,0} dynamic-update-slice(get-tuple-element.58, reshape.100, select.72, constant.69, constant.69), sharding={devices=[1,2,1]0,1}
ROOT tuple.106 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(add.105, maximum.84, dynamic-update-slice.94, dynamic-update-slice.99, dynamic-update-slice.104, get-tuple-element.59, get-tuple-element.60), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
}
region_1.107 {
arg_tuple.108 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.109 = s32[] get-tuple-element(arg_tuple.108), index=0, sharding={replicated}
constant.116 = s32[] constant(3)
ROOT compare.117 = pred[] compare(get-tuple-element.109, constant.116), direction=LT
}
region_2.126 {
Arg_0.127 = f32[] parameter(0)
Arg_1.128 = f32[] parameter(1)
ROOT add.129 = f32[] add(Arg_0.127, Arg_1.128)
}
wide.wide.region_3.156.clone.clone {
wide_param.7 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.185 = s32[] get-tuple-element(wide_param.7), index=0, sharding={replicated}
constant.34 = s32[] constant(1), sharding={replicated}
add.14 = s32[] add(get-tuple-element.185, constant.34), sharding={replicated}
get-tuple-element.186 = f32[4,32]{1,0} get-tuple-element(wide_param.7), index=1, sharding={devices=[2,1]0,1}
get-tuple-element.190 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=5, sharding={devices=[1,2,1]0,1}
constant.35 = s32[] constant(3), sharding={replicated}
subtract.3 = s32[] subtract(constant.35, get-tuple-element.185), sharding={replicated}
constant.6..sunk.4 = s32[] constant(-1), sharding={replicated}
add.15 = s32[] add(subtract.3, constant.6..sunk.4), sharding={replicated}
constant.36 = s32[] constant(0), sharding={replicated}
compare.7 = pred[] compare(add.15, constant.36), direction=LT, sharding={replicated}
constant.26..sunk.1 = s32[] constant(2), sharding={replicated}
add.16 = s32[] add(subtract.3, constant.26..sunk.1), sharding={replicated}
select.4 = s32[] select(compare.7, add.16, add.15), sharding={replicated}
dynamic-slice.15 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.190, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.21 = f32[4,32]{1,0} reshape(dynamic-slice.15), sharding={devices=[2,1]0,1}
multiply.3 = f32[4,32]{1,0} multiply(get-tuple-element.186, reshape.21), sharding={devices=[2,1]0,1}
get-tuple-element.192 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=7, sharding={devices=[1,1,2]0,1}
dynamic-slice.16 = f32[1,128,32]{2,1,0} dynamic-slice(get-tuple-element.192, select.4, constant.36, constant.36), dynamic_slice_sizes={1,128,32}, sharding={devices=[1,1,2]0,1}
reshape.22 = f32[128,32]{1,0} reshape(dynamic-slice.16), sharding={devices=[1,2]0,1}
dot.20 = f32[4,128]{1,0} dot(multiply.3, reshape.22), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,1]0,1}
get-tuple-element.191 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=6, sharding={devices=[1,2,1]0,1}
dynamic-slice.17 = f32[1,32,128]{2,1,0} dynamic-slice(get-tuple-element.191, select.4, constant.36, constant.36), dynamic_slice_sizes={1,32,128}, sharding={devices=[1,2,1]0,1}
reshape.23 = f32[32,128]{1,0} reshape(dynamic-slice.17), sharding={devices=[2,1]0,1}
dot.21 = f32[4,32]{1,0} dot(dot.20, reshape.23), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[1,2]0,1}
get-tuple-element.187 = f32[3,32,128]{2,1,0} get-tuple-element(wide_param.7), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.193 = f32[3,4,32]{2,1,0} get-tuple-element(wide_param.7), index=8, sharding={devices=[1,2,1]0,1}
dynamic-slice.18 = f32[1,4,32]{2,1,0} dynamic-slice(get-tuple-element.193, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,32}, sharding={devices=[1,2,1]0,1}
reshape.24 = f32[4,32]{1,0} reshape(dynamic-slice.18), sharding={devices=[2,1]0,1}
dot.22 = f32[32,128]{0,1} dot(reshape.24, dot.20), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[2,1]0,1}
reshape.25 = f32[1,32,128]{2,1,0} reshape(dot.22), sharding={devices=[1,2,1]0,1}
dynamic-update-slice.6 = f32[3,32,128]{2,1,0} dynamic-update-slice(get-tuple-element.187, reshape.25, select.4, constant.36, constant.36), sharding={devices=[1,2,1]0,1}
get-tuple-element.188 = f32[3,128,32]{2,1,0} get-tuple-element(wide_param.7), index=3, sharding={devices=[1,1,2]0,1}
get-tuple-element.189 = f32[3,4,128]{2,1,0} get-tuple-element(wide_param.7), index=4, sharding={devices=[1,2,1]0,1}
dynamic-slice.19 = f32[1,4,128]{2,1,0} dynamic-slice(get-tuple-element.189, select.4, constant.36, constant.36), dynamic_slice_sizes={1,4,128}, sharding={devices=[1,2,1]0,1}
reshape.26 = f32[4,128]{1,0} reshape(dynamic-slice.19), sharding={devices=[2,1]0,1}
dot.23 = f32[128,32]{0,1} dot(reshape.26, multiply.3), lhs_contracting_dims={0}, rhs_contracting_dims={0}, sharding={devices=[1,2]0,1}
reshape.27 = f32[1,128,32]{2,1,0} reshape(dot.23), sharding={devices=[1,1,2]0,1}
dynamic-update-slice.7 = f32[3,128,32]{2,1,0} dynamic-update-slice(get-tuple-element.188, reshape.27, select.4, constant.36, constant.36), sharding={devices=[1,1,2]0,1}
ROOT tuple.19 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(add.14, dot.21, dynamic-update-slice.6, dynamic-update-slice.7, get-tuple-element.189, get-tuple-element.190, get-tuple-element.191, get-tuple-element.192, get-tuple-element.193), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
}
wide.wide.region_4.218.clone.clone {
wide_param.6 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) parameter(0), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
get-tuple-element.184 = s32[] get-tuple-element(wide_param.6), index=0, sharding={replicated}
constant.28 = s32[] constant(3)
ROOT compare.6 = pred[] compare(get-tuple-element.184, constant.28), direction=LT
}
ENTRY entry {
Arg_1.2 = f32[3,32,128]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1}
constant.45 = s32[] constant(0), sharding={replicated}
constant.23 = f32[] constant(1), sharding={replicated}
broadcast.24 = f32[4,32]{1,0} broadcast(constant.23), dimensions={}, sharding={devices=[1,2]0,1}
constant.21 = f32[] constant(0), sharding={replicated}
broadcast.22 = f32[3,32,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.20 = f32[3,128,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,1,2]0,1}
Arg_8.9 = f32[4,32]{1,0} parameter(2), sharding={devices=[2,1]0,1}
copy = f32[4,32]{1,0} copy(Arg_8.9), sharding={devices=[2,1]0,1}
broadcast.28 = f32[3,4,128]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
broadcast.26 = f32[3,4,32]{2,1,0} broadcast(constant.21), dimensions={}, sharding={devices=[1,2,1]0,1}
Arg_2.3 = f32[3,128,32]{2,1,0} parameter(1), sharding={devices=[1,1,2]0,1}
tuple.42 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) tuple(constant.45, copy, broadcast.28, broadcast.26, broadcast.26, Arg_1.2, Arg_2.3), sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
while.118 = (s32[], f32[4,32]{1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}) while(tuple.42), condition=region_1.107, body=region_0.52, sharding={{replicated}, {devices=[2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}}
get-tuple-element.179 = f32[3,4,128]{2,1,0} get-tuple-element(while.118), index=2, sharding={devices=[1,2,1]0,1}
get-tuple-element.180 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=3, sharding={devices=[1,2,1]0,1}
get-tuple-element.183 = f32[3,4,32]{2,1,0} get-tuple-element(while.118), index=4, sharding={devices=[1,2,1]0,1}
tuple.18 = (s32[], f32[4,32]{1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,128]{2,1,0}, f32[3,4,32]{2,1,0}, f32[3,32,128]{2,1,0}, f32[3,128,32]{2,1,0}, f32[3,4,32]{2,1,0}) tuple(constant.45, broadcast.24, broadcast.22, broadcast.20, get-tuple-element.179, get-tuple-element.180, Arg_1.2, Arg_2.3, get-tuple-element.183), sharding={{replicated}, {devices=[1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,2,1]0,1}, {devices=[1,1,2]0,1}, {devices=[1,2,1]0,1}}
while.3 = (s32[], f3 |
1,987 | cpp | tensorflow/tensorflow | convert_async_collectives_to_sync | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc | #ifndef XLA_SERVICE_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#define XLA_SERVICE_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#include <utility>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that rewrites pairs of scheduled asynchronous collective ops
// (e.g. all-reduce-start / all-reduce-done) back into their synchronous
// variants when nothing but NOPs is scheduled between the start and the done,
// i.e. when the asynchronous form buys no overlap.
class ConvertAsyncCollectivesToSync : public HloModulePass {
 public:
  // `is_nop` identifies instructions that may sit between an async start and
  // its matching done without preventing the conversion (e.g. bitcasts).
  // An empty predicate means any intervening instruction blocks conversion.
  explicit ConvertAsyncCollectivesToSync(HloPredicate is_nop = {})
      : is_nop_(is_nop) {}
  absl::string_view name() const override {
    return "convert-async-collectives-to-sync";
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Hook that subclasses may override to customize how matched
  // (async_start, async_done) pairs are rewritten; the default simply
  // replaces them with synchronous collectives.
  virtual absl::Status ConvertAsyncInstructionsToSync(
      HloComputation* computation,
      absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
      const {
    return ReplaceAsyncInstructionsWithSync(computation, async_pairs);
  }
  // Replaces each (async_start, async_done) pair with the equivalent
  // synchronous collective and updates the computation's schedule in place.
  static absl::Status ReplaceAsyncInstructionsWithSync(
      HloComputation* computation,
      absl::Span<const std::pair<HloInstruction*, HloInstruction*>>
          async_pairs);
  // Frontend attribute key under which the name of the original async start
  // op is recorded on the replacement sync instruction.
  static constexpr char kAsyncCollectiveNameAttributeName[] =
      "async_collective_name";

 private:
  absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
  // Predicate marking instructions that are harmless between start and done.
  HloPredicate is_nop_;
};
}
#endif
#include "xla/service/convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
// Builds the synchronous equivalent of the (async_start, async_done) pair,
// transfers all uses of async_done to it, and removes the async pair from
// the computation. Returns the newly added synchronous instruction.
absl::StatusOr<HloInstruction*> CreateSyncVariant(HloInstruction* async_start,
                                                  HloInstruction* async_done) {
  HloInstruction* sync_instruction = nullptr;
  HloComputation* computation = async_start->parent();
  const HloOpcode async_start_op = async_start->opcode();
  switch (async_start_op) {
    case HloOpcode::kAllReduceStart: {
      auto* async_ar = Cast<HloAllReduceInstruction>(async_start);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateAllReduce(
              async_done->shape(), async_ar->operands(), async_ar->to_apply(),
              async_ar->device_list(), async_ar->constrain_layout(),
              async_ar->channel_id(), async_ar->use_global_device_ids()));
      break;
    }
    case HloOpcode::kAllGatherStart: {
      auto* async_ag = Cast<HloAllGatherInstruction>(async_start);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateAllGather(
              async_done->shape(), async_ag->operands(),
              async_ag->all_gather_dimension(), async_ag->device_list(),
              async_ag->constrain_layout(), async_ag->channel_id(),
              async_ag->use_global_device_ids()));
      break;
    }
    case HloOpcode::kCollectivePermuteStart: {
      auto* async_cp = Cast<HloCollectivePermuteInstruction>(async_start);
      TF_RET_CHECK(async_cp->operand_count() == 1);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateCollectivePermute(
              async_done->shape(), async_cp->mutable_operand(0),
              async_cp->source_target_pairs(), async_cp->channel_id()));
      break;
    }
    case HloOpcode::kAsyncStart: {
      // Generic async wrapper (covers ops without a dedicated start opcode,
      // such as reduce-scatter or all-to-all): clone the wrapped instruction
      // with the async-start's operands and the done's result shape.
      auto* as_start = Cast<HloAsyncInstruction>(async_start);
      HloInstruction* wrapped = as_start->async_wrapped_instruction();
      sync_instruction =
          computation->AddInstruction(wrapped->CloneWithNewOperands(
              async_done->shape(), as_start->operands()));
      break;
    }
    default:
      return Internal("Unexpected async start op %s",
                      HloOpcodeString(async_start->opcode()));
  }
  // Carry over metadata and backend config from the async start op.
  sync_instruction->set_metadata(async_start->metadata());
  sync_instruction->CopyBackendConfigFrom(async_start);
  TF_RETURN_IF_ERROR(async_done->ReplaceAllUsesWith(sync_instruction));
  // Control dependencies on the pair are dropped (not transferred to the
  // sync op) so the instructions can be removed below.
  TF_RETURN_IF_ERROR(async_start->DropAllControlDeps());
  TF_RETURN_IF_ERROR(async_done->DropAllControlDeps());
  // Removing async_done may already remove async_start as an unused operand;
  // track whether that happened so we do not try to remove it twice.
  bool is_async_start_removed = false;
  auto track_async_start_removed = [&](const HloInstruction* instr) {
    is_async_start_removed |= instr == async_start;
  };
  TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(
      async_done, track_async_start_removed));
  if (!is_async_start_removed) {
    TF_RETURN_IF_ERROR(computation->RemoveInstruction(async_start));
  }
  return sync_instruction;
}
// Replaces each matched (start, done) pair with a synchronous collective and
// rewrites the computation's schedule so that the start disappears and the
// done's slot is taken by the new sync instruction.
absl::Status
ConvertAsyncCollectivesToSync::ReplaceAsyncInstructionsWithSync(
    HloComputation* computation,
    absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs) {
  // Maps each replaced async op to the instruction that should occupy its
  // slot in the schedule; nullptr means "drop from the schedule".
  absl::flat_hash_map<HloInstruction*, HloInstruction*> schedule_replacement;
  for (const auto& [start_op, done_op] : async_pairs) {
    TF_ASSIGN_OR_RETURN(HloInstruction * sync_op,
                        CreateSyncVariant(start_op, done_op));
    // Record the original async start op's name on the sync replacement so
    // downstream tooling can correlate the two.
    FrontendAttributes sync_attributes;
    (*sync_attributes.mutable_map())[kAsyncCollectiveNameAttributeName] =
        start_op->name();
    sync_op->add_frontend_attributes(std::move(sync_attributes));
    schedule_replacement[start_op] = nullptr;
    schedule_replacement[done_op] = sync_op;
  }
  // Rebuild the schedule, applying the replacement map.
  HloModule* module = computation->parent();
  const HloInstructionSequence& old_sequence =
      module->schedule().sequence(computation);
  std::vector<HloInstruction*> rewritten_sequence;
  rewritten_sequence.reserve(old_sequence.size());
  for (HloInstruction* scheduled_op : old_sequence.instructions()) {
    const auto replacement = schedule_replacement.find(scheduled_op);
    if (replacement == schedule_replacement.end()) {
      rewritten_sequence.push_back(scheduled_op);
    } else if (replacement->second != nullptr) {
      rewritten_sequence.push_back(replacement->second);
    }
  }
  module->schedule().set_sequence(computation, rewritten_sequence);
  return absl::OkStatus();
}
// Scans the computation's schedule for async collective (start, done) pairs
// with only NOPs between them and converts each such pair to its synchronous
// form. Returns true if any conversion was performed.
absl::StatusOr<bool> ConvertAsyncCollectivesToSync::RunOnComputation(
    HloComputation* computation) {
  HloModule* module = computation->parent();
  std::vector<std::pair<HloInstruction*, HloInstruction*>> async_pairs;
  const HloInstructionSequence& sequence =
      module->schedule().sequence(computation);
  // Async starts still "in flight": their matching done has not been seen
  // and no intervening real (non-NOP) compute has occurred since.
  absl::flat_hash_set<HloInstruction*> in_flight_ops;
  for (HloInstruction* instruction : sequence.instructions()) {
    if (hlo_query::IsAsyncCollectiveStartOp(instruction)) {
      in_flight_ops.insert(instruction);
      VLOG(3) << "Found async start " << instruction->ToString();
    } else if (hlo_query::IsAsyncCollectiveDoneOp(instruction)) {
      VLOG(3) << "Found async done " << instruction->ToString();
      TF_RET_CHECK(instruction->operand_count() == 1);
      HloInstruction* matching_async_start = instruction->mutable_operand(0);
      if (in_flight_ops.erase(matching_async_start) == 1) {
        async_pairs.push_back({matching_async_start, instruction});
        // Fix: close the brace opened in the log message (was unbalanced).
        VLOG(3) << "Added pair: {" << matching_async_start->name() << ", "
                << instruction->name() << "}";
      }
    } else if (!in_flight_ops.empty() && (!is_nop_ || !is_nop_(instruction))) {
      // Real work overlaps the in-flight async ops, so converting them to
      // sync would lose that overlap: invalidate all pending starts.
      VLOG(3) << "Found intervening non-NOP instruction "
              << instruction->ToString();
      in_flight_ops.clear();
    }
  }
  if (async_pairs.empty()) {
    return false;
  }
  TF_RETURN_IF_ERROR(ConvertAsyncInstructionsToSync(computation, async_pairs));
  return true;
}
// Pass entry point: runs the conversion over every scheduled non-fusion
// computation. A schedule is required to know what overlaps the async ops,
// so unscheduled modules (and computations) are skipped.
absl::StatusOr<bool> ConvertAsyncCollectivesToSync::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!module->has_schedule()) {
    VLOG(3) << "Skipping as module is not scheduled";
    return false;
  }
  bool any_converted = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    if (!module->schedule().is_computation_scheduled(computation)) {
      VLOG(3) << "Skipping computation" << computation->name()
              << " as it is not scheduled";
      continue;
    }
    TF_ASSIGN_OR_RETURN(const bool converted_here,
                        RunOnComputation(computation));
    any_converted = any_converted || converted_here;
  }
  return any_converted;
}
} | #include "xla/service/convert_async_collectives_to_sync.h"
#include <memory>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
// Test fixture providing helpers to run the pass and inspect its output.
class ConvertAsyncCollectivesToSyncTest : public HloTestBase {
 public:
  // Runs the pass on `module` with the given NOP predicate and checks that
  // it reported `expect_change`.
  absl::Status RunPass(HloModule *module, bool expect_change,
                       HloPredicate is_nop = {}) {
    TF_ASSIGN_OR_RETURN(bool changed,
                        ConvertAsyncCollectivesToSync{is_nop}.Run(module));
    EXPECT_EQ(changed, expect_change);
    return absl::OkStatus();
  }
  // Returns the name of the original async start op that the pass recorded
  // on the replacement sync instruction's frontend attributes.
  absl::string_view GetAsyncName(const HloInstruction *inst) {
    const auto &map = inst->frontend_attributes().map();
    return map.at(
        ConvertAsyncCollectivesToSync::kAsyncCollectiveNameAttributeName);
  }
  // NOP predicate accepting bitcast/get-tuple-element/parameter, used by
  // tests that allow benign ops between an async start and its done.
  HloPredicate is_nop_simple_ =
      HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
                       HloOpcode::kParameter>;
};
// An all-reduce-start immediately followed by its done is converted to a
// sync all-reduce that keeps the channel id and records the start op's name.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::ReplicaId()));
  const auto *ar = Cast<HloAllReduceInstruction>(root);
  EXPECT_TRUE(ar->channel_id().has_value());
  EXPECT_EQ(ar->channel_id().value(), 3);
  EXPECT_EQ(GetAsyncName(ar), "start");
}
// A NOP (bitcast) between start and done does not block conversion when the
// pass is given a predicate that classifies it as a NOP.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
id2 = f32[] bitcast(id)
ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::ReplicaId()));
  const auto *ar = Cast<HloAllReduceInstruction>(root);
  EXPECT_TRUE(ar->channel_id().has_value());
  EXPECT_EQ(ar->channel_id().value(), 3);
  EXPECT_THAT(ar, m::ReplicaGroups({{0, 1}, {2, 3}}));
  EXPECT_EQ(GetAsyncName(ar), "start");
}
// Real compute (an add) between start and done means the async op overlaps
// useful work, so the pass must not convert it (and reports no change).
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
id2 = u32[] add(id, id)
ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), false));
}
// all-gather-start/done converts to a sync all-gather that preserves the
// channel id and gather dimension.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
a1 = u32[1, 2] parameter(0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
ROOT allgather = u32[2,2] all-gather-done(ags)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllGather(m::Parameter(0)));
  const auto *ag = Cast<HloAllGatherInstruction>(root);
  EXPECT_TRUE(ag->channel_id().has_value());
  EXPECT_EQ(ag->channel_id().value(), 3);
  EXPECT_EQ(ag->all_gather_dimension(), 0);
  EXPECT_EQ(GetAsyncName(ag), "ags");
}
// collective-permute-start/done converts to a sync collective-permute that
// preserves the source-target pairs.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
p = u32[2] parameter(0)
start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
ROOT done = u32[2] collective-permute-done(start)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::CollectivePermute(m::Parameter(0)));
  const auto *cp = Cast<HloCollectivePermuteInstruction>(root);
  EXPECT_THAT(cp, m::SourceTargetPairs({{0, 1}, {1, 0}}));
  EXPECT_EQ(GetAsyncName(cp), "start");
}
// Ops wrapped in generic async-start/async-done (no dedicated start opcode),
// here a reduce-scatter, are unwrapped to the synchronous op with replica
// groups and scatter dimension preserved.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
dimensions={0}, to_apply=add
}
ENTRY main {
data = u32[8] parameter(0)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::ReduceScatter(m::Parameter(0)));
  const auto *rs = Cast<HloReduceScatterInstruction>(root);
  EXPECT_THAT(rs, m::ReplicaGroups({{0, 3}, {1, 2}}));
  EXPECT_EQ(rs->scatter_dimension(), 0);
  EXPECT_EQ(GetAsyncName(rs), "rs-start");
}
// Same generic async-wrapper path for all-to-all: the sync op keeps the
// replica groups and split dimension.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}
ENTRY test_computation {
a1 = u32[2] parameter(0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllToAll(m::Parameter(0)));
  const auto *a2a = Cast<HloAllToAllInstruction>(root);
  EXPECT_THAT(a2a, m::ReplicaGroups({{0, 1}, {2, 3}}));
  EXPECT_TRUE(a2a->split_dimension().has_value());
  EXPECT_EQ(a2a->split_dimension().value(), 0);
  EXPECT_EQ(GetAsyncName(a2a), "a2a-start");
}
// Control dependencies between pairs are dropped during conversion; both
// pairs still convert to sync all-reduces.
TEST_F(ConvertAsyncCollectivesToSyncTest, ControlDeps) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
done1 = u32[] all-reduce-done(start1)
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Two pairs in flight, done in start order (start1, start2, done1, done2):
// both convert, since only async ops sit between each start and its done.
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done1 = u32[] all-reduce-done(start1)
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Nested pairs (start1, start2, done2, done1): both convert.
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Nested pairs where real compute (id2) appears before done1: the inner pair
// converts but the outer one stays async (root matches AllReduceDone).
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
id2 = u32[] add(done2, done2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduceDone(), m::AllReduce()));
}
}
} |
1,988 | cpp | tensorflow/tensorflow | host_memory_transfer_asyncifier | third_party/xla/xla/service/host_memory_transfer_asyncifier.cc | third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc | #ifndef XLA_SERVICE_HOST_MEMORY_TRANSFER_ASYNCIFIER_H_
#define XLA_SERVICE_HOST_MEMORY_TRANSFER_ASYNCIFIER_H_
#include <cstdint>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites host<->device memory transfers expressed as
// dynamic-slice, dynamic-update-slice, or copy instructions into
// async-start/async-done instruction pairs, so the transfers can overlap
// with other work. `host_memory_space_color` identifies which layout
// memory-space value denotes host memory.
class HostMemoryTransferAsyncifier : public HloModulePass {
 public:
  explicit HostMemoryTransferAsyncifier(int64_t host_memory_space_color)
      : kHostMemorySpaceColor(host_memory_space_color) {}
  ~HostMemoryTransferAsyncifier() override = default;

  absl::string_view name() const override {
    return "host-memory-transfer-asyncifier";
  }

  using HloPassInterface::Run;
  // Returns true iff at least one instruction was converted to async form.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  // Layout memory-space color that marks buffers living in host memory.
  const int64_t kHostMemorySpaceColor;
};
}
#endif
#include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// DFS visitor that performs the actual rewrite for the asyncifier pass.
// An instruction is converted only when its layouts show a transfer between
// the host memory space (kHostMemorySpaceColor) and the default (device)
// memory space; every other memory-space combination is left untouched.
class HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {
 public:
  explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)
      : kHostMemorySpaceColor(host_memory_space_color) {}

  // True iff any instruction was rewritten into an async pair.
  bool Changed() const { return changed_; }

  // Opcodes without a dedicated handler are never converted.
  absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
    return absl::OkStatus();
  }

  // Converts a dynamic-slice whose operand lives in host memory and whose
  // result lives in the default (device) memory space, i.e. a host->device
  // read.
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {
    HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);
    // Memory spaces live in the layout, so both shapes must have one.
    if (!dynamic_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), " does not have a layout.");
    }
    if (!dynamic_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), "'s operand, ",
                            dynamic_slice_operand->name(),
                            ", does not have a layout.");
    }
    VLOG(3) << absl::StreamFormat(
        "\"%s\" from S(%d) to S(%d)", dynamic_slice->name(),
        dynamic_slice_operand->shape().layout().memory_space(),
        dynamic_slice->shape().layout().memory_space());
    // Source must be host memory...
    if (dynamic_slice_operand->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    // ...and the destination must be the default (device) memory space.
    if (dynamic_slice->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }
    VLOG(1) << "DynamicSlice \"" << dynamic_slice->name()
            << "\" is slicing from host memory. Converting to async.";
    // Context/byte-count shapes threaded through the async pair.
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);
    TF_ASSIGN_OR_RETURN(
        HloInstruction * async_done,
        dynamic_slice->parent()->CreateAsyncInstructions(
            dynamic_slice, {context_shape, transfer_bytes_shape}));
    (void)async_done;  // Only the rewrite's side effect is needed.
    MarkAsChanged();
    return absl::OkStatus();
  }

  // Converts a dynamic-update-slice whose update comes from device memory
  // and whose operand/result live in host memory, i.e. a device->host write.
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override {
    HloInstruction* dynamic_update_slice_operand =
        dynamic_update_slice->mutable_operand(0);
    HloInstruction* dynamic_update_slice_update =
        dynamic_update_slice->mutable_operand(1);
    if (!dynamic_update_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(),
                            " does not have a layout.");
    }
    if (!dynamic_update_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s operand, ",
                            dynamic_update_slice_operand->name(),
                            ", does not have a layout.");
    }
    if (!dynamic_update_slice_update->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s update, ",
                            dynamic_update_slice_update->name(),
                            ", does not have a layout.");
    }
    // Update must originate in the default (device) memory space.
    if (dynamic_update_slice_update->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }
    // Result must land in host memory.
    if (dynamic_update_slice->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    // DUS updates in place, so operand and result memory spaces must agree.
    if (dynamic_update_slice_operand->shape().layout().memory_space() !=
        dynamic_update_slice->shape().layout().memory_space()) {
      return InternalStrCat(
          "Unexpected that ", dynamic_update_slice_operand->name(),
          "'s memory space is not the same as the dynamic-update-slice.");
    }
    VLOG(1) << "DynamicUpdateSlice \"" << dynamic_update_slice->name()
            << "\" is slicing into host memory space. Converting to async.";
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_ASSIGN_OR_RETURN(HloInstruction * async_done,
                        dynamic_update_slice->parent()->CreateAsyncInstructions(
                            dynamic_update_slice, {context_shape}));
    (void)async_done;  // Only the rewrite's side effect is needed.
    MarkAsChanged();
    return absl::OkStatus();
  }

  // Converts a copy in either direction between host memory and the default
  // (device) memory space.
  absl::Status HandleCopy(HloInstruction* copy) override {
    HloInstruction* operand = copy->mutable_operand(0);
    if (!operand->shape().has_layout()) {
      return InternalStrCat(operand->name(), " does not have a layout.");
    }
    if (!copy->shape().has_layout()) {
      return InternalStrCat(copy->name(), " does not have a layout.");
    }
    const auto copy_src_memory_space = operand->shape().layout().memory_space();
    const auto copy_dst_memory_space = copy->shape().layout().memory_space();
    // Accept host->device and device->host; skip everything else
    // (host->host, device->device, or other memory-space colors).
    if (!((copy_src_memory_space == kHostMemorySpaceColor &&
           copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||
          (copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&
           copy_dst_memory_space == kHostMemorySpaceColor))) {
      VLOG(2)
          << "Skipping copy because it is not a copy between device memory and "
             "host memory: "
          << copy->ToString();
      return absl::OkStatus();
    }
    VLOG(1)
        << "Copy \"" << copy->name()
        << "\" is between device and host memory space. Converting to async.";
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_ASSIGN_OR_RETURN(
        HloInstruction * async_done,
        copy->parent()->CreateAsyncInstructions(copy, {context_shape}));
    (void)async_done;  // Only the rewrite's side effect is needed.
    MarkAsChanged();
    return absl::OkStatus();
  }

 private:
  // Layout memory-space color that marks host-resident buffers.
  const int64_t kHostMemorySpaceColor;
  bool changed_ = false;

  void MarkAsChanged() { changed_ = true; }
};
}
// Runs the asyncifier visitor over every non-fusion computation of `module`.
// Returns true iff at least one host<->device transfer instruction was
// rewritten into an async start/done pair.
// Note: `execution_threads` is currently unused by this pass.
absl::StatusOr<bool> HostMemoryTransferAsyncifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  HostMemoryTransferAsyncifierVisitor transfer_visitor(kHostMemorySpaceColor);
  // Fusion computations are intentionally skipped; only top-level and other
  // non-fusion computations are rewritten.
  for (HloComputation* const computation :
       module->MakeNonfusionComputations()) {
    TF_RETURN_IF_ERROR(computation->Accept(&transfer_visitor));
  }
  return transfer_visitor.Changed();
}
} | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
// Test fixture: parses an HLO string, verifies it, and runs the
// HostMemoryTransferAsyncifier pass with host memory space color 5
// (matching the S(5) annotations in the test HLO layouts).
class HostMemoryTransferAsyncifierTest : public HloTestBase {
 protected:
  // Convenience overload: parse `hlo_string` then run the pass on it.
  absl::StatusOr<bool> RunAsyncifier(absl::string_view hlo_string) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));
    return changed;
  }
  // Runs the pass on an already-built module; the pass expects an
  // unscheduled module.
  absl::StatusOr<bool> RunAsyncifier(HloModule* module) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);
    return asyncifier.Run(module);
  }
 private:
  // Memory-space color used for host memory throughout these tests.
  static constexpr int64_t kHostMemorySpaceColor{5};
};
// Host->host DUS: no device involvement, so the pass must not change it.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Device->device DUS: no host involvement, so the pass must not change it.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Host update into device result: not the device->host pattern the pass
// rewrites, so it must not change.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Device update written into host memory: this is the convertible pattern.
// The DUS must be wrapped into an async-start/async-done pair whose called
// computation contains the original dynamic-update-slice.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_update_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_update_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_update_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Host->host dynamic-slice: not a host/device transfer — no rewrite.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Device->device dynamic-slice: no host involvement — no rewrite.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Device operand sliced into host result: not the host->device pattern the
// pass rewrites for dynamic-slice — no rewrite.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Host operand sliced into device result: the convertible pattern. The
// dynamic-slice must become an async-start/async-done pair wrapping the
// original instruction.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Host->host copy: not a host/device transfer — no rewrite.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}
// Device->device copy: no host involvement — no rewrite.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}
// Device->host copy rewritten to a generic async-start/async-done pair.
// Disabled: the pass currently emits copy-start/copy-done for copies
// (see OldCopyFromDeviceToHost below) — presumably kept pending a switch
// to the generic async form; verify before re-enabling.
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}
// Device->host copy: current behavior emits a copy-start/copy-done pair.
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}
// Host->device copy rewritten to a generic async pair. Disabled for the
// same reason as DISABLED_CopyFromDeviceToHost above.
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}
// Host->device copy: current behavior emits a copy-start/copy-done pair.
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}
}
} |
1,989 | cpp | tensorflow/tensorflow | hlo_verifier | third_party/xla/xla/service/hlo_verifier.cc | third_party/xla/xla/service/hlo_verifier_test.cc | #ifndef XLA_SERVICE_HLO_VERIFIER_H_
#define XLA_SERVICE_HLO_VERIFIER_H_
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
using ShapeSizeFn = std::function<int64_t(const Shape&)>;
// Options controlling HLO verification. Setters follow a builder pattern:
// each returns `std::move(*this)` so calls can be chained on an rvalue,
// e.g. HloVerifierOpts{}.MakeLayoutSensitive().AllowMixedPrecision().
struct HloVerifierOpts {
  HloVerifierOpts&& MakeLayoutSensitive() {
    layout_sensitive = true;
    return std::move(*this);
  }
  HloVerifierOpts&& WithLayoutSensitive(bool layout_sensitive_p) {
    layout_sensitive = layout_sensitive_p;
    return std::move(*this);
  }
  HloVerifierOpts&& WithAllowMixedPrecision(bool allow_mixed_precision_p) {
    allow_mixed_precision = allow_mixed_precision_p;
    return std::move(*this);
  }
  HloVerifierOpts&& AllowMixedPrecision() {
    allow_mixed_precision = true;
    return std::move(*this);
  }
  HloVerifierOpts&& VerifyBroadcastDimensionsOrder() {
    verify_broadcast_dimensions_order = true;
    return std::move(*this);
  }
  HloVerifierOpts&& VerifyReshapeIsBitcast() {
    verify_reshape_is_bitcast = true;
    return std::move(*this);
  }
  HloVerifierOpts&& VerifyCustomCallNestedComputationThreadName() {
    verify_custom_call_nested_computation_thread_name = true;
    return std::move(*this);
  }
  HloVerifierOpts&& WithAllowBitcastToHaveDifferentSize(bool allow) {
    allow_bitcast_to_have_different_size = allow;
    return std::move(*this);
  }
  HloVerifierOpts&& WithInstructionCanChangeLayout(
      const HloPredicate& instruction_can_change_layout_p) {
    instruction_can_change_layout = instruction_can_change_layout_p;
    return std::move(*this);
  }
  HloVerifierOpts&& WithCustomShapeSize(const ShapeSizeFn& shape_size_p) {
    shape_size = shape_size_p;
    return std::move(*this);
  }
  HloVerifierOpts&& WithVerifyShardingDeviceNumbers(bool verify) {
    verify_sharding_device_numbers = verify;
    return std::move(*this);
  }
  // NOTE: the argument is ignored — this setter is a no-op, presumably kept
  // for API compatibility with older callers; confirm before relying on it.
  HloVerifierOpts&& WithVerifyS4U4Usage(bool verify) {
    return std::move(*this);
  }
  HloVerifierOpts&& WithAllowUnboundedDynamism(bool allow) {
    allow_unbounded_dynamism = allow;
    return std::move(*this);
  }
  bool IsLayoutSensitive() const { return layout_sensitive; }
  bool AllowMixedPrecision() const { return allow_mixed_precision; }
  const HloPredicate& InstructionCanChangeLayout() const {
    return instruction_can_change_layout;
  }
  // An unset predicate means every instruction may change layout.
  bool InstructionCanChangeLayout(const HloInstruction* instruction) const {
    return !instruction_can_change_layout ||
           instruction_can_change_layout(instruction);
  }
  int64_t ShapeSize(const Shape& shape) const { return shape_size(shape); }
  bool layout_sensitive = false;
  bool allow_mixed_precision = false;
  bool verify_broadcast_dimensions_order = false;
  bool verify_reshape_is_bitcast = false;
  bool verify_custom_call_nested_computation_thread_name = true;
  bool verify_sharding_device_numbers = true;
  bool allow_bitcast_to_have_different_size = false;
  bool allow_unbounded_dynamism = false;
  // Optional predicate; when set, requires layout_sensitive (checked by
  // TargetVerifierMetadata's constructor).
  HloPredicate instruction_can_change_layout;
  // Byte-size function used by ShapeSize(); defaults to ShapeUtil.
  ShapeSizeFn shape_size = [](const Shape& shape) {
    return ShapeUtil::ByteSizeOf(shape);
  };
};
// DFS visitor that checks every HLO instruction's shape against the shape
// inferred from its operands (via shape_inference), honoring the layout /
// mixed-precision settings in HloVerifierOpts.
class ShapeVerifier : public DfsHloVisitor {
 public:
  explicit ShapeVerifier(const HloVerifierOpts& opts) : opts_(opts) {}

  // Checks that the entry computation's parameter/result layouts match the
  // module's entry computation layout.
  virtual absl::Status VerifyEntryComputationLayout(const HloModule& module);

  absl::Status Preprocess(HloInstruction* hlo) override;

  // Per-opcode shape checks. Each handler compares the instruction's shape
  // to the shape inferred for its opcode.
  absl::Status HandleElementwiseUnary(HloInstruction* hlo) override;
  absl::Status HandleElementwiseBinary(HloInstruction* hlo) override;
  absl::Status HandleClamp(HloInstruction* clamp) override;
  absl::Status HandleSelect(HloInstruction* select) override;
  absl::Status HandleConcatenate(HloInstruction* concatenate) override;
  absl::Status HandleIota(HloInstruction* hlo) override;
  absl::Status HandleConvert(HloInstruction* convert) override;
  absl::Status HandleBitcastConvert(HloInstruction* convert) override;
  absl::Status HandleStochasticConvert(HloInstruction* convert) override;
  absl::Status HandleCopy(HloInstruction* copy) override;
  absl::Status HandleDot(HloInstruction* dot) override;
  absl::Status HandleConvolution(HloInstruction* convolution) override;
  absl::Status HandleFft(HloInstruction* fft) override;
  absl::Status HandleCholesky(HloInstruction* hlo) override;
  absl::Status HandleTriangularSolve(HloInstruction* hlo) override;
  absl::Status HandleAllGather(HloInstruction* hlo) override;
  absl::Status HandleAllGatherStart(HloInstruction* hlo) override;
  absl::Status HandleAllGatherDone(HloInstruction* hlo) override;
  absl::Status HandleAllReduce(HloInstruction* hlo) override;
  absl::Status HandleAllReduceStart(HloInstruction* hlo) override;
  absl::Status HandleAllReduceDone(HloInstruction* hlo) override;
  absl::Status HandleAllToAll(HloInstruction* hlo) override;
  absl::Status HandleCollectiveBroadcast(HloInstruction* hlo) override;
  absl::Status HandleCollectivePermute(HloInstruction* hlo) override;
  absl::Status HandleCollectivePermuteStart(HloInstruction* hlo) override;
  absl::Status HandleCollectivePermuteDone(HloInstruction* hlo) override;
  absl::Status HandlePartitionId(HloInstruction* hlo) override;
  absl::Status HandleReplicaId(HloInstruction* hlo) override;
  absl::Status HandleReducePrecision(HloInstruction* reduce_precision) override;
  absl::Status HandleInfeed(HloInstruction*) override;
  absl::Status HandleOptimizationBarrier(HloInstruction* hlo) override;
  absl::Status HandleOutfeed(HloInstruction*) override;
  absl::Status HandleRng(HloInstruction*) override;
  absl::Status HandleRngBitGenerator(HloInstruction*) override;
  absl::Status HandleRngGetAndUpdateState(HloInstruction*) override;
  absl::Status HandleReverse(HloInstruction* reverse) override;
  absl::Status HandleSort(HloInstruction* hlo) override;
  absl::Status HandleTopK(HloInstruction* hlo) override;
  absl::Status HandleConstant(HloInstruction* constant) override;
  absl::Status HandleGetTupleElement(
      HloInstruction* get_tuple_element) override;
  absl::Status HandleReduce(HloInstruction* reduce) override;
  absl::Status HandleBitcast(HloInstruction* bitcast) override;
  absl::Status HandleBroadcast(HloInstruction* broadcast) override;
  absl::Status HandleReshape(HloInstruction* reshape) override;
  absl::Status HandleDynamicReshape(HloInstruction* dynamic_reshape) override;
  absl::Status HandleTranspose(HloInstruction* transpose) override;
  absl::Status HandleParameter(HloInstruction*) override;
  absl::Status HandleFusion(HloInstruction*) override;
  absl::Status HandleCall(HloInstruction* call) override;
  absl::Status HandleCustomCall(HloInstruction*) override;
  absl::Status HandleSlice(HloInstruction* slice) override;
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override;
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override;
  absl::Status HandleTuple(HloInstruction* tuple) override;
  absl::Status HandleMap(HloInstruction* map) override;
  absl::Status HandleReduceScatter(HloInstruction* hlo) override;
  absl::Status HandleReduceWindow(HloInstruction* reduce_window) override;
  absl::Status HandleSelectAndScatter(HloInstruction* instruction) override;
  absl::Status HandleWhile(HloInstruction* xla_while) override;
  absl::Status HandleConditional(HloInstruction* conditional) override;
  absl::Status HandlePad(HloInstruction* pad) override;
  absl::Status HandleAsyncStart(HloInstruction* async_start) override;
  absl::Status HandleAsyncUpdate(HloInstruction* async_update) override;
  absl::Status HandleAsyncDone(HloInstruction* async_done) override;
  absl::Status HandleCopyStart(HloInstruction* copy_start) override;
  absl::Status HandleCopyDone(HloInstruction* copy_done) override;
  absl::Status HandleSend(HloInstruction* send) override;
  absl::Status HandleSendDone(HloInstruction* send_done) override;
  absl::Status HandleRecv(HloInstruction* recv) override;
  absl::Status HandleRecvDone(HloInstruction* recv_done) override;
  absl::Status HandleBatchNormTraining(
      HloInstruction* batch_norm_training) override;
  absl::Status HandleBatchNormInference(
      HloInstruction* batch_norm_inference) override;
  absl::Status HandleBatchNormGrad(HloInstruction* batch_norm_grad) override;
  absl::Status HandleGather(HloInstruction* gather) override;
  absl::Status HandleScatter(HloInstruction* scatter) override;
  absl::Status HandleAfterAll(HloInstruction* token) override;
  absl::Status HandleGetDimensionSize(HloInstruction* get_size) override;
  absl::Status HandleSetDimensionSize(HloInstruction* set_size) override;
  absl::Status HandleAddDependency(HloInstruction* add_dependency) override;

  absl::Status FinishVisit(HloInstruction*) override {
    return absl::OkStatus();
  }

 protected:
  // Shape equality that honors layout sensitivity (see opts_).
  bool ShapesSame(const Shape& a, const Shape& b, Shape::Equal equal = {});
  // Checks that `instruction`'s shape matches `inferred_shape`.
  absl::Status CheckShape(const HloInstruction* instruction,
                          const Shape& inferred_shape,
                          bool only_compare_minor_to_major_in_layout = false);
  // Overload: propagates a failed shape inference as the error.
  absl::Status CheckShape(const HloInstruction* instruction,
                          const absl::StatusOr<Shape>& inferred_shape_status);
  static absl::Status CheckParameterCount(
      const HloInstruction* calling_instruction,
      const HloComputation* computation, int expected);
  // Checks arity-specific handlers via shape inference.
  absl::Status CheckUnaryShape(const HloInstruction* instruction);
  absl::Status CheckBinaryShape(const HloInstruction* instruction);
  absl::Status CheckTernaryShape(const HloInstruction* instruction);
  absl::Status CheckVariadicShape(const HloInstruction* instruction);

 private:
  // Renders a shape with or without layout depending on layout sensitivity,
  // for error messages.
  std::string StringifyShape(const Shape& s) {
    return opts_.layout_sensitive ? ShapeUtil::HumanStringWithLayout(s)
                                  : ShapeUtil::HumanString(s);
  }
  // Element-type comparison that optionally ignores fp precision when mixed
  // precision is allowed.
  bool SameElementType(const Shape& a, const Shape& b) {
    return opts_.allow_mixed_precision
               ? ShapeUtil::SameElementTypeIgnoringFpPrecision(a, b)
               : ShapeUtil::SameElementType(a, b);
  }
  absl::Status CheckIsTokenOperand(const HloInstruction* instruction,
                                   int64_t operand_no);
  absl::Status CheckOperandAndParameter(const HloInstruction* instruction,
                                        int64_t operand_number,
                                        const HloComputation* computation,
                                        int64_t parameter_number);
  absl::Status CheckAsyncOpComputationShapes(const HloInstruction* async_op,
                                             const Shape& async_shape);
  bool HasCompatibleElementTypes(const Shape& shape_0, const Shape& shape_1,
                                 const Shape& result_shape);
  const HloVerifierOpts& opts_;
};
// Bundles verifier options with a factory for the concrete ShapeVerifier
// used on each run. Subclasses override GetVerifier() to supply a
// target-specific verifier. Non-copyable.
class TargetVerifierMetadata {
 public:
  // Takes ownership of `opts`. Fix: the options are now moved into opts_
  // instead of copied — HloVerifierOpts carries std::function members
  // (HloPredicate, ShapeSizeFn), so the rvalue-reference parameter was
  // previously copy-initialized for no benefit. The invariant is checked on
  // the stored copy, which is also safe after the move.
  explicit TargetVerifierMetadata(HloVerifierOpts&& opts)
      : opts_(std::move(opts)) {
    // A layout-change predicate only makes sense for a layout-sensitive
    // verifier.
    CHECK(opts_.instruction_can_change_layout == nullptr ||
          opts_.layout_sensitive);
  }
  // Creates the ShapeVerifier to use for one verification run.
  virtual std::unique_ptr<ShapeVerifier> GetVerifier() const = 0;
  TargetVerifierMetadata() = default;
  virtual ~TargetVerifierMetadata() = default;
  TargetVerifierMetadata(const TargetVerifierMetadata&) = delete;
  TargetVerifierMetadata& operator=(const TargetVerifierMetadata&) = delete;
  const HloVerifierOpts& GetVerifierOpts() const { return opts_; }

 private:
  HloVerifierOpts opts_;
};
// Default metadata: produces the plain (target-agnostic) ShapeVerifier
// configured with the stored options.
class DefaultVerifierMetadata : public TargetVerifierMetadata {
 public:
  explicit DefaultVerifierMetadata(HloVerifierOpts&& opts)
      : TargetVerifierMetadata(std::move(opts)) {}
  // A fresh verifier per call, since ShapeVerifier is stateful (DFS visitor).
  std::unique_ptr<ShapeVerifier> GetVerifier() const override {
    return std::make_unique<ShapeVerifier>(GetVerifierOpts());
  }
};
// HLO pass that validates the invariants of an HloModule (shapes, layouts,
// call graph, channel usage, ...). Intended to be run between other passes
// to catch invalid IR early; never mutates the module.
class HloVerifier : public HloModulePass {
 public:
  // Legacy convenience constructor; forwards to the HloVerifierOpts builder.
  HloVerifier(
      bool layout_sensitive, bool allow_mixed_precision,
      HloPredicate instruction_can_change_layout_func = {},
      std::function<int64_t(const Shape&)> shape_size_func =
          [](const Shape& shape) { return ShapeUtil::ByteSizeOf(shape); })
      : HloVerifier(HloVerifierOpts{}
                        .WithLayoutSensitive(layout_sensitive)
                        .WithAllowMixedPrecision(allow_mixed_precision)
                        .WithInstructionCanChangeLayout(
                            instruction_can_change_layout_func)
                        .WithCustomShapeSize(shape_size_func)) {}
  // Uses the default (target-agnostic) verifier metadata.
  explicit HloVerifier(HloVerifierOpts&& opts)
      : target_metadata_(
            std::make_unique<DefaultVerifierMetadata>(std::move(opts))),
        context_("Unknown") {}
  // `context` is a human-readable tag included in diagnostics to identify
  // where in the pipeline the verifier ran.
  explicit HloVerifier(std::unique_ptr<TargetVerifierMetadata> target_metadata,
                       absl::string_view context = "Unknown")
      : target_metadata_(std::move(target_metadata)), context_(context) {}
  ~HloVerifier() override = default;
  absl::string_view name() const override { return "hlo-verifier"; }

  using HloPassInterface::Run;
  using HloPassInterface::RunOnModuleGroup;
  // Always returns false on success (the module is never changed).
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  std::unique_ptr<TargetVerifierMetadata> target_metadata_;
  // Tag reported in error messages; see constructor above.
  std::string context_;
};
// Visitor that tallies how many instructions carry each kind of OpMetadata
// field (op_type, op_name, source file/line, profile info, ...). The tallies
// are presumably logged with `prefix` on destruction — confirm in the
// definition, which is outside this header.
class MetadataTracker : public DfsHloVisitorWithDefault {
 public:
  explicit MetadataTracker(absl::string_view prefix);
  ~MetadataTracker() override;
  absl::Status DefaultAction(HloInstruction* instruction) override;
  // Updates the counters below for one instruction's metadata.
  void HandleMetadata(const OpMetadata& metadata);

 private:
  const std::string prefix_;
  // Per-field occurrence counters across all visited instructions.
  int64_t instruction_count_ = 0;
  int64_t has_op_type_count_ = 0;
  int64_t has_op_name_count_ = 0;
  int64_t has_source_file_count_ = 0;
  int64_t has_dummy_source_file_count_ = 0;
  int64_t has_source_line_count_ = 0;
  int64_t has_creation_pass_id_count_ = 0;
  int64_t has_logical_creation_pass_id_count_ = 0;
  int64_t has_size_of_generated_code_in_bytes_count_ = 0;
  int64_t has_size_of_memory_working_set_in_bytes_count_ = 0;
  int64_t has_profile_info_count_ = 0;
};
}
#endif
#include "xla/service/hlo_verifier.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Returns true iff `hlo`'s opcode is one that may have called computations
// attached to it.
bool IsCallerInstruction(HloInstruction* hlo) {
  const HloOpcode opcode = hlo->opcode();
  return HloInstruction::MightHaveCalledComputations(opcode);
}
// Verifies that `hlo` has exactly `expected` operands; on mismatch returns an
// Internal error describing the instruction.
absl::Status CheckOperandCount(const HloInstruction* hlo, int expected) {
  const int actual = hlo->operand_count();
  if (actual == expected) {
    return absl::OkStatus();
  }
  return Internal("Expected %d operands for %s instruction: %s", expected,
                  HloOpcodeString(hlo->opcode()), hlo->ToString());
}
// Returns the number of participants in one subgroup of the collective `hlo`,
// derived from its replica_groups and the module's replica/partition counts.
int64_t GetSubgroupSize(HloCollectiveInstruction* hlo,
                        CollectiveOpGroupMode group_mode) {
  const HloModuleConfig& config = hlo->GetModule()->config();
  switch (group_mode) {
    case CollectiveOpGroupMode::kCrossReplica:
    case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
      // Empty replica_groups means "all replicas form one group".
      int64_t replica_subgroup_size =
          hlo->replica_groups().empty()
              ? config.replica_count()
              : hlo->replica_groups()[0].replica_ids_size();
      if (group_mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
        // Each subgroup additionally spans every partition.
        replica_subgroup_size *= config.num_partitions();
      }
      return replica_subgroup_size;
    }
    case CollectiveOpGroupMode::kFlattenedID:
      // NOTE(review): assumes replica_groups is non-empty for flattened-ID
      // mode -- presumably guaranteed by an earlier check; confirm.
      return hlo->replica_groups()[0].replica_ids_size();
    case CollectiveOpGroupMode::kCrossPartition:
      return hlo->replica_groups().empty()
                 ? config.num_partitions()
                 : hlo->replica_groups()[0].replica_ids_size();
  }
  // All declared enumerators are handled above; control only falls off the
  // end for an out-of-range enum value.
}
// Recursively verifies that every computation called (directly or
// transitively) from `comp` declares the same execution thread as `comp`.
// When `skip_nested_async_op_check` is set, asynchronous instructions are
// exempt, since async ops may legitimately target another thread.
absl::Status CheckNestedComputationThreadNameEqual(
    const HloComputation* comp, bool skip_nested_async_op_check) {
  for (const HloInstruction* instr : comp->instructions()) {
    if (skip_nested_async_op_check && instr->IsAsynchronous()) {
      continue;
    }
    for (const HloComputation* called_cmp : instr->called_computations()) {
      if (called_cmp->execution_thread() != comp->execution_thread()) {
        return Internal(
            "Nested computations expects same computation's thread name (%s vs "
            "%s).",
            called_cmp->execution_thread(), comp->execution_thread());
      }
      // Recurse so the whole call tree below `called_cmp` is checked too.
      TF_RETURN_IF_ERROR(CheckNestedComputationThreadNameEqual(
          called_cmp, skip_nested_async_op_check));
    }
  }
  return absl::OkStatus();
}
// Checks that `computation`, invoked from `calling_instruction`, declares
// exactly `expected` parameters.
absl::Status ShapeVerifier::CheckParameterCount(
    const HloInstruction* calling_instruction,
    const HloComputation* computation, int expected) {
  const int actual = computation->num_parameters();
  if (actual == expected) {
    return absl::OkStatus();
  }
  return Internal(
      "Expected computation %s called from %s to have %d parameters, has %d",
      computation->name(), calling_instruction->name(), expected, actual);
}
// Generic per-instruction checks run before the opcode-specific handler:
// only caller-capable opcodes may list called computations, the operand count
// must match the opcode's fixed arity (when it has one), and unbounded
// dynamic result shapes are rejected unless the options allow them.
absl::Status ShapeVerifier::Preprocess(HloInstruction* hlo) {
  if (!hlo->called_computations().empty() && !IsCallerInstruction(hlo)) {
    return Internal(
        "Called computations specified for non-caller instruction %s",
        hlo->ToString());
  }
  // Some opcodes have variable arity, in which case HloOpcodeArity is empty
  // and no count check applies here.
  std::optional<int> arity = HloOpcodeArity(hlo->opcode());
  if (arity) {
    TF_RETURN_IF_ERROR(CheckOperandCount(hlo, *arity));
  }
  if (!opts_.allow_unbounded_dynamism && hlo->shape().is_unbounded_dynamic()) {
    return InvalidArgument("Unbounded dynamism is disabled for instruction: %s",
                           hlo->ToString());
  }
  return absl::OkStatus();
}
// Elementwise and ternary handlers: all delegate to shared shape checks that
// validate the result shape against the operand shapes.
absl::Status ShapeVerifier::HandleElementwiseUnary(HloInstruction* hlo) {
  return CheckUnaryShape(hlo);
}
absl::Status ShapeVerifier::HandleElementwiseBinary(HloInstruction* hlo) {
  return CheckBinaryShape(hlo);
}
absl::Status ShapeVerifier::HandleClamp(HloInstruction* clamp) {
  return CheckTernaryShape(clamp);
}
absl::Status ShapeVerifier::HandleSelect(HloInstruction* select) {
  return CheckTernaryShape(select);
}
// Infers the concatenate result shape from every operand shape and verifies
// it against the instruction's declared shape.
absl::Status ShapeVerifier::HandleConcatenate(HloInstruction* concatenate) {
  std::vector<const Shape*> operand_shapes;
  operand_shapes.reserve(concatenate->operand_count());
  for (const HloInstruction* op : concatenate->operands()) {
    operand_shapes.push_back(&op->shape());
  }
  return CheckShape(concatenate,
                    ShapeInference::InferConcatOpShape(
                        operand_shapes, concatenate->concatenate_dimension()));
}
// Conversion and copy handlers: each validates the declared result shape
// against the shape inferred from the operand(s) and target element type.
absl::Status ShapeVerifier::HandleConvert(HloInstruction* convert) {
  return CheckShape(convert, ShapeInference::InferConvertShape(
                                 convert->operand(0)->shape(),
                                 convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleBitcastConvert(HloInstruction* convert) {
  return CheckShape(convert, ShapeInference::InferBitcastConvertShape(
                                 convert->operand(0)->shape(),
                                 convert->shape().element_type()));
}
// Stochastic convert takes a second operand (the random values).
absl::Status ShapeVerifier::HandleStochasticConvert(HloInstruction* convert) {
  return CheckShape(
      convert, ShapeInference::InferStochasticConvertShape(
                   convert->operand(0)->shape(), convert->operand(1)->shape(),
                   convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleCopy(HloInstruction* copy) {
  return CheckUnaryShape(copy);
}
// Verifies a dot instruction: operand count (including sparsity metadata
// operands), inferred result shape, packed-nibble precision constraints, and
// the shape of each sparsity metadata operand.
absl::Status ShapeVerifier::HandleDot(HloInstruction* dot) {
  auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
  // A sparse dot carries one extra metadata operand per sparsity descriptor.
  TF_RETURN_IF_ERROR(
      CheckOperandCount(dot, HloDotInstruction::kOperands + sparsity.size()));
  TF_ASSIGN_OR_RETURN(
      const Shape expected,
      ShapeInference::InferDotOpShape(
          dot->operand(0)->shape(), dot->operand(1)->shape(),
          dot->dot_dimension_numbers(),
          dot->shape().element_type(), sparsity));
  // PACKED_NIBBLE must be requested on both operands, and only for 8-bit
  // integer inputs.
  if (auto nibble_count =
          absl::c_count(dot->precision_config().operand_precision(),
                        PrecisionConfig::PACKED_NIBBLE)) {
    if (nibble_count == 1) {
      return InvalidArgument("Dot cannot have a single packed nibble argument");
    }
    if (nibble_count == 2) {
      if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(0)->shape(), 8)) {
        return InvalidArgument(
            "Packed nibble precision can only apply to 8 bit integers. LHS is "
            "%s.",
            dot->operand(0)->ToString());
      }
      if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(1)->shape(), 8)) {
        return InvalidArgument(
            "Packed nibble precision can only apply to 8 bit integers. RHS is "
            "%s.",
            dot->operand(1)->ToString());
      }
    }
  }
  // Each sparsity descriptor applies to operand 0 or 1; its metadata operand
  // (appended after the regular operands) must match the inferred shape.
  for (int i = 0; i < sparsity.size(); ++i) {
    const SparsityDescriptor& descriptor = sparsity[i];
    TF_RET_CHECK(descriptor.index() == 0 || descriptor.index() == 1);
    TF_ASSIGN_OR_RETURN(const Shape expected_metadata_shape,
                        ShapeInference::InferSparseDotMetadataShape(
                            dot->operand(descriptor.index())->shape(),
                            dot->dot_dimension_numbers(), descriptor));
    const Shape actual_metadata_shape =
        dot->operand(HloDotInstruction::kOperands + i)->shape();
    if (!ShapeUtil::Compatible(actual_metadata_shape,
                               expected_metadata_shape)) {
      return Internal(
          "Expected sparse dot metadata to have shape equal to %s, actual "
          "shape is %s:\n%s",
          StringifyShape(expected_metadata_shape),
          StringifyShape(actual_metadata_shape), dot->ToString());
    }
  }
  return CheckShape(dot, expected);
}
// Verifies a convolution: inferred result shape plus the packed-nibble
// precision constraints (both operands packed, no feature/batch grouping,
// 8-bit integer inputs only).
absl::Status ShapeVerifier::HandleConvolution(HloInstruction* convolution) {
  TF_ASSIGN_OR_RETURN(
      Shape expected,
      ShapeInference::InferConvolveShape(
          convolution->operand(0)->shape(), convolution->operand(1)->shape(),
          convolution->feature_group_count(), convolution->batch_group_count(),
          convolution->window(), convolution->convolution_dimension_numbers(),
          convolution->shape().element_type()));
  if (auto nibble_count =
          absl::c_count(convolution->precision_config().operand_precision(),
                        PrecisionConfig::PACKED_NIBBLE)) {
    if (nibble_count == 1) {
      return InvalidArgument(
          "Convolution cannot have a single packed nibble argument");
    }
    if (nibble_count == 2) {
      if (convolution->feature_group_count() != 1) {
        return InvalidArgument(
            "Packed nibble precision does not support feature group count "
            "%s.",
            convolution->ToString());
      }
      if (convolution->batch_group_count() != 1) {
        return InvalidArgument(
            "Packed nibble precision does not support batch group count "
            "%s.",
            convolution->ToString());
      }
      if (!ShapeUtil::ElementIsIntegralWithBits(
              convolution->operand(0)->shape(), 8)) {
        return InvalidArgument(
            "Packed nibble precision can only apply to 8 bit integers. LHS is "
            "%s.",
            convolution->operand(0)->ToString());
      }
      if (!ShapeUtil::ElementIsIntegralWithBits(
              convolution->operand(1)->shape(), 8)) {
        return InvalidArgument(
            "Packed nibble precision can only apply to 8 bit integers. RHS is "
            "%s.",
            convolution->operand(1)->ToString());
      }
    }
  }
  return CheckShape(convolution, expected);
}
// Linear-algebra and barrier handlers: infer the expected result shape from
// the operands and compare with the instruction's declared shape.
absl::Status ShapeVerifier::HandleFft(HloInstruction* fft) {
  TF_ASSIGN_OR_RETURN(
      const Shape expected,
      ShapeInference::InferFftShape(fft->operand(0)->shape(), fft->fft_type(),
                                    fft->fft_length()));
  return CheckShape(fft, expected);
}
absl::Status ShapeVerifier::HandleTriangularSolve(HloInstruction* hlo) {
  TF_ASSIGN_OR_RETURN(const Shape expected,
                      ShapeInference::InferTriangularSolveShape(
                          hlo->operand(0)->shape(), hlo->operand(1)->shape(),
                          hlo->triangular_solve_options()));
  return CheckShape(hlo, expected);
}
absl::Status ShapeVerifier::HandleCholesky(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
  TF_ASSIGN_OR_RETURN(const Shape expected, ShapeInference::InferCholeskyShape(
                                                hlo->operand(0)->shape()));
  return CheckShape(hlo, expected);
}
// An optimization barrier is shape-preserving: result == operand shape.
absl::Status ShapeVerifier::HandleOptimizationBarrier(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
  return CheckShape(hlo, hlo->operand(0)->shape());
}
// Compares two shapes: when the verifier is layout-sensitive the supplied
// `equal` predicate decides; otherwise layouts are ignored and plain
// compatibility suffices.
bool ShapeVerifier::ShapesSame(const Shape& a, const Shape& b,
                               Shape::Equal equal) {
  if (opts_.layout_sensitive) {
    return equal(a, b);
  }
  return ShapeUtil::Compatible(a, b);
}
static absl::Status CheckReplicaGroups(HloInstruction* hlo,
CollectiveOpGroupMode group_mode,
bool uniform_replica_group_size = true) {
if (!hlo->replica_groups().empty()) {
absl::flat_hash_set<int64_t> replicas_seen;
for (const ReplicaGroup& g : hlo->replica_groups()) {
if (g.replica_ids().empty()) {
return Internal("Instruction cannot have an empty replica group: %s",
hlo->ToString());
}
for (int64_t i : g.replica_ids()) {
if (!replicas_seen.insert(i).second) {
return Internal(
"Replica %d is repeated in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
}
size_t n = replicas_seen.size();
for (int64_t i = 0; i < n; ++i) {
if (!replicas_seen.count(i)) {
return Internal(
"Replica %d is not named in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
int64_t replica_count = hlo->GetModule()->config().replica_count();
int64_t num_partitions = hlo->GetModule()->con | #include "xla/service/hlo_verifier.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/log_severity.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/layout_assignment.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
std::unique_ptr<HloModule> CreateUnverifiedModule() {
return std::make_unique<HloModule>("module", HloModuleConfig());
}
// Test fixtures differing only in the HloTestBase constructor flags.  From
// the fixture names, the first flag presumably controls layout sensitivity
// and the second mixed-precision tolerance -- confirm against HloTestBase.
class HloVerifierTest : public HloTestBase {
 public:
  HloVerifierTest()
      : HloTestBase(false,
                    false) {}
};
// Same as above but tolerating mixed precision.
class HloVerifierTestAllowMixedPrecision : public HloTestBase {
 public:
  HloVerifierTestAllowMixedPrecision()
      : HloTestBase(false,
                    true) {}
};
// Layout-sensitive verification with an instruction-can-change-layout
// predicate supplied.
class HloVerifierTestLayoutSensitive : public HloTestBase {
 public:
  HloVerifierTestLayoutSensitive()
      : HloTestBase(true,
                    false,
                    LayoutAssignment::InstructionCanChangeLayout) {}
};
// Layout-sensitive verification without the layout-change predicate.
class HloVerifierTestLayoutFusion : public HloTestBase {
 public:
  HloVerifierTestLayoutFusion()
      : HloTestBase(true,
                    false) {}
};
// The verifier must reject an instruction whose parent pointer was nulled.
TEST_F(HloVerifierTest, NullInstructionParent) {
  HloComputation::Builder builder(TestName());
  const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape, "param"));
  HloInstruction* negate = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
  auto module = CreateUnverifiedModule();
  module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(verifier().Run(module.get()).status());
  // Corrupt the module after it has passed once.
  negate->set_parent(nullptr);
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
// Same check at the computation level: a nulled computation parent fails.
TEST_F(HloVerifierTest, NullComputationParent) {
  HloComputation::Builder builder(TestName());
  const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape, "param"));
  builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
  auto module = CreateUnverifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(verifier().Run(module.get()).status());
  computation->set_parent(nullptr);
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
// An instruction may not take an operand living in another computation.
TEST_F(HloVerifierTest, DifferentOperandParents) {
  HloComputation::Builder builder(TestName());
  const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape, "param"));
  HloInstruction* negate = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
  auto module = CreateUnverifiedModule();
  module->AddEntryComputation(builder.Build());
  HloComputation::Builder emb_builder(TestName());
  HloInstruction* emb_param = emb_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape, "param"));
  module->AddEmbeddedComputation(emb_builder.Build());
  TF_ASSERT_OK(verifier().Run(module.get()).status());
  // Rewire negate's operand to the parameter of the embedded computation.
  TF_ASSERT_OK(negate->ReplaceOperandWith(0, emb_param));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("is in a different computation"));
}
// Running the verifier twice on a broken module must fail both times, i.e.
// the shape verifier's internal state resets between runs.
TEST_F(HloVerifierTest, ResetsShapeVerifierState) {
  HloComputation::Builder builder(TestName());
  Shape s1 = ShapeUtil::MakeShape(F32, {1});
  Shape s2 = ShapeUtil::MakeShape(F32, {2});
  HloInstruction* param =
      builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "param"));
  // Wrong result shape on purpose: add of f32[1] declared as f32[2].
  HloInstruction* add = builder.AddInstruction(
      HloInstruction::CreateBinary(s2, HloOpcode::kAdd, param, param));
  builder.AddInstruction(
      HloInstruction::CreateBinary(s2, HloOpcode::kMultiply, add, add));
  auto module = CreateUnverifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_FALSE(verifier().Run(module.get()).status().ok());
  EXPECT_FALSE(verifier().Run(module.get()).status().ok());
}
// A call whose operand shape disagrees with the callee's parameter shape
// must be rejected.
TEST_F(HloVerifierTest, CheckCallOperandParameterShapesMismatch) {
  const char* const hlo_string = R"(
  HloModule Module
  callme {
    ROOT param = (s32[], f32[4]) parameter(0)
  }
  ENTRY entry {
    p0 = (f32[4], s32[]) parameter(0)
    ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
// Calling into a computation on a different execution thread is an error.
TEST_F(HloVerifierTest, CheckCallThreadMismatch) {
  constexpr absl::string_view hlo = R"(
  HloModule Module
  callme {
    ROOT param = (s32[], f32[4]) parameter(0)
  }, execution_thread="parallel_thread"
  ENTRY entry {
    p0 = (s32[], f32[4]) parameter(0)
    ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("expects parent computation thread name same as called "
                        "computation's thread name"));
}
// Conditional branch operands must match the branch computations' parameter
// shapes.
TEST_F(HloVerifierTest, CheckConditionalOperandParameterShapesMismatch) {
  const char* const hlo_string = R"(
  HloModule Module
  true_branch {
    tparam = (s32[], f32[4]) parameter(0)
    ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
  }
  false_branch {
    fparam = (s32[], f32[4]) parameter(0)
    ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1
  }
  ENTRY entry {
    p0 = (f32[4], s32[]) parameter(0)
    constant = pred[] constant(true)
    ROOT conditional = f32[4] conditional(constant, p0, p0),
      true_computation=true_branch, false_computation=false_branch
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
// An indexed conditional's first operand must be a scalar S32; both a wrong
// element type and a non-scalar shape are exercised below.
TEST_F(HloVerifierTest, CheckConditionalBranchIndexOperandShape) {
  const char* const hlo_string = R"(
  HloModule Module
  branch0 {
    tparam = f32[4] parameter(0)
    ROOT tgte1 = f32[4] ceil(tparam)
  }
  branch1 {
    fparam = f32[4] parameter(0)
    ROOT fgte1 = f32[4] floor(fparam)
  }
  branch2 {
    sparam = f32[4] parameter(0)
    ROOT sgte1 = f32[4] ceil(sparam)
  }
  ENTRY entry {
    p0 = f32[4] parameter(0)
    b0 = s32[] parameter(1)
    ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
      branch_computations={branch0, branch1, branch2}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  // NOTE(review): this first run's status is discarded before being
  // reassigned below -- presumably intended as a sanity pass; no assertion
  // is made on it.
  auto status = verifier().Run(module.get()).status();
  HloInstruction* condition = FindInstruction(module.get(), "b0");
  // Mutate the index operand to a scalar of the wrong element type.
  *condition->mutable_shape() = ShapeUtil::MakeShape(F32, {});
  status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(
      status.message(),
      HasSubstr(
          "first operand of indexed conditional must be a scalar of S32"));
  // Right element type, but no longer a scalar.
  *condition->mutable_shape() = ShapeUtil::MakeShape(S32, {4});
  status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("first operand of conditional must be a scalar"));
}
// A branch computation on a different execution thread must be rejected.
TEST_F(HloVerifierTest, CheckConditionalBranchThread) {
  const char* const hlo_string = R"(
  HloModule Module
  branch0 {
    tparam = f32[4] parameter(0)
    ROOT tgte1 = f32[4] ceil(tparam)
  }
  branch1 {
    fparam = f32[4] parameter(0)
    ROOT fgte1 = f32[4] floor(fparam)
  }, execution_thread="parallel_thread"
  branch2 {
    sparam = f32[4] parameter(0)
    ROOT sgte1 = f32[4] ceil(sparam)
  }
  ENTRY entry {
    p0 = f32[4] parameter(0)
    b0 = s32[] parameter(1)
    ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
      branch_computations={branch0, branch1, branch2}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  EXPECT_THAT(status.message(),
              HasSubstr("expects parent computation thread name same as called "
                        "computation's thread name"));
}
// Async ops inside a branch may target another thread; that must verify OK.
TEST_F(HloVerifierTest, CheckConditionalBranchContainsAsyncThread) {
  const char* const hlo_string = R"(
  HloModule Module
  branch0 {
    tparam = f32[4] parameter(0)
    ROOT tgte1 = f32[4] ceil(tparam)
  }
  branch1 {
    fparam = f32[4] parameter(0)
    %async-start = ((f32[4]), f32[4], s32[]) custom-call-start(f32[4] fparam), async_execution_thread="parallel_thread", custom_call_target="foo"
    ROOT %async-done = f32[4] custom-call-done(((f32[4]), f32[4], s32[]) %async-start)
  }
  branch2 {
    sparam = f32[4] parameter(0)
    ROOT sgte1 = f32[4] ceil(sparam)
  }
  ENTRY entry {
    p0 = f32[4] parameter(0)
    b0 = s32[] parameter(1)
    ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
      branch_computations={branch0, branch1, branch2}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  TF_ASSERT_OK(verifier().Run(module.get()).status());
}
// rng requires scalar distribution parameters.
TEST_F(HloVerifierTest, RngOpnd0NotScalar) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY RngOpnd0NotScalar {
   constant.0 = f32[] constant(0)
   constant.1 = f16[2] constant({1, 3})
   ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[2] constant.1),
    distribution=rng_uniform
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("Expected scalar type"));
}
// rng operands must share a compatible element type.
TEST_F(HloVerifierTest, RngOperandElementTypesDoNotMatch) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY RngOperandElementTypesNotMatch {
   constant.0 = f32[] constant(0)
   constant.1 = f16[] constant(1)
   ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[] constant.1),
    distribution=rng_normal
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
// Without the mixed-precision flag, an f16 result from f32 operands fails...
TEST_F(HloVerifierTest, RngMixedPrecisionNotAllowed) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY RngResultElementTypeNotMatch {
   constant.0 = f32[] constant(0)
   constant.1 = f32[] constant(1)
   ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
    distribution=rng_normal
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
// ...but passes under the mixed-precision-tolerant fixture.
TEST_F(HloVerifierTestAllowMixedPrecision, RngMixedPrecisionAllowed) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY RngResultElementTypeNotMatch {
   constant.0 = f32[] constant(0)
   constant.1 = f32[] constant(1)
   ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
    distribution=rng_normal
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// rng_normal over an integer element type is not supported.
TEST_F(HloVerifierTest, RngElementTypeNotSupported) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY RngElementTypeNotSupported {
   constant.0 = s32[] constant(0)
   constant.1 = s32[] constant(1)
   ROOT rng.0 = s32[10]{0} rng(s32[] constant.0, s32[] constant.1),
    distribution=rng_normal
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(), HasSubstr("Element type not supported"));
}
// Interior padding in a pad config must be non-negative.
TEST_F(HloVerifierTest, NegativeInteriorPaddingNotAllowed) {
  HloComputation::Builder builder(TestName());
  HloInstruction* param =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {100}), "param"));
  PaddingConfig padding_config;
  padding_config.add_dimensions()->set_interior_padding(-1);
  builder.AddInstruction(HloInstruction::CreatePad(
      ShapeUtil::MakeShape(F32, {100}), param,
      builder.AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
      padding_config));
  auto module = CreateUnverifiedModule();
  module->AddEntryComputation(builder.Build());
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Interior padding cannot be negative"));
}
// Same rejection when the pad value literal is cloned.
TEST_F(HloVerifierTest, PadNegativeInteriorDilationNotAllowed) {
  HloComputation::Builder builder(TestName());
  HloInstruction* param =
      builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeShape(F32, {100}), "param"));
  PaddingConfig padding_config;
  padding_config.add_dimensions()->set_interior_padding(-1);
  builder.AddInstruction(HloInstruction::CreatePad(
      ShapeUtil::MakeShape(F32, {100}), param,
      builder.AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::Zero(F32).Clone())),
      padding_config));
  auto module = CreateUnverifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(verifier().Run(module.get()).status().message(),
              HasSubstr("Interior padding cannot be negative"));
}
// Mixed-precision dot (f32 x bf16 -> f32) is accepted even by the strict
// fixture.
TEST_F(HloVerifierTest, DotMixedPrecisionAllowed) {
  static const char* const kDotHloString = R"(
  HloModule module
  ENTRY entry_computation {
    a = f32[2,10] parameter(0)
    b = bf16[10,2] parameter(1)
    ROOT dot = f32[2,2] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kDotHloString));
  auto status = verifier().Run(module.get()).status();
  EXPECT_TRUE(status.ok()) << status;
}
// Shared valid convolution used by the two window-dilation tests below.
static const char* const kConvHloString = R"(
HloModule module
ENTRY entry_computation {
  param0 = f16[128,128,56,56] parameter(0)
  param1 = f16[3,3,128,128] parameter(1)
  zero_f16 = f16[] constant(0)
  ROOT conv = f16[128,128,28,28] convolution(param0, param1),
    window={size=3x3 stride=2x2}, dim_labels=bf01_01io->bf01
})";
// Window dilation factors must be positive.
TEST_F(HloVerifierTest, ConvNegativeWindowDilationNotAllowed) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(kConvHloString));
  auto* conv = module->entry_computation()->root_instruction();
  Window w = conv->window();
  w.mutable_dimensions(0)->set_window_dilation(-1);
  conv->set_window(w);
  EXPECT_THAT(verifier().Run(module.get()).status().message(),
              HasSubstr("non-positive window dilation factor"));
}
// Base (lhs) dilation factors must be positive as well.
TEST_F(HloVerifierTest, ConvNegativeBaseDilationNotAllowed) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(kConvHloString));
  auto* conv = module->entry_computation()->root_instruction();
  Window w = conv->window();
  w.mutable_dimensions(0)->set_base_dilation(-1);
  conv->set_window(w);
  EXPECT_THAT(verifier().Run(module.get()).status().message(),
              HasSubstr("non-positive base area dilation factor"));
}
// Add whose operand layouts differ from the result layout; accepted by the
// layout-insensitive fixture, rejected by the sensitive one elsewhere.
static const char* const kAddWithLayoutChangeHlo = R"(
   HloModule AddWithLayoutChange
    ENTRY AddWithLayoutChange {
      par0 = f32[3,4]{1,0} parameter(0)
      par1 = f32[3,4]{0,1} parameter(1)
      ROOT add0 = f32[3,4]{1,0} add(par0,par1)
    }
  )";
TEST_F(HloVerifierTest, AddWithLayoutChange) {
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kAddWithLayoutChangeHlo));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// Scalar dynamic-slice indices verify OK when the debug flag allowing scalar
// index dynamic ops is set.
TEST_F(HloVerifierTest, ScalarIndexDynamicSlice) {
  const char* const kScalarIndexDynamicSlice = R"(
    HloModule DynamicSlice_module
    ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
      %original_parameter = s32[2,2,258] parameter(0)
      %constant = s32[] constant(0)
      %start_index = s32[] parameter(1)
      ROOT %dynamic-slice = s32[2,2,258] dynamic-slice(s32[2,2,258] %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
    }
  )";
  HloModuleConfig config;
  DebugOptions debug_options = config.debug_options();
  debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
  config.set_debug_options(debug_options);
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kScalarIndexDynamicSlice, config));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// Same flag also covers dynamic-update-slice with scalar indices.
TEST_F(HloVerifierTest, ScalarIndexDynamicUpdateSlice) {
  const char* const kScalarIndexDynamicSlice = R"(
    HloModule DynamicUpdateSlice_module
    ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
      %input = s32[1,1,25,1]{3,2,1,0} parameter(0)
      %update = s32[1,1,2,1]{3,2,1,0} parameter(1)
      %start_index.0 = s32[] parameter(2)
      %start_index.1 = s32[] parameter(3)
      %start_index.2 = s32[] parameter(4)
      %start_index.3 = s32[] parameter(5)
      ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
    }
  )";
  HloModuleConfig config;
  DebugOptions debug_options = config.debug_options();
  debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
  config.set_debug_options(debug_options);
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kScalarIndexDynamicSlice, config));
  auto status = verifier().Run(module.get)).status();
  ASSERT_TRUE(status.ok());
}
// Dynamic-update-slice must produce the operand's shape (f32 here), so a
// bf16 result is rejected even with mixed precision allowed.
TEST_F(HloVerifierTestAllowMixedPrecision, DynamicUpdateSliceMixedPrecision) {
  const char* const kDynamicUpdateSliceMixedPrecision = R"(
    HloModule kDynamicUpdateSliceMixedPrecision
    ENTRY %entry (parameter.0: f32[32,511,2048], parameter.1: bf16[32,511,512], parameter.2: s32[], parameter.3: s32[], parameter.4: s32[]) -> bf16[32,511,2048] {
      %parameter.0 = f32[32,511,2048] parameter(0)
      %parameter.1 = bf16[32,511,512] parameter(1)
      %parameter.2 = s32[] parameter(2)
      %parameter.3 = s32[] parameter(3)
      %parameter.4 = s32[] parameter(4)
      ROOT %dus = bf16[32,511,2048] dynamic-update-slice(f32[32,511,2048] %parameter.0, bf16[32,511,512] %parameter.1, s32[] %parameter.2, s32[] %parameter.3, s32[] %parameter.4)
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(
                                           kDynamicUpdateSliceMixedPrecision));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Expected instruction to have shape equal to "
                        "f32[32,511,2048], actual shape is bf16[32,511,2048]"));
}
// The same layout-changing add from kAddWithLayoutChangeHlo now fails under
// the layout-sensitive fixture.
TEST_F(HloVerifierTestLayoutSensitive, AddWithLayoutChangeNotAllowed) {
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnUnverifiedModule(kAddWithLayoutChangeHlo));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Instruction shouldn't change layouts"));
}
// Dynamic-slice changing the layout is rejected when layout-sensitive.
TEST_F(HloVerifierTestLayoutSensitive, SliceWithLayoutChangeNotAllowed) {
  const char* const kSliceWithLayoutChangeHlo = R"(
   HloModule SliceWithLayoutChange
    ENTRY SliceWithLayoutChange {
      par0 = f32[4,5]{0,1} parameter(0)
      par1 = s32[] parameter(1)
      par2 = s32[] parameter(2)
      ROOT dslice0 = f32[3,4]{1,0} dynamic-slice(par0, par1, par2),
        dynamic_slice_sizes={3,4}
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnUnverifiedModule(kSliceWithLayoutChangeHlo));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Instruction shouldn't change layouts"));
}
// Concatenate changing an operand's layout is likewise rejected.
TEST_F(HloVerifierTestLayoutSensitive, ConcatWithLayoutChangeNotAllowed) {
  const char* const kConcatWithLayoutChangeHlo = R"(
   HloModule ConcatWithLayoutChange
   ENTRY ConcatWithLayoutChange {
      par0 = f32[3,5]{0,1} parameter(0)
      par1 = f32[3,3]{1,0} parameter(1)
      ROOT concat0 = f32[3,8]{1,0} concatenate(f32[3,5] par0, f32[3,3] par1),
        dimensions={1}
   }
  )";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnUnverifiedModule(kConcatWithLayoutChangeHlo));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Instruction shouldn't change layouts"));
}
// A bitcast must not change the total byte size of the value.
TEST_F(HloVerifierTestLayoutSensitive, BitcastNeedsSameNumberOfElements) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY BitcastNeedsToBeNoOp {
   constant.0 = f32[2] constant({0.0, 0.0})
   ROOT bitcast = f32[3] bitcast(constant.0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Bitcast cannot have different shape sizes of output "
                        "(12) and operand (8)"));
}
// select with f32/bf16 branches is rejected by the strict fixture...
TEST_F(HloVerifierTest, SelectMixedPrecisionNotAllowed) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY SelectMixedPrecisionNotAllowed {
   p0 = pred[32] parameter(0)
   p1 = f32[32] parameter(1)
   p2 = bf16[32] parameter(2)
   ROOT select = f32[32] select(p0, p1, p2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Seen floating point types of different precisions"));
}
// ...but accepted by the mixed-precision-tolerant fixture.
TEST_F(HloVerifierTestAllowMixedPrecision, SelectMixedPrecisionAllowed) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY SelectMixedPrecisionAllowed {
   p0 = pred[32] parameter(0)
   p1 = f32[32] parameter(1)
   p2 = bf16[32] parameter(2)
   ROOT select = f32[32] select(p0, p1, p2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// select over tuples is not allowed; operands must be arrays.
TEST_F(HloVerifierTest, SelectTupleNotAllowed) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY SelectWithTuple {
    p0 = (f32[], f32[]) parameter(0)
    p1 = (f32[], f32[]) parameter(1)
    p2 = pred[] parameter(2)
    ROOT select = (f32[], f32[]) select(p2, p0, p1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Expected array argument for select"));
}
// Well-formed copy-start/copy-done pair with a memory-space change
// (S(1) -> S(2)) verifies cleanly in layout-sensitive mode.
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDone) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY CopyStartAndCopyDone {
    p0 = f32[2,3]{1,0:S(1)} parameter(0)
    copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
    ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// copy-start's destination layout {0,1} disagrees with copy-done's expected
// {1,0}; the verifier flags the shape mismatch.
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDoneWrongLayout) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY CopyStartAndCopyDone {
    p0 = f32[2,3]{1,0:S(1)} parameter(0)
    copy-start = (f32[2,3]{0,1:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
    ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Expected instruction to have shape equal to"));
}
// copy-start must produce the (dest, src, context) tuple shape, not a plain
// array.
TEST_F(HloVerifierTest, CopyStartAndCopyDoneWrongType) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY CopyStartAndCopyDone {
    p0 = f32[2,3] parameter(0)
    copy-start = f32[2,3] copy-start(p0)
    ROOT copy-done = f32[2,3] copy-done(copy-start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("Expected instruction to have shape equal to "
                        "(f32[2,3], f32[2,3], u32[])"));
}
// A copy-start may feed exactly one copy-done; two consumers are rejected.
TEST_F(HloVerifierTest, CopyStartMultipleCopyDone) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY CopyStartAndCopyDone {
    p0 = f32[2,3] parameter(0)
    copy-start = (f32[2,3], f32[2,3], u32[]) copy-start(p0)
    copy-done.1 = f32[2,3] copy-done(copy-start)
    copy-done.2 = f32[2,3] copy-done(copy-start)
    ROOT tuple = (f32[2,3], f32[2,3]) tuple(copy-done.1, copy-done.2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(
      status.message(),
      HasSubstr("copy-start instruction requires one consumer, found 2"));
}
// copy-done's operand must be a copy-start, even if the tuple shape matches.
TEST_F(HloVerifierTest, CopyDoneNoCopyStart) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY CopyStartAndCopyDone {
    p0 = f32[2,3] parameter(0)
    p1 = u32[] parameter(1)
    tuple = (f32[2,3], f32[2,3], u32[]) tuple(p0, p0, p1)
    ROOT copy-done = f32[2,3] copy-done(tuple)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_FALSE(status.ok());
  EXPECT_THAT(status.message(),
              HasSubstr("The operand of a copy-done instruction needs to be "
                        "copy-start, found tuple"));
}
// Minimal async custom-call pair (start + done) verifies cleanly in
// layout-sensitive mode.
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncDone) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY AsyncStartAndAsyncDone {
    p0 = f32[2,3]{1,0:S(1)} parameter(0)
    async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
    ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string))
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// Chained async-updates between start and done are also accepted.
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncUpdateAndAsyncDone) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
    p0 = f32[2,3]{1,0:S(1)} parameter(0)
    async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
    async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
    async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
    ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
// Same chain, but with an explicit async_execution_thread on the start op.
TEST_F(HloVerifierTestLayoutSensitive,
       AsyncStartAndAsyncUpdateAndAsyncDoneWithThreadName) {
  const char* const hlo_string = R"(
  HloModule Module
  ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
    p0 = f32[2,3]{1,0:S(1)} parameter(0)
    async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), async_execution_thread="parallel_thread", custom_call_target="foo"
    async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
    async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
    ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto status = verifier().Run(module.get()).status();
  ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, AsyncStartAndAsyncDoneWrongType) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3] custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), |
1,990 | cpp | tensorflow/tensorflow | dot_merger | third_party/xla/xla/service/dot_merger.cc | third_party/xla/xla/service/dot_merger_test.cc | #ifndef XLA_SERVICE_DOT_MERGER_H_
#define XLA_SERVICE_DOT_MERGER_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that merges dot instructions sharing a common lhs or rhs operand
// into a single larger dot: the non-shared operands are concatenated, one dot
// is performed, and the original results are recovered with slices.  A pair
// is only merged if at least one of the two dots is "small", i.e. the byte
// size of its result plus operands does not exceed `max_size_to_merge`.
class DotMerger : public HloModulePass {
 public:
  explicit DotMerger(int64_t max_size_to_merge)
      : max_size_to_merge_(max_size_to_merge) {}
  absl::string_view name() const override { return "dot-merger"; }
  using HloPassInterface::Run;
  // Runs the pass on all non-fusion computations of `module` (restricted to
  // `execution_threads`).  Returns true iff any dots were merged.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  // Byte-size threshold (result + operands) above which a dot is not a merge
  // candidate.
  int64_t max_size_to_merge_;
};
}
#endif
#include "xla/service/dot_merger.h"
#include <cstdint>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/protobuf_util.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Attempts to merge dots `a` and `b`, which must share either their lhs or
// their rhs operand, into one dot over the concatenation of the non-shared
// operands.  On success, creates the merged dot, redirects all uses of `a`
// and `b` to slices of its result, and returns the new dot; `a` and `b`
// themselves are left in the graph for the caller to delete.  Returns
// nullptr (with an OK status) whenever the pair is not mergeable; a non-OK
// status is produced only if shape inference fails.
absl::StatusOr<HloInstruction*> TryMergeSameOperand(HloInstruction* a,
                                                    HloInstruction* b) {
  // The two results are spliced back out with slices of the merged result,
  // so they must agree on layout.
  if (a->shape().layout() != b->shape().layout()) {
    VLOG(3) << "Can't merge dots because they have a different layout:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }
  // Require a shared operand on the same side (lhs-lhs or rhs-rhs).
  if (a->operand(0) != b->operand(0) && a->operand(1) != b->operand(1)) {
    VLOG(4) << "Can't merge dots because they don't share an operand.\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }
  // Element types of lhs, rhs and the result must all match pairwise.
  if (a->operand(0)->shape().element_type() !=
          b->operand(0)->shape().element_type() ||
      a->operand(1)->shape().element_type() !=
          b->operand(1)->shape().element_type() ||
      a->shape().element_type() != b->shape().element_type()) {
    VLOG(3)
        << "Can't merge dots because their lhs/rhs/return-types don't match.\n"
        << "\t" << a->ToString() << "\n"
        << "\t" << b->ToString();
    return nullptr;
  }
  // Batch and contracting dimension numbers must be identical (including
  // order) on both sides.
  const DotDimensionNumbers& dnums_a = a->dot_dimension_numbers();
  const DotDimensionNumbers& dnums_b = b->dot_dimension_numbers();
  if (!absl::c_equal(dnums_a.lhs_batch_dimensions(),
                     dnums_b.lhs_batch_dimensions()) ||
      !absl::c_equal(dnums_a.rhs_batch_dimensions(),
                     dnums_b.rhs_batch_dimensions()) ||
      !absl::c_equal(dnums_a.lhs_contracting_dimensions(),
                     dnums_b.lhs_contracting_dimensions()) ||
      !absl::c_equal(dnums_a.rhs_contracting_dimensions(),
                     dnums_b.rhs_contracting_dimensions())) {
    VLOG(3) << "Can't merge dots because they have mismatching dnums.\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString() << "\n"
            << absl::c_equal(dnums_a.lhs_batch_dimensions(),
                             dnums_b.lhs_batch_dimensions())
            << ", "
            << absl::c_equal(dnums_a.rhs_batch_dimensions(),
                             dnums_b.rhs_batch_dimensions())
            << ", "
            << absl::c_equal(dnums_a.lhs_contracting_dimensions(),
                             dnums_b.lhs_contracting_dimensions())
            << ", "
            << absl::c_equal(dnums_a.rhs_contracting_dimensions(),
                             dnums_b.rhs_contracting_dimensions());
    return nullptr;
  }
  // A single merged dot can only carry one operand-precision configuration.
  if (!absl::c_equal(a->precision_config().operand_precision(),
                     b->precision_config().operand_precision())) {
    VLOG(3) << "Can't merge dots because they have mismatching operand "
               "precisions:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }
  // Structured-sparsity descriptors must match (compared as protos).
  HloDotInstruction* dot_a = Cast<HloDotInstruction>(a);
  HloDotInstruction* dot_b = Cast<HloDotInstruction>(b);
  if (!absl::c_equal(dot_a->sparsity(), dot_b->sparsity(),
                     protobuf_util::ProtobufEquals)) {
    VLOG(3) << "Can't merge dots because they have mismatching sparsity "
               "descriptors:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }
  VLOG(2) << "Merging dots sharing an operand:\n"
          << "\t" << a->ToString() << "\n"
          << "\t" << b->ToString();
  const DotDimensionNumbers& dnums = a->dot_dimension_numbers();
  // `shared_op` is the operand common to both dots; `diff_op_a`/`diff_op_b`
  // are the per-dot operands that get concatenated.
  bool lhs_same = a->operand(0) == b->operand(0);
  HloInstruction* shared_op = a->mutable_operand(lhs_same ? 0 : 1);
  HloInstruction* diff_op_a = a->mutable_operand(lhs_same ? 1 : 0);
  HloInstruction* diff_op_b = b->mutable_operand(lhs_same ? 1 : 0);
  if (diff_op_a->shape().layout() != diff_op_b->shape().layout()) {
    VLOG(3) << "Can't merge dots because the different operands have a "
               "different layout:\n"
            << "\t" << diff_op_a->ToString() << "\n"
            << "\t" << diff_op_b->ToString();
    return nullptr;
  }
  CHECK_EQ(dnums.lhs_batch_dimensions_size(),
           dnums.rhs_batch_dimensions_size());
  // Collect the batch + contracting dims of the *differing* side so we can
  // locate its single non-contracting dimension (the concat axis).
  std::set<int64_t> used_dims;
  int64_t shared_op_num_non_contracting_dims =
      shared_op->shape().rank() - dnums.lhs_batch_dimensions_size();
  if (lhs_same) {
    shared_op_num_non_contracting_dims -=
        dnums.lhs_contracting_dimensions_size();
    used_dims.insert(dnums.rhs_contracting_dimensions().begin(),
                     dnums.rhs_contracting_dimensions().end());
    used_dims.insert(dnums.rhs_batch_dimensions().begin(),
                     dnums.rhs_batch_dimensions().end());
  } else {
    shared_op_num_non_contracting_dims -=
        dnums.rhs_contracting_dimensions_size();
    used_dims.insert(dnums.lhs_contracting_dimensions().begin(),
                     dnums.lhs_contracting_dimensions().end());
    used_dims.insert(dnums.lhs_batch_dimensions().begin(),
                     dnums.lhs_batch_dimensions().end());
  }
  // Merging only works when the differing operand has exactly one
  // non-contracting, non-batch dimension to concatenate along.
  if (used_dims.size() + 1 != diff_op_a->shape().rank()) {
    VLOG(3)
        << "Can't merge dots because the different operands don't have exactly "
           "one non-contracting dimension:\n"
        << "\t" << a->ToString() << "\n"
        << "\t" << b->ToString();
    return nullptr;
  }
  // `outer_dim` is the smallest dimension index not in `used_dims`, i.e. the
  // unique free dimension found above (used_dims is an ordered std::set).
  int64_t outer_dim = 0;
  for (auto used_dim : used_dims) {
    if (used_dim != outer_dim) {
      break;
    }
    ++outer_dim;
  }
  // Sparsity metadata: if the descriptor applies to the differing side, its
  // metadata operands are concatenated along `outer_dim`; if it applies to
  // the shared side, both dots must reference the identical metadata.
  std::vector<SparsityDescriptor> sparsity(dot_a->sparsity().begin(),
                                           dot_a->sparsity().end());
  std::vector<HloInstruction*> sparse_meta(sparsity.size());
  for (int i = 0; i < sparsity.size(); ++i) {
    HloInstruction* meta = a->mutable_operand(HloDotInstruction::kOperands + i);
    HloInstruction* other_meta =
        b->mutable_operand(HloDotInstruction::kOperands + i);
    if (sparsity[i].index() == (lhs_same ? 1 : 0)) {
      TF_ASSIGN_OR_RETURN(
          Shape meta_concat_shape,
          ShapeInference::InferConcatOpShape(
              {&meta->shape(), &other_meta->shape()}, outer_dim));
      meta = meta->AddInstruction(HloInstruction::CreateConcatenate(
          meta_concat_shape, {meta, other_meta}, outer_dim));
    } else {
      if (other_meta != meta) {
        VLOG(3)
            << "Can't merge dots because the sparsity metadata is different:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
        return nullptr;
      }
    }
    sparse_meta[i] = meta;
  }
  // Build the concatenated operand and the merged dot.
  TF_ASSIGN_OR_RETURN(
      Shape concat_shape,
      ShapeInference::InferConcatOpShape(
          {&diff_op_a->shape(), &diff_op_b->shape()}, outer_dim));
  *concat_shape.mutable_layout() = diff_op_a->shape().layout();
  HloInstruction* concat_op =
      diff_op_a->AddInstruction(HloInstruction::CreateConcatenate(
          concat_shape, {diff_op_a, diff_op_b}, outer_dim));
  HloInstruction* dot_lhs = lhs_same ? shared_op : concat_op;
  HloInstruction* dot_rhs = lhs_same ? concat_op : shared_op;
  TF_ASSIGN_OR_RETURN(
      Shape new_dot_shape,
      ShapeInference::InferDotOpShape(
          dot_lhs->shape(), dot_rhs->shape(), dnums,
          a->shape().element_type(), sparsity));
  *new_dot_shape.mutable_layout() = a->shape().layout();
  HloInstruction* new_dot = a->AddInstruction(
      HloInstruction::CreateDot(new_dot_shape, dot_lhs, dot_rhs, dnums,
                                a->precision_config(), sparsity, sparse_meta));
  // Carry over op metadata from whichever original dot has a name.
  if (!a->metadata().op_name().empty()) {
    new_dot->set_metadata(a->metadata());
  } else if (!b->metadata().op_name().empty()) {
    new_dot->set_metadata(b->metadata());
  }
  // Slice the merged result back into the two originals along `slice_dim`,
  // the output position of the concatenated non-contracting dimension.
  DimensionVector start_indices(new_dot_shape.dimensions_size(), 0);
  DimensionVector limit_indices(new_dot_shape.dimensions().begin(),
                                new_dot_shape.dimensions().end());
  DimensionVector strides(new_dot_shape.dimensions_size(), 1);
  int64_t slice_dim = new_dot_shape.dimensions_size() -
                      (lhs_same ? 1 : 1 + shared_op_num_non_contracting_dims);
  limit_indices[slice_dim] = a->shape().dimensions(slice_dim);
  HloInstruction* new_a = a->AddInstruction(HloInstruction::CreateSlice(
      a->shape(), new_dot, start_indices, limit_indices, strides));
  TF_RETURN_IF_ERROR(a->ReplaceAllUsesWith(new_a));
  start_indices[slice_dim] = limit_indices[slice_dim];
  limit_indices[slice_dim] = new_dot_shape.dimensions(slice_dim);
  HloInstruction* new_b = b->AddInstruction(HloInstruction::CreateSlice(
      b->shape(), new_dot, start_indices, limit_indices, strides));
  TF_RETURN_IF_ERROR(b->ReplaceAllUsesWith(new_b));
  return new_dot;
}
// Greedily merges pairs of dots in `comp` that share an operand, using
// TryMergeSameOperand.  A pair is attempted only if at least one member is a
// "merge candidate" (size <= max_size_to_merge) and merging would not create
// a cycle in the data/control dependency graph.  Returns true iff anything
// was merged.
absl::StatusOr<bool> MergeDots(HloComputation* comp,
                               int64_t max_size_to_merge) {
  // A dot is a candidate when its result plus operand element storage fits
  // under the threshold.
  auto is_merge_candidate = [&](HloInstruction* instr) {
    int64_t bytes = ShapeUtil::ByteSizeOfElements(instr->shape());
    for (const HloInstruction* operand : instr->operands()) {
      bytes += ShapeUtil::ByteSizeOfElements(operand->shape());
    }
    return bytes <= max_size_to_merge;
  };
  // Group dots by each of their operands: two dots sharing any operand land
  // in the same class.  Dots with control deps are excluded entirely.
  absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
      equivalence_classes;
  for (HloInstruction* instr : comp->instructions()) {
    if (instr->opcode() != HloOpcode::kDot ||
        !instr->control_predecessors().empty() ||
        !instr->control_successors().empty()) {
      continue;
    }
    for (HloInstruction* operand : instr->operands()) {
      equivalence_classes[operand].insert(instr);
    }
  }
  // Drop classes that can never produce a merge: fewer than two dots, or no
  // member under the size threshold.
  absl::erase_if(
      equivalence_classes,
      [&](const std::pair<const HloInstruction*,
                          absl::flat_hash_set<HloInstruction*>>& kv) {
        const auto& v = kv.second;
        return v.size() < 2 || absl::c_none_of(v, is_merge_candidate);
      });
  if (equivalence_classes.empty()) {
    return false;
  }
  // Build a dependency graph (data + control edges) so we can reject merges
  // that would make one dot transitively depend on the other.
  tensorflow::GraphCycles graph;
  absl::flat_hash_map<HloInstruction*, int32_t> graph_ids_map;
  // Lazily assigns a graph node id per instruction.
  auto graph_id = [&](HloInstruction* instr) {
    auto it_and_inserted = graph_ids_map.emplace(instr, -1);
    auto it = it_and_inserted.first;
    auto inserted = it_and_inserted.second;
    if (inserted) {
      it->second = graph.NewNode();
    }
    return it->second;
  };
  for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
    int32_t id = graph_id(instr);
    for (HloInstruction* operand : instr->operands()) {
      CHECK(graph.InsertEdge(graph_id(operand), id));
    }
    for (HloInstruction* control_pred : instr->control_predecessors()) {
      CHECK(graph.InsertEdge(graph_id(control_pred), id));
    }
  }
  absl::flat_hash_set<HloInstruction*> dead_instrs;
  // Iterate classes in a deterministic order (sorted by unique_id) so the
  // pass produces stable output across runs.
  std::vector<HloInstruction*> keys;
  keys.reserve(equivalence_classes.size());
  for (auto& kv : equivalence_classes) {
    keys.push_back(kv.first);
  }
  absl::c_sort(keys, [](const HloInstruction* a, const HloInstruction* b) {
    return a->unique_id() < b->unique_id();
  });
  for (auto key : keys) {
    const auto& values = equivalence_classes[key];
    absl::InlinedVector<HloInstruction*, 16> dots(values.begin(), values.end());
    absl::c_sort(dots, [](const HloInstruction* a, const HloInstruction* b) {
      return a->unique_id() < b->unique_id();
    });
    // Greedy pairwise merging; a successful merge replaces slot i with the
    // merged dot (so it can be merged again) and nulls out slot j.
    for (int64_t i = 0; i < dots.size(); i++) {
      HloInstruction*& a = dots[i];
      if (a == nullptr) {
        continue;
      }
      for (int64_t j = i + 1; j < dots.size(); j++) {
        HloInstruction* b = dots[j];
        if (b == nullptr) {
          continue;
        }
        int32_t a_id = graph_id(a);
        int32_t b_id = graph_id(b);
        // Skip already-merged dots, pairs where neither side is small
        // enough, and pairs whose merge would create a dependency cycle.
        if (dead_instrs.contains(a) || dead_instrs.contains(b) ||
            (!is_merge_candidate(a) && !is_merge_candidate(b)) ||
            graph.IsReachableNonConst(a_id, b_id) ||
            graph.IsReachableNonConst(b_id, a_id)) {
          continue;
        }
        TF_ASSIGN_OR_RETURN(HloInstruction * merged, TryMergeSameOperand(a, b));
        if (merged != nullptr) {
          // Keep the cycle graph consistent: the merged dot depends on both
          // originals' inputs and feeds all of their former successors.
          int32_t merged_id = graph_id(merged);
          graph.InsertEdge(a_id, merged_id);
          graph.InsertEdge(b_id, merged_id);
          for (int32_t succ : graph.SuccessorsCopy(a_id)) {
            graph.InsertEdge(merged_id, succ);
          }
          for (int32_t succ : graph.SuccessorsCopy(b_id)) {
            graph.InsertEdge(merged_id, succ);
          }
          dead_instrs.insert(a);
          dead_instrs.insert(b);
          dots[i] = merged;
          dots[j] = nullptr;
        }
      }
    }
  }
  // The merged-away dots had their uses replaced inside TryMergeSameOperand;
  // remove the now-dead originals.
  for (HloInstruction* instr : dead_instrs) {
    TF_RETURN_IF_ERROR(comp->RemoveInstruction(instr));
  }
  return !dead_instrs.empty();
}
}
// Runs dot merging over every non-fusion computation in `module` that belongs
// to `execution_threads`.  Returns true iff any computation changed.
absl::StatusOr<bool> DotMerger::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool any_changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    // Each computation is processed independently; a failure in one aborts
    // the pass via TF_ASSIGN_OR_RETURN.
    TF_ASSIGN_OR_RETURN(const bool computation_changed,
                        MergeDots(computation, max_size_to_merge_));
    if (computation_changed) {
      any_changed = true;
    }
  }
  return any_changed;
}
} | #include "xla/service/dot_merger.h"
#include <cstdint>
#include <limits>
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
// Test fixture for the DotMerger pass.  Both HloTestBase toggles
// (verifier layout sensitivity / mixed-precision allowance — TODO confirm
// the exact parameter meanings against HloTestBase) are set to false.
class DotMergerTest : public HloTestBase {
 public:
  DotMergerTest()
      : HloTestBase(false,
                    false) {}
};
// Two dots sharing the lhs merge into one dot over concatenated rhs
// operands, and both results become slices of the merged dot.
TEST_F(DotMergerTest, MergeRHS) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs  = f32[200,100] parameter(0)
    rhs0 = f32[100, 10] parameter(1)
    rhs1 = f32[100, 50] parameter(2)
    dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  const HloInstruction* dot0 = nullptr;
  const HloInstruction* dot1 = nullptr;
  // Both tuple elements must slice the *same* merged dot.
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));
  EXPECT_EQ(dot0, dot1);
  EXPECT_THAT(dot0,
              GmockMatch(m::Dot(m::Parameter(0),
                                m::Concatenate().WithBinaryOperandsAnyOrder(
                                    m::Parameter(1), m::Parameter(2)))));
}
// Merging preserves the (matching, non-default) layout of the concatenated
// rhs operands.
TEST_F(DotMergerTest, MergeRHSWithLayouts) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs  = f32[200,100] parameter(0)
    rhs0 = f32[100, 10]{0,1} parameter(1)
    rhs1 = f32[100, 50]{0,1} parameter(2)
    dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  const HloInstruction* dot0 = nullptr;
  const HloInstruction* dot1 = nullptr;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));
  EXPECT_EQ(dot0, dot1);
  Shape expected_concat_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 60}, {0, 1});
  EXPECT_THAT(
      dot0, GmockMatch(m::Dot(m::Parameter(0),
                              m::Concatenate()
                                  .WithBinaryOperandsAnyOrder(m::Parameter(1),
                                                              m::Parameter(2))
                                  .WithShapeEqualTo(&expected_concat_shape))));
}
// Differing rhs layouts ({0,1} vs {1,0}) block the merge.
TEST_F(DotMergerTest, NoMergeDifferentLayoutRHS) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs  = f32[200,100] parameter(0)
    rhs0 = f32[100, 10]{0,1} parameter(1)
    rhs1 = f32[100, 50]{1,0} parameter(2)
    dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_FALSE(changed);
}
// Two dots sharing the rhs merge over concatenated lhs operands.
TEST_F(DotMergerTest, MergeLHS) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    rhs  = f32[200, 50] parameter(2)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
// A matching non-default layout on both dot results is preserved on the
// merged dot.
TEST_F(DotMergerTest, MergeLHSDotsWithNonDefaultLayout) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    rhs  = f32[200, 50] parameter(2)
    dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50]{0,1} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{0,1}) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  Shape expected_dot_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {400, 50}, {0, 1});
  const HloInstruction* dot0 = nullptr;
  const HloInstruction* dot1 = nullptr;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Tuple(m::Slice(m::Dot(&dot0, m::Op(), m::Op())
                                       .WithShapeEqualTo(&expected_dot_shape)),
                          m::Slice(m::Dot(&dot1, m::Op(), m::Op())))));
  EXPECT_EQ(dot0, dot1);
}
// Differing lhs layouts block the merge.
TEST_F(DotMergerTest, NoMergeDifferentLayoutLHS) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200]{1,0} parameter(0)
    lhs1 = f32[300,200]{0,1} parameter(1)
    rhs  = f32[200, 50] parameter(2)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_FALSE(changed);
}
// Differing layouts on the dot *results* also block the merge.
TEST_F(DotMergerTest, NoMergeDifferentDotLayout) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    rhs  = f32[200, 50] parameter(2)
    dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50]{1,0} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{1,0}) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_FALSE(changed);
}
// Three dots sharing one rhs collapse into a single dot over a three-way
// concatenation (after algebraic simplification folds the nested concats).
TEST_F(DotMergerTest, MergeThree) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    lhs2 = f32[500,200] parameter(2)
    rhs  = f32[200, 50] parameter(3)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};
  TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());
  const HloInstruction* s0 = nullptr;
  const HloInstruction* s1 = nullptr;
  const HloInstruction* s2 = nullptr;
  SCOPED_TRACE(module->ToString());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Tuple(
          m::Slice(m::Dot(
              &s0,
              m::Concatenate(m::Parameter(0), m::Parameter(1), m::Parameter(2)),
              m::Parameter(3))),
          m::Slice(m::Op(&s1)), m::Slice(m::Op(&s2)))));
  EXPECT_EQ(s0, s1);
  EXPECT_EQ(s1, s2);
}
// dot2's lhs is derived (via pad) from dot0, so merging dot2 with the others
// would create a cycle; only dot0 and dot1 are merged.
TEST_F(DotMergerTest, NoMergeThreeDueToCycle) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    rhs  = f32[200, 50] parameter(2)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    zero = f32[] constant(0)
    lhs2 = f32[500,200] pad(dot0, zero), padding=400_0x150_0
    dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};
  TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());
  const HloInstruction* s0 = nullptr;
  const HloInstruction* s1 = nullptr;
  const HloInstruction* s2 = nullptr;
  SCOPED_TRACE(module->ToString());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Tuple(
          m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
                          m::Parameter(2))),
          m::Slice(m::Op(&s1)),
          m::Dot(&s2, m::Op(), m::Parameter(2)))));
  EXPECT_EQ(s0, s1);
  EXPECT_NE(s0, s2);
}
// dot1 consumes (a pad of) dot0's result, so the two cannot be merged at all.
TEST_F(DotMergerTest, NoMergeDataDependency) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    rhs  = f32[200, 50] parameter(1)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    zero = f32[] constant(0)
    lhs1 = f32[300,200] pad(dot0, zero), padding=200_0x150_0
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_FALSE(changed);
}
// Merging also works when both dots contract along dimension 1 on each side.
TEST_F(DotMergerTest, MergeSameContractingDimsOnBothSides) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[100,200] parameter(0)
    lhs1 = f32[300,200] parameter(1)
    rhs  = f32[50, 200] parameter(2)
    dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
    dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}
    ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
// Batched dots with identical batch dims merge along the non-contracting dim.
TEST_F(DotMergerTest, MergeWithBatchDims) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[2,4,100,200] parameter(0)
    lhs1 = f32[2,4,300,200] parameter(1)
    rhs  = f32[2,4,200, 50] parameter(2)
    dot0 = f32[2,4,100, 50] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
                                            lhs_contracting_dims={3}, rhs_contracting_dims={2}
    dot1 = f32[2,4,300, 50] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
                                            lhs_contracting_dims={3}, rhs_contracting_dims={2}
    ROOT tuple = (f32[2,4,100,50], f32[2,4,300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
// Multiple contracting dims plus batch dims are supported; the verifier is
// run afterwards to confirm the rewritten module is well-formed.
TEST_F(DotMergerTest, MergeWithBatchDimsAndMultipleContractingDims) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs  = f32[2,3,4,5] parameter(0)
    rhs0 = f32[2,6,3,4,5] parameter(1)
    rhs1 = f32[2,7,3,4,5] parameter(2)
    dot0 = f32[2,4,6] dot(lhs, rhs0), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},
                                      lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}
    dot1 = f32[2,4,7] dot(lhs, rhs1), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},
                                      lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}
    ROOT tuple = (f32[2,4,6], f32[2,4,7]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  TF_ASSERT_OK(verifier().Run(module.get()).status());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
// Batch dims listed out of order ({1,0}) still merge as long as both dots
// agree on the ordering.
TEST_F(DotMergerTest, MergeWithUnsortedBatchDims) {
  absl::string_view module_string = R"(
  HloModule module

  ENTRY main {
    lhs0 = f32[2,4,100,200] parameter(0)
    lhs1 = f32[2,4,300,200] parameter(1)
    rhs  = f32[2,4,200, 50] parameter(2)
    dot0 = f32[4,2,100, 50] dot(lhs0, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},
                                            lhs_contracting_dims={3}, rhs_contracting_dims={2}
    dot1 = f32[4,2,300, 50] dot(lhs1, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},
                                            lhs_contracting_dims={3}, rhs_contracting_dims={2}
    ROOT tuple = (f32[4,2,100,50], f32[4,2,300,50]) tuple(dot0, dot1)
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  DotMerger pass(std::numeric_limits<int64_t>::max());
  TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Slice(), m::Slice())));
}
// With a finite size threshold, dot0 and dot1 merge but dot2 (the largest)
// is rejected as a merge candidate and stays a separate dot on its original
// parameters.
TEST_F(DotMergerTest, NoMergeDueToIsMergeCandidate) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[100,200] parameter(0)
lhs1 = f32[300,200] parameter(1)
lhs2 = f32[500,200] parameter(2)
rhs = f32[200, 50] parameter(3)
dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
// Threshold is the byte size of dot0's result plus its two operands;
// presumably this admits the dot0+dot1 merge but excludes dot2 -- confirm
// against DotMerger's size-accounting rules.
DotMerger pass((100 * 50 + 100 * 200 + 200 * 50) *
sizeof(float));
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
const HloInstruction* s2 = nullptr;
SCOPED_TRACE(module->ToString());
// dot0 and dot1 are slices of the same merged dot (s0 == s1); dot2 remains
// an independent dot (s2 != s0).
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(3))),
m::Slice(m::Op(&s1)),
m::Dot(&s2, m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(s0, s1);
EXPECT_NE(s0, s2);
}
// Dots with different lhs_batch_dims ({0,1} vs {0,2}) must not be merged.
TEST_F(DotMergerTest, NoMergeDifferentLhsBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10,10] parameter(0)
lhs1 = f32[10,10,10,10] parameter(1)
rhs = f32[10,10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,2}, rhs_batch_dims={0,1}, lhs_contracting_dims={1}, rhs_contracting_dims={2}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots with different rhs_batch_dims ({0,1} vs {0,2}) must not be merged.
TEST_F(DotMergerTest, NoMergeDifferentRhsBatchDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10,10] parameter(0)
lhs1 = f32[10,10,10,10] parameter(1)
rhs = f32[10,10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,2}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots that contract over multiple dimensions ({0,1}) can still be merged
// when both dots use identical contracting dims and share the rhs.
TEST_F(DotMergerTest, MergeMultipleContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10] parameter(0)
lhs1 = f32[10,10,10] parameter(1)
rhs = f32[10,10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
SCOPED_TRACE(module->ToString());
// Both outputs must be slices of the same merged dot (s0 == s1).
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Slice(m::Op(&s1)))));
EXPECT_EQ(s0, s1);
}
// Merging works when the shared rhs has multiple non-contracting dimensions
// (rhs is rank 4, contributing dims {12,13} to each dot's output).
TEST_F(DotMergerTest, MergeMultipleNonContractingDimsInRhsSharedOperand) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[8,9,10] parameter(0)
lhs1 = f32[8,9,11] parameter(1)
rhs = f32[8,9,12,13] parameter(2)
dot0 = f32[10,12,13] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
dot1 = f32[11,12,13] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT tuple = (f32[10,12,13], f32[11,12,13]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
// The rewritten module must still pass HLO verification.
TF_ASSERT_OK(verifier().Run(module.get()).status());
const HloInstruction* s0 = nullptr;
const HloInstruction* s1 = nullptr;
SCOPED_TRACE(module->ToString());
// Both outputs must be slices of the same merged dot (s0 == s1).
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Slice(m::Op(&s1)))));
EXPECT_EQ(s0, s1);
}
// Dots whose lhs keeps multiple non-contracting (outer) dimensions are not
// merged.
TEST_F(DotMergerTest, NoMergeMultipleOuterDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10,10] parameter(0)
lhs1 = f32[10,10,10] parameter(1)
rhs = f32[10,10,10] parameter(2)
dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots with different lhs_contracting_dims ({0} vs {1}) must not be merged.
TEST_F(DotMergerTest, NoMergeDifferentLhsContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots with different rhs_contracting_dims ({0} vs {1}) must not be merged.
TEST_F(DotMergerTest, NoMergeDifferentRhsContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={1}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// A dot carrying a control dependency (dot2 depends on dot1) blocks merging
// for the whole group: no change is expected.
TEST_F(DotMergerTest, NoMergeControlPredecessor) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f32[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot2 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, control-predecessors={dot1}
ROOT tuple = (f32[10,10], f32[10,10], f32[10,10]) tuple(dot0, dot1, dot2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots whose lhs operands have different element types (f32 vs f16) are not
// merged.
TEST_F(DotMergerTest, NoMergeDifferentLhsTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f32[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f32[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots whose rhs operands have different element types (f32 vs f16) are not
// merged, even with a shared lhs.
TEST_F(DotMergerTest, NoMergeDifferentRhsTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs = f32[10,10] parameter(0)
rhs0 = f32[10,10] parameter(1)
rhs1 = f16[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs, rhs0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs, rhs1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots with different result element types (f16 vs f32) are not merged even
// though all operands are f16.
TEST_F(DotMergerTest, NoMergeDifferentReturnTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f16[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f16[10,10] parameter(2)
dot0 = f16[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f16[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
// Dots whose operands are f16 but whose results are f32 (a type-upgrading
// dot) still merge; the merged dot keeps the upgraded F32 result type with
// the concatenated shape {20,10}.
TEST_F(DotMergerTest, MergeWithTypeUpgrade) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
lhs0 = f16[10,10] parameter(0)
lhs1 = f16[10,10] parameter(1)
rhs = f16[10,10] parameter(2)
dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* d0 = nullptr;
const HloInstruction* d1 = nullptr;
// Both outputs must be slices of the same F32 merged dot (d0 == d1).
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Dot(&d0, m::Concatenate(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))
.WithShape(F32, {20, 10})),
m::Slice(m::Op(&d1)))));
EXPECT_EQ(d0, d1);
}
// Structured-sparse dots (sparsity=R.2@2:4) that share the same sparsity
// metadata operand merge into one sparse dot that reuses that metadata
// unchanged.
TEST_F(DotMergerTest, MergeSparseDotsSameMetadata) {
absl::string_view kHlo = R"(
HloModule test
ENTRY main {
lhs0 = f16[5,10,32] parameter(0)
lhs1 = f16[5,10,32] parameter(1)
rhs = f16[5,10,16] parameter(2)
meta = u16[5,10,2] parameter(3)
dot0 = f32[5,10,10] dot(lhs0, rhs, meta), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
dot1 = f32[5,10,10] dot(lhs1, rhs, meta), sparsity=R.2@2:4,
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
DotMerger pass(std::numeric_limits<int64_t>::max());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
const HloInstruction *d0, *d1;
// Both outputs are slices of one merged sparse dot whose third operand is
// the original (un-concatenated) metadata parameter.
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Slice(m::Op(&d0)
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, m::Concatenate(m::Parameter(0),
m::Parameter(1)))
.WithOperand(1, m::Parameter(2))
.WithOperand(2, m::Parameter(3))
.WithShape(F32, {5, 20, 10})),
m::Slice(m::Op(&d1)))));
EXPECT_EQ(d0, d1);
EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 10, 2}));
}
TEST_F(DotMergerTest, MergeSparseDotsConcatMetadata) {
absl::string_view kHlo = R"(
HloModule test
ENTRY main {
lhs0 = f16[5,10,16] parameter(0)
lhs1 = f16[5,10,16] parameter(1)
rhs = f16[5,10,32] parameter(2)
meta0 = u16[5,10,2] parameter(3)
meta1 = u16[5,10,2] parameter(4)
dot0 = f32[5,10,10] dot(lhs0, r |
1,991 | cpp | tensorflow/tensorflow | alias_analysis | third_party/xla/xla/service/llvm_ir/alias_analysis.cc | third_party/xla/xla/service/llvm_ir/alias_analysis_test.cc | #ifndef XLA_SERVICE_LLVM_IR_ALIAS_ANALYSIS_H_
#define XLA_SERVICE_LLVM_IR_ALIAS_ANALYSIS_H_
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "llvm/IR/Module.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/types.h"
namespace xla {
namespace llvm_ir {
// Attaches LLVM alias-analysis metadata (!alias.scope, !noalias,
// !invariant.load) to IrArrays, derived from XLA's buffer assignment.
// One instance caches metadata nodes per buffer slice for a single module.
class AliasAnalysis {
public:
AliasAnalysis(const HloModule& module, const BufferAssignment& assignment,
llvm::LLVMContext* context)
: module_(module), assignment_(assignment), context_(context) {}
// Adds aliasing metadata describing the buffer backing `hlo` at shape
// `index` to `array`, subject to the module's debug options.
void AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
const ShapeIndex& index = {});
private:
// Returns (creating on first use) the single alias-scope domain node.
llvm::MDNode* GetAliasDomain();
// Returns the !alias.scope list for `buffer_slice`, or nullptr for
// entry-parameter buffers.
llvm::MDNode* GetAliasScopeMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain);
// Returns the !noalias list of buffers provably disjoint from
// `buffer_slice` near `hlo`, or nullptr if there are none.
llvm::MDNode* GetNoaliasMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain,
const BufferAssignment& assignment, const HloInstruction& hlo);
const HloModule& module_;
const BufferAssignment& assignment_;
llvm::LLVMContext* context_;
// Lazily-created alias domain shared by all scopes.
llvm::MDNode* alias_domain_ = nullptr;
// Caches: one alias-scope node per slice ...
absl::flat_hash_map<BufferAllocation::Slice, llvm::MDNode*>
alias_scope_metadata_;
// ... and one noalias node per (slice, instruction) pair.
absl::flat_hash_map<std::pair<BufferAllocation::Slice, const HloInstruction*>,
llvm::MDNode*>
noalias_metadata_;
};
}
}
#endif
#include "xla/service/llvm_ir/alias_analysis.h"
#include <map>
#include "absl/container/flat_hash_set.h"
#include "llvm/IR/MDBuilder.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/types.h"
namespace xla {
namespace llvm_ir {
// Sentinel allocation (index -1) standing in for entry-computation
// parameters, which have no slice in the BufferAssignment. Intentionally
// heap-allocated and never deleted so it is valid for the program lifetime
// regardless of static-destruction order.
static const BufferAllocation* kParameterAllocation = new BufferAllocation(
-1, 0, LogicalBuffer::Color(0));
// Adds alias.scope / noalias / invariant.load metadata for the buffer
// backing `hlo` at `index` to `array`. Each kind of metadata is gated by the
// corresponding xla_llvm_enable_* debug option.
void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
const ShapeIndex& index) {
BufferAllocation::Slice buffer_slice;
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
// Entry parameters have no assigned slice; use the shared sentinel.
buffer_slice = BufferAllocation::Slice(kParameterAllocation, 0, 0);
} else {
auto unique_slice = assignment_.GetUniqueSlice(&hlo, index);
if (!unique_slice.ok()) {
// Without a unique slice we cannot make any aliasing claims.
return;
}
buffer_slice = unique_slice.value();
}
if (module_.config().debug_options().xla_llvm_enable_alias_scope_metadata()) {
// Cache one alias-scope node per slice; the getter returns nullptr for
// the parameter sentinel, in which case nothing is attached.
llvm::MDNode*& alias_scope_md = alias_scope_metadata_[buffer_slice];
if (alias_scope_md == nullptr) {
alias_scope_md =
GetAliasScopeMetadataForBuffer(buffer_slice, GetAliasDomain());
}
if (alias_scope_md != nullptr) {
array->AddAliasScopeMetadata(alias_scope_md);
}
}
if (module_.config().debug_options().xla_llvm_enable_noalias_metadata()) {
// noalias depends on the instruction's neighborhood, so it is cached per
// (slice, instruction) pair.
llvm::MDNode*& noalias_md = noalias_metadata_[{buffer_slice, &hlo}];
if (noalias_md == nullptr) {
noalias_md = GetNoaliasMetadataForBuffer(buffer_slice, GetAliasDomain(),
assignment_, hlo);
}
if (noalias_md != nullptr) {
array->AddNoaliasMetadata(noalias_md);
}
}
if (module_.config()
.debug_options()
.xla_llvm_enable_invariant_load_metadata()) {
// Entry parameters are read-only for the whole program, so loads from
// them may be marked invariant.
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
array->MarkInvariantOverWholeProgram(context_);
}
}
}
// Returns the single alias-scope domain for this module, creating it on the
// first call and caching it in alias_domain_.
llvm::MDNode* AliasAnalysis::GetAliasDomain() {
if (alias_domain_ == nullptr) {
// Only construct the MDBuilder when we actually need to create the
// domain; the original built it unconditionally on every call.
llvm::MDBuilder metadata_builder(*context_);
alias_domain_ =
metadata_builder.createAliasScopeDomain("XLA global AA domain");
}
return alias_domain_;
}
// Builds a single-element !alias.scope list naming `buffer_slice` inside
// `domain`. Entry-parameter buffers (the sentinel allocation) get no scope,
// so nullptr is returned for them.
llvm::MDNode* AliasAnalysis::GetAliasScopeMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain) {
if (buffer_slice.allocation() == kParameterAllocation) {
return nullptr;
}
llvm::LLVMContext& context = domain->getContext();
llvm::MDBuilder builder(context);
llvm::MDNode* buffer_scope = builder.createAliasScope(
"buffer: " + buffer_slice.ToString(), domain);
return llvm::MDNode::get(context, buffer_scope);
}
// Builds the !noalias scope list for `buffer_slice`: the set of assigned
// buffers that (a) belong to `hlo`, its users, or the operands of either,
// and (b) do not overlap `buffer_slice`. Returns nullptr when the set is
// empty.
llvm::MDNode* AliasAnalysis::GetNoaliasMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain,
const BufferAssignment& assignment, const HloInstruction& hlo) {
std::vector<const HloValue*> worklist;
absl::flat_hash_set<const HloInstruction*> added_to_worklist;
// Collects every assigned source buffer of `instruction` (all subshapes)
// into `worklist`, visiting each instruction at most once. Parameters are
// skipped: they have no assignment here.
auto add_buffers_to_worklist =
[&](const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kParameter) {
return;
}
if (added_to_worklist.contains(instruction)) {
return;
}
added_to_worklist.insert(instruction);
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& , const ShapeIndex& index) {
for (const HloValue* buffer :
assignment.GetSourceBuffers(instruction, index)) {
if (assignment.HasAllocation(*buffer)) {
worklist.push_back(buffer);
}
}
});
};
// Gather candidates from hlo's users and their operands ...
for (HloInstruction* user : hlo.users()) {
add_buffers_to_worklist(user);
for (HloInstruction* operand : user->operands()) {
add_buffers_to_worklist(operand);
}
}
// ... and from hlo itself and its own operands.
add_buffers_to_worklist(&hlo);
for (HloInstruction* operand : hlo.operands()) {
add_buffers_to_worklist(operand);
}
// std::set keeps the slices deduplicated and deterministically ordered.
std::set<BufferAllocation::Slice> buffers;
for (const HloValue* buffer : worklist) {
const BufferAllocation::Slice noalias_slice =
assignment.GetAssignedAllocation(*buffer).GetSlice(*buffer);
if (!buffer_slice.OverlapsWith(noalias_slice)) {
buffers.insert(noalias_slice);
// Cap the scope list size to keep the emitted metadata manageable.
constexpr int kMaxNoAliasSetSize = 500;
if (buffers.size() >= kMaxNoAliasSetSize) {
break;
}
}
}
if (buffers.empty()) {
return nullptr;
}
llvm::MDBuilder metadata_builder(domain->getContext());
std::vector<llvm::Metadata*> scopes;
for (const BufferAllocation::Slice noalias_slice : buffers) {
llvm::MDNode* scope = metadata_builder.createAliasScope(
"buffer: " + noalias_slice.ToString(), domain);
scopes.push_back(scope);
}
llvm::MDNode* noalias_list =
llvm::MDNode::get(domain->getContext(), AsArrayRef(scopes));
return noalias_list;
}
}
} | #include "absl/status/status.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/cpu/tests/cpu_codegen_test.h"
#include "tsl/platform/test.h"
namespace xla {
namespace cpu {
namespace {
// Fixture: compiles HLO through the CPU backend and FileCheck-verifies IR.
class AliasAnalysisTest : public CpuCodegenTest {};
// No-op FFI custom-call target; it ignores its buffer argument and result
// and always succeeds. Exists only so the HLO below has a callable target.
static absl::Status FakeCustomCallTarget(ffi::AnyBuffer,
ffi::Result<ffi::AnyBuffer>) {
return absl::OkStatus();
}
// Wrap the function as a typed-FFI handler (one buffer arg, one buffer
// result) ...
XLA_FFI_DEFINE_HANDLER(kFakeCustomCallTarget, FakeCustomCallTarget,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
// ... and register it under the name used by the HLO's custom_call_target,
// for the Host platform.
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$FakeCustomCallTarget", "Host",
kFakeCustomCallTarget);
// Compiles a while loop whose body/condition are embedded computations and
// FileCheck-verifies that the emitted IR carries the expected alias.scope /
// noalias metadata: the body's store and the condition's load of the loop
// state share one alias scope, and the load is noalias with other buffers.
TEST_F(AliasAnalysisTest, EmbeddedComputationParamsMayAliasTemps) {
const char* hlo_string = R"(
HloModule while
body {
const.0.125 = f32[] constant(0.125)
body.state = f32[] parameter(0)
ROOT add.2.2 = f32[] add(const.0.125, body.state)
}
condition {
const.100 = f32[] constant(100)
condition.state = f32[] parameter(0)
addend = f32[] custom-call(condition.state), custom_call_target="__xla_test$$FakeCustomCallTarget", api_version=API_VERSION_TYPED_FFI
add = f32[] add(addend, condition.state)
ROOT greater-than = pred[] compare(const.100, add), direction=GT
}
ENTRY while3 {
const.0 = f32[] constant(0)
ROOT while = f32[] while(const.0), condition=condition, body=body
}
)";
// NOTE: the second raw string below is a FileCheck pattern; it must stay
// byte-identical, including its metadata-node expectations.
CompileAndVerifyIr(hlo_string, R"(
; CHECK-LABEL: @body(ptr %retval
; CHECK: %[[add_result:.*]] = fadd float %[[fadd_lhs:.*]], %[[fadd_rhs:.*]]
; CHECK: store float %[[add_result]], ptr %[[store_dest:.*]], align 4, !alias.scope ![[alias_scope_md_for_store:[0-9]+]]
;
; CHECK-LABEL: @condition(ptr %retval, ptr noalias %run_options, ptr noalias %params
; CHECK: %[[cond_state_buf_ptr:.*]] = getelementptr inbounds ptr, ptr %buffer_table, i64 0
; CHECK: %[[cond_state_buf_untyped:.*]] = load ptr, ptr %[[cond_state_buf_ptr]]
; CHECK: load float, ptr %[[cond_state_buf_untyped]], align 4, !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
;
; CHECK-LABEL: @while3(
![[alias_scope_md_for_store]] = !{![[buffer_idx_0:.*]]}
![[buffer_idx_0]] = !{!"buffer: {index:0, offset:0, size:4}", ![[aa_md_root:.*]]}
![[aa_md_root]] = !{!"XLA global AA domain"}
![[buffer_idx_1:.*]] = !{!"buffer: {index:1, offset:0, size:4}", !3}
![[buffer_idx_1_offset_16:.*]] = !{!"buffer: {index:1, offset:16, size:1}", !3}
![[noalias_md_for_load]] = !{![[buffer_idx_1_offset_16]], ![[buffer_idx_1]]}
}
)");
}
}
}
} |
1,992 | cpp | tensorflow/tensorflow | ir_array | third_party/xla/xla/service/llvm_ir/ir_array.cc | third_party/xla/xla/service/llvm_ir/ir_array_test.cc | #ifndef XLA_SERVICE_LLVM_IR_IR_ARRAY_H_
#define XLA_SERVICE_LLVM_IR_IR_ARRAY_H_
#include <map>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "xla/map_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace llvm_ir {
// A typed view over an LLVM pointer that represents an XLA array: the base
// pointer, its LLVM pointee/element types, the XLA Shape, and any LLVM
// load/store metadata to attach to accesses.
class IrArray {
public:
// An index into an IrArray: a per-dimension multidimensional form, plus an
// optional linearized form (linear()). All index values share index_type_.
class Index {
public:
// An empty rank-0 index carrying only the (integer) index type.
explicit Index(llvm::Type* index_ty) : index_type_(index_ty) {
CHECK(index_ty->isIntegerTy());
}
// From a linear value: delinearized against `shape`'s layout.
Index(llvm::Value* linear, const Shape& shape, llvm::IRBuilder<>* b);
// From a linear value plus partially-known multidim entries (null entries
// are filled in by delinearization; non-null ones are kept).
Index(llvm::Value* linear, absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::IRBuilder<>* b);
// From a linear value with runtime (dynamic) dimension sizes.
Index(llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims, llvm::IRBuilder<>* b);
// From multidim values only; no linear form is available.
Index(absl::Span<llvm::Value* const> multidim, const Shape& shape,
llvm::Type* index_type);
// As above but with bare dimensions (a default-layout shape is used).
Index(absl::Span<llvm::Value* const> multidim,
absl::Span<int64_t const> dimensions, llvm::Type* index_type);
// Returns a copy with `addend` added to dimension `dim`. The linear form
// is dropped, since it no longer matches the multidim form.
Index AddOffsetToDim(llvm::Value* addend, int64_t dim,
llvm::IRBuilder<>* b) const {
Index with_offset = *this;
with_offset.linear_ = nullptr;
with_offset.multidim_[dim] =
b->CreateAdd(with_offset.multidim_[dim], addend);
return with_offset;
}
// Returns a copy with one offset added per dimension; drops linear form.
Index AddOffset(absl::Span<llvm::Value* const> offsets,
llvm::IRBuilder<>* b) const {
CHECK_EQ(multidim_.size(), offsets.size());
Index with_offset = *this;
with_offset.linear_ = nullptr;
for (auto&& [dim, offset] : llvm::zip(with_offset.multidim_, offsets)) {
dim = b->CreateAdd(dim, offset);
}
return with_offset;
}
const std::vector<llvm::Value*>& multidim() const { return multidim_; }
const std::vector<int64_t>& dims() const { return dims_; }
// May be nullptr when only the multidim form is known.
llvm::Value* linear() const { return linear_; }
size_t size() const { return multidim().size(); }
llvm::Value* operator[](size_t i) const { return multidim()[i]; }
using const_iterator = std::vector<llvm::Value*>::const_iterator;
const_iterator begin() const { return multidim().begin(); }
const_iterator end() const { return multidim().end(); }
// True if linear() exists and addresses shape `a` as a bitcast.
bool LinearValidOnShape(const Shape& a) const;
static bool ShapeIsCompatible(const Shape& a, const Shape& b);
bool ShapeIsCompatible(const Shape& a) const {
return ShapeIsCompatible(a, AsShapeWithType(a.element_type()));
}
// This index's (dims_, layout_) as a Shape with the given element type.
Shape AsShapeWithType(PrimitiveType element_type) const {
return ShapeUtil::MakeShapeWithDenseLayout(element_type, dims_,
layout_.minor_to_major());
}
// The SourceIndexOf* methods map this index (into an op's output) back to
// the corresponding index into the op's operand.
Index SourceIndexOfReshape(const Shape& output_shape,
const Shape& input_shape,
llvm::IRBuilder<>* builder) const;
Index SourceIndexOfSlice(const Shape& operand_shape,
absl::Span<const int64_t> starts,
absl::Span<const int64_t> strides,
llvm::IRBuilder<>* builder) const;
Index SourceIndexOfTranspose(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping) const;
Index SourceIndexOfBitcast(const Shape& shape, const Shape& operand_shape,
llvm::IRBuilder<>* builder) const;
Index SourceIndexOfBitcast(const Shape& operand_shape,
llvm::IRBuilder<>* builder) const;
Index SourceIndexOfBroadcast(const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping,
llvm::IRBuilder<>* builder) const;
// Linearizes the multidim form against static or dynamic dimensions.
llvm::Value* Linearize(absl::Span<const int64_t> dimensions,
llvm::IRBuilder<>* builder) const;
llvm::Value* Linearize(const std::vector<llvm::Value*>& dynamic_dims,
llvm::IRBuilder<>* builder) const;
llvm::Type* GetType() const { return index_type_; }
llvm::Constant* GetConstantWithIndexType(int64_t c) const {
return llvm::ConstantInt::get(index_type_, c);
}
private:
Index(absl::Span<llvm::Value* const> multidim, llvm::Value* linear,
const Shape& shape, llvm::Type* index_type);
// Splits `linear` into per-dimension values following `shape`'s layout.
void Delinearize(std::vector<llvm::Value*>* multidim, llvm::Value* linear,
const Shape& shape, llvm::IRBuilder<>* b) const;
void Delinearize(std::vector<llvm::Value*>* multidim, llvm::Value* linear,
const Shape& shape, absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b) const;
std::vector<llvm::Value*> multidim_;
// Optional linearized form; nullptr when unknown/stale.
llvm::Value* linear_ = nullptr;
Layout layout_;
std::vector<int64_t> dims_;
llvm::Type* index_type_;
};
IrArray() : base_ptr_(nullptr) {}
IrArray(llvm::Value* base_ptr, llvm::Type* pointee_type, Shape shape);
IrArray(IrArray&& other) = default;
IrArray(const IrArray& other) = default;
IrArray& operator=(IrArray&& other) = default;
IrArray& operator=(const IrArray& other) = default;
llvm::Value* GetBasePointer() const { return base_ptr_; }
llvm::Type* GetBasePointeeType() const { return pointee_type_; }
llvm::Type* GetElementLlvmType() const { return element_type_; }
const Shape& GetShape() const { return shape_; }
// Emits the address of the element at `index`; `bit_offset`, when non-null,
// receives a sub-byte offset (see .cc for details -- TODO confirm).
llvm::Value* EmitArrayElementAddress(
const Index& index, llvm::IRBuilder<>* b, absl::string_view name = "",
bool use_linear_index = true, llvm::Value** bit_offset = nullptr) const;
// Attaches all accumulated metadata_ entries to a load/store instruction.
void AnnotateLoadStoreInstructionWithMetadata(
llvm::Instruction* instruction) const;
llvm::Value* EmitReadArrayElement(const Index& index, llvm::IRBuilder<>* b,
absl::string_view name = "",
bool use_linear_index = true) const;
void EmitWriteArrayElement(const Index& index, llvm::Value* value,
llvm::IRBuilder<>* b,
bool use_linear_index = true) const;
IrArray CastToShape(const Shape& new_shape, llvm::IRBuilder<>* b) const;
void AddAliasScopeMetadata(llvm::MDNode* alias_scope) {
CHECK_NE(alias_scope, nullptr);
AddMetadata(llvm::LLVMContext::MD_alias_scope, alias_scope);
}
void AddNoaliasMetadata(llvm::MDNode* noalias) {
CHECK_NE(noalias, nullptr);
AddMetadata(llvm::LLVMContext::MD_noalias, noalias);
}
// Marks loads from this array !invariant.load (idempotent).
void MarkInvariantOverWholeProgram(llvm::LLVMContext* context) {
if (is_invariant_) {
return;
}
is_invariant_ = true;
AddMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(*context, {}));
}
const std::map<int, llvm::MDNode*>& metadata() const { return metadata_; }
private:
// Records metadata of `kind`; dies if that kind was already added.
void AddMetadata(int kind, llvm::MDNode* md) {
InsertOrDie(&metadata_, kind, md);
}
llvm::Value* EmitLinearArrayElementAddress(
const Index& index, llvm::IRBuilder<>* b, absl::string_view name = "",
llvm::Value** bit_offset = nullptr) const;
llvm::Value* base_ptr_;
llvm::Type* pointee_type_;
// Innermost (scalar) LLVM type after unwrapping array nesting.
llvm::Type* element_type_;
Shape shape_;
// LLVM metadata (keyed by LLVMContext::MD_* kind) to attach to accesses.
std::map<int, llvm::MDNode*> metadata_;
bool is_invariant_ = false;
};
}
}
#endif
#include "xla/service/llvm_ir/ir_array.h"
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace llvm_ir {
// Private ctor: an index carrying both multidim and (required non-null)
// linear forms; delegates multidim validation to the multidim-only ctor.
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
llvm::Value* linear, const Shape& shape,
llvm::Type* index_type)
: Index(multidim, shape, index_type) {
CHECK_NE(linear, nullptr);
linear_ = linear;
}
// Decomposes `linear` into per-dimension indices for `shape`, walking its
// layout from minor to major. For dimension d at minor-to-major position p:
//   index[d] = (linear / stride_p) % size(d)
// where stride_p is the product of the sizes of all more-minor dimensions;
// the modulo is omitted for the most-major dimension.
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b) const {
const Layout& layout = shape.layout();
const int64_t rank = layout.minor_to_major_size();
int64_t stride = 1;
for (int64_t pos = 0; pos < rank; ++pos) {
const int64_t dim = layout.minor_to_major(pos);
const int64_t dim_size = shape.dimensions(dim);
llvm::Value* quotient =
b->CreateUDiv(linear, GetConstantWithIndexType(stride));
const bool is_most_major = (pos == rank - 1);
(*multidim)[dim] =
is_most_major
? quotient
: b->CreateURem(quotient, GetConstantWithIndexType(dim_size));
stride *= dim_size;
}
}
// Dynamic-shape variant of Delinearize: dimension sizes are runtime values,
// so the running divisor is an llvm::Value built up by multiplication
// instead of a compile-time constant.
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b) const {
CHECK_EQ(shape.dimensions_size(), dynamic_dims.size());
CHECK_EQ(multidim_.size(), shape.rank());
llvm::Value* divisor = GetConstantWithIndexType(1);
const Layout& layout = shape.layout();
for (int64_t i = 0; i < layout.minor_to_major_size(); ++i) {
int64_t dimension = layout.minor_to_major(i);
auto* quot = b->CreateUDiv(linear, divisor, "quot");
if (i < layout.minor_to_major_size() - 1) {
// Cast the runtime size to the index type before using it.
llvm::Value* casted_dynamic_dim =
b->CreateIntCast(dynamic_dims[dimension], quot->getType(),
true);
(*multidim)[dimension] =
b->CreateURem(quot, casted_dynamic_dim, "dim_value");
divisor = b->CreateMul(divisor, casted_dynamic_dim, "divisor");
} else {
// Most-major dimension: no modulo needed.
(*multidim)[dimension] = quot;
}
}
}
// Builds an index from a linear value by delinearizing it against `shape`'s
// layout (which must be present).
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
}
// Builds an index from a linear value plus partially-known multidim values:
// the linear value is fully delinearized first, then any non-null entries of
// `multidim` overwrite the derived ones (the caller's values win).
IrArray::Index::Index(llvm::Value* linear,
absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK_EQ(multidim.size(), shape.rank());
for (auto dim : multidim) {
if (dim) {
CHECK_EQ(dim->getType(), index_type_);
}
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
// Caller-provided entries take precedence over delinearized ones.
for (int i = 0; i < multidim.size(); ++i) {
if (multidim[i] != nullptr) {
multidim_[i] = multidim[i];
}
}
}
// Builds an index from a linear value using runtime dimension sizes
// (`dynamic_dims`) for delinearization instead of the static shape sizes.
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, dynamic_dims, b);
}
// Builds a multidim-only index from bare dimensions. The PRED element type
// is arbitrary: only the dimensions and default layout of the synthesized
// shape are used.
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
absl::Span<int64_t const> dimensions,
llvm::Type* index_type)
: Index(multidim, ShapeUtil::MakeShape( PRED, dimensions),
index_type) {}
// Builds a multidim-only index (linear_ stays null). All entries must be
// non-null and `shape` must carry a layout.
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::Type* index_type)
: multidim_(multidim.begin(), multidim.end()),
linear_(nullptr),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()),
index_type_(index_type) {
CHECK_NE(index_type_, nullptr);
CHECK_EQ(shape.dimensions_size(), multidim.size());
for (const auto* dim : multidim) {
CHECK_NE(dim, nullptr);
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
}
// Constructs an IrArray over `base_ptr` (pointing at `pointee_type`) with
// XLA shape `shape`, and checks that the LLVM array-nesting depth of
// `pointee_type` matches the shape's rank.
// Fix: the original validated and printed the parameter `shape` AFTER it had
// been moved into shape_ (use-after-move); all body references now use
// shape_.
IrArray::IrArray(llvm::Value* base_ptr, llvm::Type* pointee_type, Shape shape)
: base_ptr_(base_ptr),
pointee_type_(pointee_type),
shape_(std::move(shape)) {
TF_CHECK_OK(ShapeUtil::ValidateShape(shape_));
CHECK(base_ptr_->getType()->isPointerTy());
// Unwrap nested LLVM array types to find the scalar element type, counting
// the nesting depth as we go.
int depth = 0;
element_type_ = pointee_type;
while (llvm::ArrayType* array_type =
llvm::dyn_cast<llvm::ArrayType>(element_type_)) {
element_type_ = array_type->getElementType();
++depth;
}
if (!shape_.IsArray() || ShapeUtil::IsScalar(shape_)) {
// Scalars / non-arrays map to either the bare element type or a
// single-element wrapper.
DCHECK(depth == 1 || depth == 0) << depth;
} else {
DCHECK_EQ(depth, shape_.rank()) << shape_.ShortDebugString();
}
}
bool IrArray::Index::LinearValidOnShape(const Shape& a) const {
auto b = ShapeUtil::MakeShape(a.element_type(), dims_);
*b.mutable_layout() = layout_;
return linear_ != nullptr &&
ShapeUtil::ElementsIn(a) == ShapeUtil::ElementsIn(b) &&
ShapeUtil::ReshapeIsBitcast(a, b);
}
// Maps this index (addressing an element of `output_shape`) back to the
// corresponding element of `input_shape`, where output = reshape(input).
// Two strategies:
//   1. If the reshape only inserts/deletes degenerate (size-1) dimensions,
//      coordinates copy across directly; deleted input dims get constant 0.
//   2. Otherwise, dimensions are partitioned into common-factor groups that
//      cover the same element count in both shapes; each group's output
//      coordinates are linearized, then re-delinearized into the input
//      group's coordinates via urem/udiv from minor to major.
IrArray::Index IrArray::Index::SourceIndexOfReshape(
    const Shape& output_shape, const Shape& input_shape,
    llvm::IRBuilder<>* builder) const {
  CHECK_EQ(multidim_.size(), output_shape.rank());
  std::vector<llvm::Value*> source_multidim_index(
      input_shape.rank(), llvm::UndefValue::get(index_type_));
  if (std::optional<ShapeUtil::ShapeEqualityDescriptor> trivial_reshape =
          ShapeUtil::InsertedOrDeleted1SizedDimensions(input_shape,
                                                       output_shape)) {
    // i: input dim; k: output dim; j/l: cursors into the deleted/inserted
    // dimension lists of the descriptor.
    for (int64_t i = 0, j = 0, k = 0, l = 0; i < source_multidim_index.size();
         ++i) {
      if (j == trivial_reshape->deleted_dimensions.size() ||
          trivial_reshape->deleted_dimensions[j] > i) {
        // Skip output dimensions that were inserted; they have no input
        // counterpart.
        while (l < trivial_reshape->inserted_dimensions.size() &&
               trivial_reshape->inserted_dimensions[l] == k) {
          ++k;
          ++l;
        }
        source_multidim_index[i] = multidim_[k];
        ++k;
      } else {
        // Input dimension i was deleted (size 1): its coordinate is 0.
        source_multidim_index[i] = GetConstantWithIndexType(0);
        ++j;
      }
    }
  } else {
    // General case: process factor groups from last to first.
    const auto common_factors =
        CommonFactors(input_shape.dimensions(), output_shape.dimensions());
    for (ssize_t k = common_factors.size() - 2; k >= 0; --k) {
      // Output dimensions belonging to this group.
      absl::Span<int64_t const> dimensions = output_shape.dimensions().subspan(
          common_factors[k].second,
          common_factors[k + 1].second - common_factors[k].second);
      // Collapse the group's output coordinates into one linear value.
      llvm::Value* logical_linear_index =
          Index(absl::Span<llvm::Value* const>(multidim_).subspan(
                    common_factors[k].second,
                    common_factors[k + 1].second - common_factors[k].second),
                dimensions, index_type_)
              .Linearize(dimensions, builder);
      // Peel input coordinates off the linear value, minor to major.
      for (int64_t i = common_factors[k + 1].first - 1;
           i >= common_factors[k].first; --i) {
        llvm::Value* divisor =
            GetConstantWithIndexType(input_shape.dimensions(i));
        if (input_shape.dimensions(i) == 1) {
          source_multidim_index[i] = GetConstantWithIndexType(0);
        } else if (i == common_factors[k].first) {
          // The most-major dimension of the group takes whatever remains.
          source_multidim_index[i] = logical_linear_index;
        } else {
          source_multidim_index[i] =
              builder->CreateURem(logical_linear_index, divisor);
        }
        logical_linear_index =
            builder->CreateUDiv(logical_linear_index, divisor);
      }
    }
  }
  // If the reshape is a bitcast, the linear index is also valid for the
  // input shape and can be propagated unchanged.
  if (linear() != nullptr && LayoutUtil::HasLayout(input_shape) &&
      LayoutUtil::HasLayout(output_shape) &&
      ShapeUtil::ReshapeIsBitcast(input_shape, output_shape)) {
    return Index(source_multidim_index, linear(), input_shape, index_type_);
  }
  return Index(source_multidim_index, input_shape, index_type_);
}
// Maps this index, which addresses an element of a slice result, back to the
// corresponding element of the slice operand: per dimension the operand
// coordinate is starts[i] + multidim_[i] * strides[i], with the multiply
// folded away when the stride is 1.
IrArray::Index IrArray::Index::SourceIndexOfSlice(
    const Shape& operand_shape, absl::Span<const int64_t> starts,
    absl::Span<const int64_t> strides, llvm::IRBuilder<>* builder) const {
  std::vector<llvm::Value*> operand_index(multidim_.size());
  for (size_t dim = 0; dim < multidim_.size(); ++dim) {
    llvm::Value* scaled = multidim_[dim];
    if (strides[dim] != 1) {
      scaled = builder->CreateMul(scaled,
                                  GetConstantWithIndexType(strides[dim]));
    }
    operand_index[dim] =
        builder->CreateAdd(scaled, GetConstantWithIndexType(starts[dim]));
  }
  return Index(operand_index, operand_shape, index_type_);
}
// Maps this index through a transpose by applying the inverse dimension
// permutation to the coordinates.
IrArray::Index IrArray::Index::SourceIndexOfTranspose(
    const Shape& shape, const Shape& operand_shape,
    absl::Span<const int64_t> dimension_mapping) const {
  std::vector<llvm::Value*> source_index =
      PermuteInverse(multidim(), dimension_mapping);
  // The linear index carries over only when the transpose is a
  // layout-preserving bitcast between the two shapes.
  const bool linear_still_valid =
      linear() != nullptr && LayoutUtil::HasLayout(operand_shape) &&
      LayoutUtil::HasLayout(shape) &&
      ShapeUtil::TransposeIsBitcast(operand_shape, shape, dimension_mapping);
  if (linear_still_valid) {
    return Index(source_index, linear(), operand_shape, index_type_);
  }
  return Index(source_index, operand_shape, index_type_);
}
// Maps this index through a bitcast from `operand_shape` to `shape` by
// decomposing the bitcast into reshape and/or transpose steps and applying
// their source-index mappings in reverse order
// (transpose2 -> reshape -> transpose1).
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
    const Shape& shape, const Shape& operand_shape,
    llvm::IRBuilder<>* builder) const {
  CHECK(LayoutUtil::HasLayout(shape) && LayoutUtil::HasLayout(operand_shape));
  const ShapeUtil::BitcastDecomposition decomposition =
      ShapeUtil::DecomposeBitcast(operand_shape, shape);
  // Case 1: the bitcast is a pure reshape.
  if (std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
          decomposition)) {
    return SourceIndexOfReshape(shape, operand_shape, builder);
  }
  // Case 2: the bitcast is a pure transpose.
  if (std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
          decomposition)) {
    const auto& decomposition_transpose =
        std::get<ShapeUtil::BitcastDecompositionTranspose>(decomposition);
    return SourceIndexOfTranspose(shape, operand_shape,
                                  decomposition_transpose.transpose_dims);
  }
  // Case 3: general transpose-reshape-transpose (Trt) decomposition.
  // Identity transposes are skipped to avoid emitting no-op mappings.
  CHECK(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
      decomposition));
  const auto& decomposition_trt =
      std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
  Index index = *this;
  if (!decomposition_trt.IsTranspose2Identity()) {
    index = index.SourceIndexOfTranspose(shape, decomposition_trt.reshape_shape,
                                         decomposition_trt.transpose2_dims);
  }
  index =
      index.SourceIndexOfReshape(decomposition_trt.reshape_shape,
                                 decomposition_trt.transpose1_shape, builder);
  if (!decomposition_trt.IsTranspose1Identity()) {
    index = index.SourceIndexOfTranspose(decomposition_trt.transpose1_shape,
                                         operand_shape,
                                         decomposition_trt.transpose1_dims);
  }
  return index;
}
// Convenience overload: reconstructs the destination shape from this index's
// own dimensions and layout. The element type is arbitrary, since bitcast
// decomposition only inspects dimensions and layout.
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
    const Shape& operand_shape, llvm::IRBuilder<>* builder) const {
  Shape reconstructed_shape = ShapeUtil::MakeShape(F32, dims_);
  *reconstructed_shape.mutable_layout() = layout_;
  return SourceIndexOfBitcast(reconstructed_shape, operand_shape, builder);
}
IrArray::Index IrArray::Index::SourceIndexOfBroadcast(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping,
llvm::IRBuilder<>* builder) const {
int64_t rank = operand_shape.rank();
std::vector<llvm::Value*> source_index(rank);
for (int64_t i = 0; i < rank; ++i) {
source_index[i] = multidim_[dimension_mapping[i]];
}
if (linear_ == nullptr || !LayoutUtil::HasLayout(operand_shape) ||
!LayoutUtil::HasLayout(shape) || rank == 1) {
return Index(source_index, operand_shape, ind | #include "xla/service/llvm_ir/ir_array.h"
#include <string>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace llvm_ir {
namespace {
// Test fixture owning an LLVM context, module, and builder so each test can
// emit IR into a fresh function and inspect the textual result.
class IrArrayTest : public ::testing::Test {
 public:
  IrArrayTest()
      : context_{},
        module_{"IrArrayTest module", context_},
        builder_{context_} {}

  // Creates an externally-linked void function named "test_function" taking
  // `params`, appends an entry block, and points builder_ at it so the test
  // body can emit instructions directly.
  llvm::Function* EmitFunctionAndSetInsertPoint(
      llvm::ArrayRef<llvm::Type*> params) {
    llvm::FunctionType* fn_ty = llvm::FunctionType::get(
        llvm::Type::getVoidTy(context_), params, false);
    llvm::Function* fn = llvm::Function::Create(
        fn_ty, llvm::Function::LinkageTypes::ExternalLinkage, "test_function",
        module_);
    builder_.SetInsertPoint(llvm::BasicBlock::Create(context_, "bb", fn));
    return fn;
  }

 protected:
  llvm::LLVMContext context_;
  llvm::Module module_;
  llvm::IRBuilder<> builder_;
};
TEST_F(IrArrayTest, TestShapeIsCompatible) {
  // Shapes with the same non-degenerate extents are index-compatible even if
  // the layout differs or a size-1 dimension moves.
  xla::Shape base =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 1, 0});
  xla::Shape other_layout =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 0, 1});
  xla::Shape moved_degenerate =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 20}, {2, 1, 0});
  EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(base, other_layout));
  EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(base, moved_degenerate));

  // Changing a real extent (20 -> 30) breaks compatibility regardless of
  // layout or degenerate-dimension placement.
  xla::Shape wider =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 1, 0});
  xla::Shape wider_other_layout =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 0, 1});
  xla::Shape wider_moved_degenerate =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 30}, {2, 1, 0});
  EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(base, wider));
  EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(base, wider_other_layout));
  EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(base, wider_moved_degenerate));
}
TEST_F(IrArrayTest, EmitArrayElementAddress) {
  // With a linear index, addressing an F32 array should lower to a single
  // flat GEP on the element type.
  llvm::Function* fn = EmitFunctionAndSetInsertPoint(
      {builder_.getPtrTy(), builder_.getInt32Ty()});
  llvm::Argument* base = fn->getArg(0);
  llvm::Argument* linear = fn->getArg(1);

  Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
  llvm::Type* ir_type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(base, ir_type, shape);
  IrArray::Index index(linear, shape, &builder_);
  ir_array.EmitArrayElementAddress(index, &builder_);

  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
    CHECK: getelementptr inbounds float, ptr %[[ptr]], i32 %[[idx]]
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
// When linear addressing is disabled, the linear index must be decomposed
// into per-dimension coordinates (udiv/urem) before the multi-dimensional
// GEP is emitted.
TEST_F(IrArrayTest, EmitArrayElementAddressNonLinear) {
  llvm::Function* function = EmitFunctionAndSetInsertPoint(
      {builder_.getPtrTy(), builder_.getInt32Ty()});
  llvm::Argument* array_ptr = function->getArg(0);
  llvm::Argument* array_index = function->getArg(1);
  Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
  llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(array_ptr, type, shape);
  IrArray::Index index(array_index, shape, &builder_);
  // Last argument disables use of the linear index — presumably
  // `use_linear_index`; confirm against EmitArrayElementAddress's signature.
  ir_array.EmitArrayElementAddress(index, &builder_, "",
                                   false);
  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
    CHECK: %[[udiv1:[0-9]+]] = udiv i32 %[[idx]], 1
    CHECK: %[[urem:[0-9]+]] = urem i32 %[[udiv1]], 5
    CHECK: %[[udiv2:[0-9]+]] = udiv i32 %[[idx]], 5
    CHECK: getelementptr inbounds [3 x [5 x float]], ptr %0, i32 0, i32 %[[udiv2]], i32 %[[urem]]
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
// S4 (4-bit) elements are packed two per byte, so addressing must split the
// linear index into a byte address (idx / 2) and a within-byte position
// (idx % 2), the latter reported via `bit_offset`.
TEST_F(IrArrayTest, EmitArrayElementAddressInt4) {
  llvm::Function* function = EmitFunctionAndSetInsertPoint(
      {builder_.getPtrTy(), builder_.getInt32Ty()});
  llvm::Argument* array_ptr = function->getArg(0);
  llvm::Argument* array_index = function->getArg(1);
  Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
  llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(array_ptr, type, shape);
  IrArray::Index index(array_index, shape, &builder_);
  llvm::Value* bit_offset;
  // `true` enables linear-index addressing; `bit_offset` receives the
  // sub-byte offset of the 4-bit element.
  ir_array.EmitArrayElementAddress(index, &builder_, "",
                                   true,
                                   &bit_offset);
  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
    CHECK: %[[rem:[0-9]+]] = urem i32 %[[idx]], 2
    CHECK: %[[div:[0-9]+]] = udiv i32 %[[idx]], 2
    CHECK: getelementptr inbounds i8, ptr %[[ptr]], i32 %[[div]]
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
// With a multi-dimensional index (no linear value), 4-bit addressing first
// linearizes the coordinates (row-major: idx0 * 5 + idx1) and then divides
// by two to find the containing byte.
TEST_F(IrArrayTest, EmitArrayElementAddressInt4NonLinear) {
  llvm::Function* function = EmitFunctionAndSetInsertPoint(
      {llvm::PointerType::get(context_, 0), llvm::Type::getInt32Ty(context_),
       llvm::Type::getInt32Ty(context_)});
  llvm::Argument* array_ptr = function->getArg(0);
  llvm::Argument* array_index0 = function->getArg(1);
  llvm::Argument* array_index1 = function->getArg(2);
  Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
  llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(array_ptr, type, shape);
  // Index built from explicit per-dimension values, so no linear form exists.
  IrArray::Index index({array_index0, array_index1}, shape,
                       builder_.getInt32Ty());
  llvm::Value* bit_offset;
  // `false` disables linear-index addressing, forcing linearization in IR.
  ir_array.EmitArrayElementAddress(index, &builder_, "",
                                   false,
                                   &bit_offset);
  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i32 %[[idx1:[0-9]+]]) {
    CHECK: %[[mul1:[0-9]+]] = mul nuw nsw i32 %[[idx1]], 1
    CHECK: %[[add1:[0-9]+]] = add nuw nsw i32 0, %[[mul1]]
    CHECK: %[[mul2:[0-9]+]] = mul nuw nsw i32 %[[idx0]], 5
    CHECK: %[[add2:[0-9]+]] = add nuw nsw i32 %[[add1]], %[[mul2]]
    CHECK: %[[udiv:[0-9]+]] = udiv i32 %[[add2]], 2
    CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[udiv]]
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
// Reading a packed 4-bit element: load the containing byte, shift the wanted
// nibble into the low bits, and truncate to i4.
TEST_F(IrArrayTest, EmitReadArrayElementInt4) {
  llvm::Function* function = EmitFunctionAndSetInsertPoint(
      {builder_.getPtrTy(), builder_.getInt32Ty()});
  llvm::Argument* array_ptr = function->getArg(0);
  llvm::Argument* array_index = function->getArg(1);
  Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
  llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(array_ptr, type, shape);
  IrArray::Index index(array_index, shape, &builder_);
  ir_array.EmitReadArrayElement(index, &builder_);
  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]]) {
    COM: Calculate the address.
    CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
    CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
    CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
    CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
    CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
    CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
    COM: Load the element, optionally shift, and truncate.
    CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
    CHECK: %[[shift:[0-9]+]] = lshr i8 %[[load]], %[[trunc]]
    CHECK: trunc i8 %[[shift]] to i4
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
// Writing a packed 4-bit element is a read-modify-write: load the containing
// byte, mask out the destination nibble, OR in the shifted new value, and
// store the byte back.
TEST_F(IrArrayTest, EmitWriteArrayElementInt4) {
  llvm::Function* function = EmitFunctionAndSetInsertPoint(
      {builder_.getPtrTy(), builder_.getInt32Ty(), builder_.getIntNTy(4)});
  llvm::Argument* array_ptr = function->getArg(0);
  llvm::Argument* array_index = function->getArg(1);
  llvm::Argument* val_to_write = function->getArg(2);
  Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
  llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
  IrArray ir_array(array_ptr, type, shape);
  IrArray::Index index(array_index, shape, &builder_);
  ir_array.EmitWriteArrayElement(index, val_to_write, &builder_);
  std::string ir_str = DumpToString(&module_);
  const char* filecheck_pattern = R"(
    CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i4 %[[val:[0-9]+]]) {
    COM: Calculate the address.
    CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
    CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
    CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
    CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
    CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
    CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
    COM: Load address, replace 4 bits with the value, and write to address.
    CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
    CHECK: %[[zext:[0-9]+]] = zext i4 %[[val]] to i8
    CHECK: %[[shifted_val:[0-9]+]] = shl i8 %[[zext]], %[[trunc]]
    CHECK: %[[mask:[0-9]+]] = call i8 @llvm.fshl.i8(i8 -16, i8 -16, i8 %[[trunc]])
    CHECK: %[[and:[0-9]+]] = and i8 %[[load]], %[[mask]]
    CHECK: %[[towrite:[0-9]+]] = or i8 %[[and]], %[[shifted_val]]
    CHECK: store i8 %[[towrite]], ptr %[[gep]], align 1
  )";
  TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
                          RunFileCheck(ir_str, filecheck_pattern));
  EXPECT_TRUE(filecheck_match);
}
}
}
} |
1,993 | cpp | tensorflow/tensorflow | heap_simulator | third_party/xla/xla/service/heap_simulator/heap_simulator.cc | third_party/xla/xla/service/heap_simulator/heap_simulator_test.cc | #ifndef XLA_SERVICE_HEAP_SIMULATOR_HEAP_SIMULATOR_H_
#define XLA_SERVICE_HEAP_SIMULATOR_HEAP_SIMULATOR_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#if defined(__GNUC__) || defined(__clang__)
#include "absl/container/btree_map.h"
#endif
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/buffer_value_containers.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "xla/service/tuple_points_to_analysis.h"
namespace xla {
template <typename BufferType>
class HeapAlgorithm;
template <typename BufferType>
class NoFragmentationStatsHeap;
class HeapSimulator {
public:
struct Chunk {
static Chunk FromOffsetEnd(int64_t offset, int64_t end);
static Chunk FromOffsetSize(int64_t offset, int64_t size);
Chunk() : Chunk(-1, 0) {}
std::string ToString() const;
int64_t offset;
int64_t size;
int64_t chunk_end() const { return offset + size; }
bool OverlapsWith(Chunk other_chunk) const;
bool operator==(const Chunk& other) const {
return offset == other.offset && size == other.size;
}
private:
Chunk(int64_t offset, int64_t size) : offset(offset), size(size) {}
friend std::ostream& operator<<(std::ostream& stream, const Chunk& chunk);
};
template <typename BufferType>
struct HeapResult {
int64_t UpdatedHeapSize(const Chunk& chunk) const {
return std::max(heap_size, chunk.chunk_end());
}
absl::flat_hash_map<const BufferType*, Chunk> chunk_map;
int64_t heap_size = 0;
};
template <typename BufferType>
struct Result {
std::vector<HeapResult<BufferType>> heap_results;
int64_t heap_size = 0;
int64_t fragmentation_size = 0;
HeapSimulatorTrace debug_trace;
};
struct Options {
Options()
: may_reuse_operand_buffers(true),
alloc_constants(false),
buffers_to_assign(nullptr) {}
bool may_reuse_operand_buffers;
bool alloc_constants;
const absl::flat_hash_set<const HloValue*>* buffers_to_assign;
};
static absl::StatusOr<int64_t> MinimumMemoryForModule(
const HloSchedule& schedule,
const LogicalBuffer::SizeFunction& size_function);
static absl::StatusOr<int64_t> MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const absl::flat_hash_map<const HloComputation*, int64_t>*
memory_by_computation = nullptr);
static absl::StatusOr<int64_t> MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const HloSchedule* schedule);
static absl::StatusOr<Result<HloValue>> Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloModule& module, const HloSchedule& schedule,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn,
const Options& options = Options());
static absl::StatusOr<Result<HloValue>> Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn,
const Options& options = Options(),
const absl::flat_hash_map<const HloComputation*, int64_t>*
memory_by_computation = nullptr);
static absl::StatusOr<Result<HloValue>> Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const HloSchedule* schedule,
const Options& options = Options());
private:
HeapSimulator(std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const BufferValue::SizeFunction& size_fn,
const Options& options, const HloSchedule* schedule = nullptr,
const absl::flat_hash_map<const HloComputation*, int64_t>*
memory_by_computation = nullptr);
~HeapSimulator();
absl::Status RunComputation(
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis, HloLiveRange* live_range);
bool IgnoreBuffer(const HloValue* buffer) const;
void Alloc(const HloValue* buffer, const HloInstruction* instruction);
void Free(const HloValue* buffer, const HloInstruction* instruction);
void ShareBuffer(const HloValue* buffer, const HloValue* shared,
const HloInstruction* instruction);
int64_t GetBufferSize(const HloValue* buffer) const;
bool InSameSharedGroup(const HloValue* left, const HloValue* right);
absl::StatusOr<Result<HloValue>> Finish();
void FillDebugTrace(HeapSimulatorTrace::Event::Kind kind,
const HloValue* buffer, const HloInstruction* instruction,
const HloValue* share_with_canonical);
const std::unique_ptr<NoFragmentationStatsHeap<HloValue>>
no_fragmentation_stats_;
const std::unique_ptr<HeapAlgorithm<HloValue>> algorithm_;
const BufferValue::SizeFunction size_fn_;
const Options options_;
const HloSchedule* schedule_;
const absl::flat_hash_map<const HloComputation*, int64_t>*
memory_by_computation_;
absl::flat_hash_set<const HloValue*> allocated_buffers_;
absl::flat_hash_set<const HloValue*> freed_buffers_;
absl::flat_hash_map<const HloValue*, int64_t> buffer_sizes_;
HeapSimulatorTrace debug_trace_;
};
template <typename BufferType>
class HeapAlgorithm {
public:
using Chunk = HeapSimulator::Chunk;
using Result = HeapSimulator::Result<BufferType>;
using HeapResult = HeapSimulator::HeapResult<BufferType>;
virtual ~HeapAlgorithm() = default;
virtual void Alloc(const BufferType* buffer, int64_t size) = 0;
virtual void AccountForSubcomputationMemory(
const HloInstruction* instruction,
int64_t alloc_size_by_instruction,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation) {}
virtual void Free(const BufferType* buffer, int64_t size) = 0;
virtual void ShareWith(const BufferType* buffer, const BufferType* share_with,
int64_t size) {
Alloc(buffer, size);
}
virtual absl::StatusOr<Result> Finish() = 0;
};
template <typename BufferType>
class NoFragmentationStatsHeap : public HeapAlgorithm<BufferType> {
public:
using Result = HeapSimulator::Result<BufferType>;
NoFragmentationStatsHeap() = default;
~NoFragmentationStatsHeap() override = default;
void Alloc(const BufferType* buffer, int64_t size) override;
void AccountForSubcomputationMemory(
const HloInstruction* instruction, int64_t alloc_size_by_instruction,
const absl::flat_hash_map<const HloComputation*, int64_t>&
memory_by_computation) override;
void Free(const BufferType* buffer, int64_t size) override;
absl::StatusOr<Result> Finish() override;
private:
int64_t current_heap_size_ = 0;
int64_t max_heap_size_ = 0;
};
struct BufferIntervalTreeNode {
int64_t start;
int64_t end;
int64_t subtree_end;
HeapSimulator::Chunk chunk;
BufferIntervalTreeNode* left;
BufferIntervalTreeNode* right;
BufferIntervalTreeNode* parent;
};
class BufferIntervalTree {
public:
using Chunk = HeapSimulator::Chunk;
void Add(int64_t start, int64_t end, const Chunk& chunk);
bool Remove(int64_t start, int64_t end, const Chunk& chunk);
std::vector<Chunk> ChunksOverlappingInTime(int64_t start, int64_t end) const;
BufferIntervalTreeNode* GetRoot() { return root_; }
private:
BufferIntervalTreeNode* root_ = nullptr;
std::list<BufferIntervalTreeNode> node_storage_;
};
class SliceTimePermutationIterator {
public:
enum class Ty : std::int8_t {
kAll,
kPreferred,
};
static std::unique_ptr<SliceTimePermutationIterator> CreateForNewAllocation(
Ty ty, absl::Span<const int64_t> inclusive_slice_start_times);
static std::unique_ptr<SliceTimePermutationIterator> CreateForRepack(
Ty ty, const SlicedAllocationData* original_sliced_allocation);
virtual ~SliceTimePermutationIterator() = default;
virtual void Begin() = 0;
virtual bool Done() const = 0;
virtual void Next() = 0;
virtual absl::Span<const int64_t> Get() const = 0;
protected:
SliceTimePermutationIterator() = default;
};
template <typename BufferType>
class GlobalDecreasingSizeBestFitHeap : public HeapAlgorithm<BufferType> {
public:
using HeapResult = HeapSimulator::HeapResult<BufferType>;
using Result = HeapSimulator::Result<BufferType>;
using Chunk = HeapSimulator::Chunk;
#if defined(__GNUC__) || defined(__clang__)
using FreeChunks = absl::btree_map<int64_t, int64_t, std::greater<int64_t>>;
#else
using FreeChunks = std::map<int64_t, int64_t, std::greater<int64_t>>;
#endif
enum Type {
kSpatial = 0,
kTemporal,
kCustom
};
struct BufferInterval {
std::string ToString() const;
const BufferType* buffer = nullptr;
int64_t size = -1;
int64_t start = -1;
int64_t end = -1;
absl::InlinedVector<const BufferType*, 2> colocations;
bool need_allocation = false;
};
using BufferIntervalCompare =
std::function<bool(const BufferInterval&, const BufferInterval&)>;
class SlicedBufferInterval {
public:
static const SlicedBufferInterval CreateConstInterval(
const BufferInterval& full_buffer_interval);
static SlicedBufferInterval CreateMutableInterval(
BufferInterval& full_buffer_interval);
SlicedBufferInterval() = delete;
void Slice(absl::Span<const int64_t> slice_sizes_sorted_by_offset);
void UpdateExclusiveSliceStartTimes(
const std::vector<int64_t>& exclusive_start_times);
void UpdateInclusiveSliceStartTimes(
const std::vector<int64_t>& inclusive_start_times);
void UpdateEndTime(int64_t end_time);
const BufferInterval& full_buffer_interval() const;
size_t num_slices() const { return slice_sizes_sorted_by_offset_.size(); }
const std::vector<int64_t>& SliceSizesSortedByOffset() const;
std::vector<int64_t> inclusive_start_times() const;
const BufferInterval& IntervalForMakeFreeChunks(int64_t slice_time) const;
std::string ToString() const;
private:
explicit SlicedBufferInterval(
const BufferInterval& full_buffer_interval,
BufferInterval* mutable_full_buffer_interval = nullptr);
const BufferInterval& full_buffer_interval_;
BufferInterval* mutable_full_buffer_interval_ = nullptr;
std::vector<int64_t> slice_sizes_sorted_by_offset_;
std::vector<BufferInterval> make_free_chunks_intervals_;
};
class SlicedAllocationFinder {
public:
using ChunksSortedBySliceTime = std::vector<Chunk>;
struct FreeChunkPiece {
std::string ToString() const;
int64_t earliest_free_slice_time;
Chunk dimensions;
};
#if defined(__GNUC__) || defined(__clang__)
using FreeChunkPieces =
absl::btree_map<int64_t, FreeChunkPiece, std::greater<int64_t>>;
#else
using FreeChunkPieces =
std::map<int64_t, FreeChunkPiece, std::greater<int64_t>>;
#endif
struct FreeChunkRoot {
FreeChunkRoot(const Chunk& free_chunk, int64_t free_chunk_slice_time);
std::string ToString() const;
void Update(const Chunk& free_chunk, int64_t free_chunk_slice_time);
Chunk chunk;
FreeChunkPieces pieces;
};
#if defined(__GNUC__) || defined(__clang__)
using FreeChunkRoots =
absl::btree_map<int64_t, FreeChunkRoot, std::greater<int64_t>>;
#else
using FreeChunkRoots =
std::map<int64_t, FreeChunkRoot, std::greater<int64_t>>;
#endif
static bool AllOffsetsAllowed(int64_t offset) { return true; }
SlicedAllocationFinder(
absl::Span<const FreeChunks> free_chunks_per_slice_time,
std::vector<int64_t> sorted_slice_sizes, int64_t max_colocation_size,
int64_t preferred_offset, int64_t alignment,
std::unique_ptr<SliceTimePermutationIterator>
slice_time_permutation_iterator,
absl::AnyInvocable<bool(int64_t) const> is_offset_allowed =
&AllOffsetsAllowed);
std::string FreeChunksToAsciiArt() const;
std::string ToString() const;
ChunksSortedBySliceTime Find() const;
ChunksSortedBySliceTime FindForOffset(int64_t offset) const;
private:
int64_t EarliestSliceTime() const { return 0; }
int64_t LatestSliceTime() const { return sorted_slice_sizes_.size() - 1; }
absl::Status DoesPermutationFi | #include "xla/service/heap_simulator/heap_simulator.h"
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class MinimumMemoryForSequenceTest : public HloTestBase {};
TEST_F(MinimumMemoryForSequenceTest, MultiComputation) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 0));
HloInstruction* cond_data = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_data, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param_iter"));
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_data"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({iter, data}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_data, cond_lt});
schedule.set_sequence(body_computation, {body_param});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(25,
HeapSimulator::MinimumMemoryForModule(schedule, size_fn).value());
}
// Verifies that memory consumed by while-condition and while-body
// subcomputations (supplied via memory_by_computation) is included when
// computing the minimum memory of the entry computation.
TEST_F(MinimumMemoryForSequenceTest, SubcomputationAccounting) {
  auto module = CreateNewVerifiedModule();
  const Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  const Shape r1f32 = ShapeUtil::MakeShape(F32, {4});
  const Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 4});
  // While condition: keep looping while element 0 of the state is non-zero.
  auto cond_builder = HloComputation::Builder("WhileCond");
  HloInstruction* cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r1f32, "cond_param"));
  HloInstruction* slice =
      cond_builder.AddInstruction(HloInstruction::CreateSlice(
          ShapeUtil::MakeShape(F32, {1}), cond_param, {0}, {1}, {1}));
  HloInstruction* reshape =
      cond_builder.AddInstruction(HloInstruction::CreateReshape(r0f32, slice));
  HloInstruction* zero = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
  HloInstruction* cond_comparison = cond_builder.AddInstruction(
      HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), reshape,
                                    zero, ComparisonDirection::kNe));
  auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
  // While body: subtract a vector of ones from the loop state.
  auto body_builder = HloComputation::Builder("WhileBody");
  HloInstruction* body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r1f32, "body_param"));
  HloInstruction* one_vector =
      body_builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
  HloInstruction* subtract =
      body_builder.AddInstruction(HloInstruction::CreateBinary(
          r1f32, HloOpcode::kSubtract, body_param, one_vector));
  auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
  // Entry: run the while loop, then broadcast / transpose / add to produce a
  // rank-2 result.
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* while_init =
      builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
  HloInstruction* while_loop =
      builder.AddInstruction(HloInstruction::CreateWhile(
          r1f32, cond_computation, body_computation, while_init));
  HloInstruction* bcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(r2f32, while_loop, {1}));
  HloInstruction* matrix = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>(
          {{1.0, 2.0, 3.0, 4.0}, {1.0, 2.0, 3.0, 4.0}})));
  HloInstruction* transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(r2f32, matrix, {0, 1}));
  HloInstruction* add = builder.AddInstruction(
      HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, transpose, bcast));
  auto entry_computation = module->AddEntryComputation(builder.Build());
  // Give every computation an explicit schedule.
  HloSchedule schedule(module.get());
  std::vector<HloInstruction*> cond_vec = {cond_param, slice, reshape, zero,
                                           cond_comparison};
  std::vector<HloInstruction*> while_body_vec = {body_param, one_vector,
                                                 subtract};
  std::vector<HloInstruction*> entry_comp_vec = {while_init, while_loop, bcast,
                                                 matrix, transpose, add};
  schedule.set_sequence(cond_computation, cond_vec);
  schedule.set_sequence(body_computation, while_body_vec);
  schedule.set_sequence(entry_computation, entry_comp_vec);
  auto size_fn = [](const BufferValue& buffer) {
    return ShapeUtil::ByteSizeOf(buffer.shape());
  };
  // Pretend the subcomputations consume fixed amounts of memory; those
  // amounts must be reflected in the entry computation's minimum memory.
  absl::flat_hash_map<const HloComputation*, int64_t> memory_by_computation;
  memory_by_computation[cond_computation] = 5;
  memory_by_computation[body_computation] = 16;
  std::unique_ptr<HloAliasAnalysis> alias_analysis =
      HloAliasAnalysis::Run(module.get()).value();
  // NOTE(review): 64 presumably corresponds to the peak of the entry
  // schedule plus the subcomputation overheads above — confirm against
  // HeapSimulator's accounting if this expectation is ever updated.
  EXPECT_EQ(64, HeapSimulator::MinimumMemoryForComputation(
                    *entry_computation, schedule.sequence(entry_computation),
                    *alias_analysis, size_fn, &memory_by_computation)
                    .value());
}
// Mnemonic tags recorded by HeapCallRecorder for each heap-interface call.
const char kAlloc[] = "Alloc";
const char kFree[] = "Free";
const char kShare[] = "Share";
const char kFinish[] = "Finish";
// A recorded sequence of (call tag, buffer) pairs; buffer is null for Finish.
using CallSequence = std::vector<std::pair<std::string, const HloValue*>>;
// A HeapAlgorithm implementation that records every Alloc/ShareWith/Free/
// Finish call it receives, letting tests assert the exact order in which
// HeapSimulator drives the underlying algorithm.
class HeapCallRecorder : public HeapAlgorithm<HloValue> {
 public:
  explicit HeapCallRecorder(CallSequence* calls) : calls_(calls) {}
  ~HeapCallRecorder() override {}
  void Alloc(const HloValue* buffer, int64_t size) override {
    calls_->emplace_back(kAlloc, buffer);
    // Not a real packing decision: the current chunk-map size simply gives
    // each allocated buffer a unique, deterministic offset.
    const int64_t offset = result_.chunk_map.size();
    result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
  }
  void ShareWith(const HloValue* buffer, const HloValue* shared,
                 int64_t size) override {
    calls_->emplace_back(kShare, buffer);
    // A shared buffer reuses the offset of the buffer it shares with.
    const int64_t offset = result_.chunk_map[shared].offset;
    result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
  }
  void Free(const HloValue* buffer, int64_t size) override {
    calls_->emplace_back(kFree, buffer);
  }
  absl::StatusOr<Result> Finish() override {
    calls_->emplace_back(kFinish, nullptr);
    HeapSimulator::Result<HloValue> result;
    result.heap_size = result_.heap_size;
    result.heap_results.emplace_back(std::move(result_));
    return result;
  }
 private:
  CallSequence* calls_;  // Not owned; destination of the recorded calls.
  HeapSimulator::HeapResult<HloValue> result_;
};
// Test harness that builds (or accepts) an HloModule, runs HeapSimulator
// over it with a HeapCallRecorder, and exposes helpers to assert on the
// recorded call sequence and the resulting buffer offsets.
class HeapSimulatorTracker {
 public:
  // Runs the simulator over the entry computation of a pre-built module.
  // NOTE(review): must_alias_set is currently unused by both constructors —
  // confirm whether callers still need it.
  explicit HeapSimulatorTracker(
      std::unique_ptr<HloModule> module,
      const std::vector<HloInstruction*>& instruction_sequence,
      const std::vector<HloInstruction*>& must_alias_set = {},
      const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
    module_ = std::move(module);
    Init(instruction_sequence, can_share_buffer);
  }
  // Builds a module around the given entry computation, then runs the
  // simulator over the given instruction sequence.
  explicit HeapSimulatorTracker(
      const std::string& name,
      std::unique_ptr<HloComputation> entry_computation,
      const std::vector<HloInstruction*>& instruction_sequence,
      const std::vector<HloInstruction*>& must_alias_set = {},
      const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
    HloModuleConfig config;
    module_ = std::make_unique<HloModule>(name, config);
    module_->AddEntryComputation(std::move(entry_computation));
    Init(instruction_sequence, can_share_buffer);
  }
  // Creates an empty module; the caller populates it via module() and then
  // calls RunWholeModule.
  explicit HeapSimulatorTracker(const std::string& name) {
    HloModuleConfig config;
    module_ = std::make_unique<HloModule>(name, config);
  }
  // Runs the simulator over the whole module with the given flattened
  // schedule.  Each instruction's buffer gets a unique size (its reverse
  // position in the sequence) so offsets are distinguishable in tests.
  void RunWholeModule(
      const std::vector<HloInstruction*>& full_module_sequence) {
    alias_analysis_ = HloAliasAnalysis::Run(module_.get()).value();
    HloSchedule schedule(module_.get());
    absl::flat_hash_map<const HloInstruction*, int> reverse_position;
    for (int i = 0; i < full_module_sequence.size(); ++i) {
      HloInstruction* instruction = full_module_sequence[i];
      schedule.GetOrCreateSequence(instruction->parent())
          .push_back(instruction);
      reverse_position[instruction] = full_module_sequence.size() - i;
    }
    auto size_fn = [&reverse_position](const BufferValue& buffer) {
      return reverse_position[buffer.instruction()];
    };
    auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
    result_ = HeapSimulator::Run(std::move(algorithm), *module_, schedule,
                                 *alias_analysis_, size_fn)
                  .value();
  }
  HloModule* module() { return module_.get(); }
  // Returns the unique HloValue defined at the given (instruction, index).
  const HloValue* BufferAt(const HloInstruction* instruction,
                           const ShapeIndex& index) const {
    return &alias_analysis_->dataflow_analysis().GetUniqueValueAt(instruction,
                                                                  index);
  }
  // Returns the offset assigned to the buffer at the given position.
  int64_t OffsetAt(const HloInstruction* instruction, const ShapeIndex& index) {
    const HloValue* buffer = BufferAt(instruction, index);
    CHECK_EQ(1, result_.heap_results.size());
    return result_.heap_results.at(0).chunk_map.at(buffer).offset;
  }
  // Asserts that the recorded call sequence matches `expected`, emitting a
  // readable dump of both sequences on failure.
  void ExpectCallSequence(const CallSequence& expected) const {
    auto to_string = [](const CallSequence& sequence) {
      std::string output;
      for (int64_t i = 0; i < sequence.size(); ++i) {
        auto pair = sequence.at(i);
        absl::StrAppendFormat(&output, "%d", i);
        absl::StrAppendFormat(&output, " :%s", pair.first);
        if (pair.second != nullptr) {
          absl::StrAppendFormat(&output, " - %s{%s}\n",
                                pair.second->instruction()->name(),
                                pair.second->index().ToString());
        }
      }
      return output;
    };
    EXPECT_EQ(expected, actual_calls_) << "Expected:\n"
                                       << to_string(expected) << " \nActual:\n"
                                       << to_string(actual_calls_) << "\n";
  }
  // Asserts that two positions were assigned the same offset, i.e. that they
  // share a buffer.
  void ExpectSharedBuffers(const HloInstruction* instruction_a,
                           const ShapeIndex& index_a,
                           const HloInstruction* instruction_b,
                           const ShapeIndex& index_b) {
    int64_t offset_a = OffsetAt(instruction_a, index_a);
    int64_t offset_b = OffsetAt(instruction_b, index_b);
    EXPECT_EQ(offset_a, offset_b);
  }
 private:
  // Runs the simulator over the entry computation only, with zero-sized
  // buffers: these tests care about call order, not sizes.
  void Init(const std::vector<HloInstruction*>& instruction_sequence,
            const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
    auto zero_size = [](const BufferValue& buffer) { return 0; };
    auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
    alias_analysis_ =
        HloAliasAnalysis::Run(module_.get(), can_share_buffer).value();
    HeapSimulator::Options options;
    result_ =
        HeapSimulator::Run(std::move(algorithm), *module_->entry_computation(),
                           HloInstructionSequence(instruction_sequence),
                           *alias_analysis_, zero_size, options)
            .value();
  }
  std::unique_ptr<HloModule> module_;
  std::unique_ptr<HloAliasAnalysis> alias_analysis_;
  CallSequence actual_calls_;
  HeapSimulator::Result<HloValue> result_;
};
// Fixture providing the scalar and 4-element vector shapes shared by the
// heap-simulator tests below.
class HeapSimulatorTest : public HloTestBase {
 protected:
  HeapSimulatorTest() = default;
  ~HeapSimulatorTest() override = default;
  // Commonly used shapes.
  Shape f32scalar_ = ShapeUtil::MakeShape(xla::F32, {});
  Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
};
// A lone constant gets no buffer from the simulator, so the only recorded
// call is the final Finish.
TEST_F(HeapSimulatorTest, ScalarConstant) {
  auto builder = HloComputation::Builder(TestName());
  auto* scalar_one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HeapSimulatorTracker tracker(TestName(), builder.Build(), {scalar_one});
  tracker.ExpectCallSequence({{kFinish, nullptr}});
}
// A single parameter is allocated, then freed at the end of the sequence.
TEST_F(HeapSimulatorTest, OneParam) {
  auto builder = HloComputation::Builder(TestName());
  auto* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32scalar_, "param0"));
  HeapSimulatorTracker tracker(TestName(), builder.Build(), {param});
  const HloValue* param_buffer = tracker.BufferAt(param, {});
  tracker.ExpectCallSequence({
      {kAlloc, param_buffer},
      {kFree, param_buffer},
      {kFinish, nullptr},
  });
}
// Two parameters feeding a multiply: three distinct buffers, no sharing.
TEST_F(HeapSimulatorTest, Multiply) {
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
  auto paramX = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec4_, HloOpcode::kMultiply, paramA, paramX));
  HeapSimulatorTracker tracker(TestName(), builder.Build(),
                               {paramA, paramX, mul});
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(paramX, {})},
      {kAlloc, tracker.BufferAt(mul, {})},
      // Parameters and the root live until the end of the sequence.
      {kFree, tracker.BufferAt(paramA, {})},
      {kFree, tracker.BufferAt(paramX, {})},
      {kFree, tracker.BufferAt(mul, {})},
      {kFinish, nullptr},
  });
}
// The elementwise add may reuse the buffer of its dead operand `mul`: the
// simulator frees mul and then issues a Share for add at the same offset.
TEST_F(HeapSimulatorTest, MultiplyAdd) {
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
  auto paramX = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
  auto paramY = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec4_, HloOpcode::kMultiply, paramA, paramX));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
  HeapSimulatorTracker tracker(TestName(), builder.Build(),
                               {paramA, paramX, mul, paramY, add});
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(paramX, {})},
      {kAlloc, tracker.BufferAt(paramY, {})},
      {kAlloc, tracker.BufferAt(mul, {})},
      // mul dies at add, which takes over its buffer.
      {kFree, tracker.BufferAt(mul, {})},
      {kShare, tracker.BufferAt(add, {})},
      {kFree, tracker.BufferAt(paramA, {})},
      {kFree, tracker.BufferAt(paramX, {})},
      {kFree, tracker.BufferAt(paramY, {})},
      {kFree, tracker.BufferAt(add, {})},
      {kFinish, nullptr},
  });
  tracker.ExpectSharedBuffers(add, {}, mul, {});
}
// A fusion whose two outputs both alias the same operand value must share
// the operand's buffer at most once; the other output gets a fresh buffer.
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnce) {
  // Allow a fusion output to share with any array operand of matching shape.
  auto can_share_buffer =
      [](const HloInstruction* instr, const HloInstruction* operand,
         const ShapeIndex& user_index) -> std::optional<bool> {
    return instr->opcode() == HloOpcode::kFusion &&
           operand->shape().IsArray() &&
           ShapeUtil::Equal(operand->shape(),
                            ShapeUtil::GetSubshape(instr->shape(), user_index));
  };
  HloModuleConfig config;
  auto module = std::make_unique<HloModule>(TestName(), config);
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
  // Fused computation that forwards its single input to both tuple outputs.
  auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
  {
    auto param = fusion_builder.AddInstruction(
        HloInstruction::CreateParameter(0, f32vec4_, "x"));
    fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
  }
  auto fusion_computation =
      module->AddEmbeddedComputation(fusion_builder.Build());
  auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
      ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
      HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
  // Fix: the tuple elements are f32[4], so declare the get-tuple-element
  // instructions with f32vec4_ (previously f32scalar_, a shape mismatch
  // with both the tuple and the downstream negates).
  auto element0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec4_, fusion, 0));
  auto element1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec4_, fusion, 1));
  auto negate0 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element0));
  auto negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
  builder.AddInstruction(HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd,
                                                      negate0, negate1));
  module->AddEntryComputation(builder.Build());
  HeapSimulatorTracker tracker(
      std::move(module),
      {paramA, negate, fusion, element0, element1, negate0, negate1}, {},
      can_share_buffer);
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(negate, {})},
      {kAlloc, tracker.BufferAt(fusion, {})},
      {kFree, tracker.BufferAt(negate, {})},
      // Only fusion output {0} shares with negate; {1} is freshly allocated.
      {kShare, tracker.BufferAt(fusion, {0})},
      {kAlloc, tracker.BufferAt(fusion, {1})},
      {kFree, tracker.BufferAt(fusion, {})},
      {kAlloc, tracker.BufferAt(negate0, {})},
      {kFree, tracker.BufferAt(fusion, {0})},
      {kFree, tracker.BufferAt(negate0, {})},
      {kAlloc, tracker.BufferAt(negate1, {})},
      {kFree, tracker.BufferAt(fusion, {1})},
      {kFree, tracker.BufferAt(negate1, {})},
      {kFree, tracker.BufferAt(paramA, {})},
      {kFinish, nullptr},
  });
}
// Like FusionOutputsOnlyShareOnce, but the fusion output that shares the
// operand's buffer dies before the other output: sharing must still happen
// at most once.
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnceOutputShortLived) {
  // Allow sharing for any fusion, regardless of shape.
  auto can_share_buffer =
      [](const HloInstruction* instr, const HloInstruction* operand,
         const ShapeIndex& user_index) -> std::optional<bool> {
    if (instr->opcode() == HloOpcode::kFusion) {
      return true;
    }
    return false;
  };
  HloModuleConfig config;
  auto module = std::make_unique<HloModule>(TestName(), config);
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
  // Fused computation that forwards its single input to both tuple outputs.
  auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
  {
    auto param = fusion_builder.AddInstruction(
        HloInstruction::CreateParameter(0, f32vec4_, "x"));
    fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
  }
  auto fusion_computation =
      module->AddEmbeddedComputation(fusion_builder.Build());
  auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
      ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
      HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
  // Fix: the tuple element is f32[4], so declare the get-tuple-element with
  // f32vec4_ (previously f32scalar_, a shape mismatch with the tuple).
  auto element1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec4_, fusion, 1));
  auto negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
  module->AddEntryComputation(builder.Build());
  HeapSimulatorTracker tracker(std::move(module),
                               {paramA, negate, fusion, element1, negate1}, {},
                               can_share_buffer);
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(negate, {})},
      {kFree, tracker.BufferAt(negate, {})},
      // Output {0} shares with negate and dies quickly; {1} lives longer.
      {kShare, tracker.BufferAt(fusion, {0})},
      {kAlloc, tracker.BufferAt(fusion, {})},
      {kAlloc, tracker.BufferAt(fusion, {1})},
      {kFree, tracker.BufferAt(fusion, {0})},
      {kFree, tracker.BufferAt(fusion, {})},
      {kAlloc, tracker.BufferAt(negate1, {})},
      {kFree, tracker.BufferAt(fusion, {1})},
      {kFree, tracker.BufferAt(paramA, {})},
      {kFree, tracker.BufferAt(negate1, {})},
      {kFinish, nullptr},
  });
}
// A fusion with two outputs (exp and negate of the same input) may reuse its
// operand's buffer for exactly one of the outputs.
TEST_F(HeapSimulatorTest, BufferReusedOnce) {
  HeapSimulatorTracker tracker(TestName());
  auto builder = HloComputation::Builder(TestName());
  HloComputation::Builder fusion_builder("fusion");
  {
    // Deliberately shadows the outer `builder` while constructing the fused
    // computation.
    HloComputation::Builder& builder = fusion_builder;
    auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
        0, f32vec4_, "A"));
    auto exp = builder.AddInstruction(
        HloInstruction::CreateUnary(f32vec4_, HloOpcode::kExp, a_param));
    auto neg = builder.AddInstruction(
        HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
    builder.AddInstruction(HloInstruction::CreateTuple({exp, neg}));
  }
  auto fusion_computation =
      tracker.module()->AddEmbeddedComputation(fusion_builder.Build());
  auto a_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
  auto neg = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
  auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
      ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
      HloInstruction::FusionKind::kLoop, {neg}, fusion_computation));
  tracker.module()->AddEntryComputation(builder.Build());
  tracker.RunWholeModule({a_param, neg, fusion});
  auto neg_buffer = tracker.OffsetAt(neg, {});
  int64_t output_buffer_0 = tracker.OffsetAt(fusion, {0});
  int64_t output_buffer_1 = tracker.OffsetAt(fusion, {1});
  // XOR: exactly one of the two fusion outputs reuses neg's buffer.
  EXPECT_TRUE((neg_buffer == output_buffer_0) ^
              (neg_buffer == output_buffer_1));
}
// Unlike the elementwise add in MultiplyAdd, the dot does not reuse the
// buffer of its dead operand `mul` — no Share call is recorded.
TEST_F(HeapSimulatorTest, MultiplyDot) {
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
  auto paramX = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
  auto paramY = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec4_, HloOpcode::kMultiply, paramA, paramX));
  DotDimensionNumbers dot_dnums;
  dot_dnums.add_lhs_contracting_dimensions(1);
  dot_dnums.add_rhs_contracting_dimensions(0);
  auto dot = builder.AddInstruction(HloInstruction::CreateDot(
      f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
  HeapSimulatorTracker tracker(TestName(), builder.Build(),
                               {paramA, paramX, mul, paramY, dot});
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(paramX, {})},
      {kAlloc, tracker.BufferAt(paramY, {})},
      {kAlloc, tracker.BufferAt(mul, {})},
      {kAlloc, tracker.BufferAt(dot, {})},
      // mul is freed after dot runs, but its buffer is not shared.
      {kFree, tracker.BufferAt(mul, {})},
      {kFree, tracker.BufferAt(paramA, {})},
      {kFree, tracker.BufferAt(paramX, {})},
      {kFree, tracker.BufferAt(paramY, {})},
      {kFree, tracker.BufferAt(dot, {})},
      {kFinish, nullptr},
  });
}
// The elementwise add reuses the buffer of its dead operand `dot`; the dot
// itself does not reuse mul's buffer.
TEST_F(HeapSimulatorTest, MultiplyDotAdd) {
  auto builder = HloComputation::Builder(TestName());
  auto paramA = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
  auto paramX = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
  auto paramY = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec4_, HloOpcode::kMultiply, paramA, paramX));
  DotDimensionNumbers dot_dnums;
  dot_dnums.add_lhs_contracting_dimensions(1);
  dot_dnums.add_rhs_contracting_dimensions(0);
  auto dot = builder.AddInstruction(HloInstruction::CreateDot(
      f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, dot, paramA));
  HeapSimulatorTracker tracker(TestName(), builder.Build(),
                               {paramA, paramX, mul, paramY, dot, add});
  tracker.ExpectCallSequence({
      {kAlloc, tracker.BufferAt(paramA, {})},
      {kAlloc, tracker.BufferAt(paramX, {})},
      {kAlloc, tracker.BufferAt(paramY, {})},
      {kAlloc, tracker.BufferAt(mul, {})},
      {kAlloc, tracker.BufferAt(dot, {})},
      {kFree, tracker.BufferAt(mul, {})},
      // dot dies at add, which takes over its buffer.
      {kFree, tracker.BufferAt(dot, {})},
      {kShare, tracker.BufferAt(add, {})},
      {kFree, tracker.BufferAt(paramA, {})},
      {kFree, tracker.BufferAt(paramX, {})},
      {kFree, tracker.BufferAt(paramY, {})},
      {kFree, tracker.BufferAt(add, {})},
      {kFinish, nullptr},
  });
  tracker.ExpectSharedBuffers(add, {}, dot, {});
}
TEST_F(HeapSimulatorTest, MultiplyDotDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_ |
1,994 | cpp | tensorflow/tensorflow | simulator | third_party/xla/xla/service/memory_space_assignment/simulator.cc | third_party/xla/xla/service/memory_space_assignment/simulator_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_SIMULATOR_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_SIMULATOR_H_
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
namespace xla {
namespace memory_space_assignment {
// Estimates the runtime of an HLO module under a given memory space
// assignment, using CostAnalysis to cost each instruction.
class RuntimeSimulator {
 public:
  explicit RuntimeSimulator(CostAnalysis* cost_analysis)
      : cost_analysis_(cost_analysis) {}
  virtual ~RuntimeSimulator() = default;
  // Returns the estimated elapsed time (in CostAnalysis time units) of
  // executing the flattened instruction sequence of `hlo_live_range`, taking
  // into account which operands/outputs `allocations` place in alternate
  // memory.
  float ComputeEstimatedElapsedTime(const HloLiveRange& hlo_live_range,
                                    const AllocationSequence& allocations);
 private:
  const CostAnalysis* cost_analysis_;  // Not owned.
  CostAnalysis::Cache cost_analysis_cache_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/simulator.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
// Estimates total elapsed time by summing, over the flattened schedule, each
// instruction's alternate-memory-aware cost multiplied by its loop-nest trip
// count.
float RuntimeSimulator::ComputeEstimatedElapsedTime(
    const HloLiveRange& hlo_live_range, const AllocationSequence& allocations) {
  // Per instruction: which output shape indices live in alternate memory,
  // and which (operand number, shape index) pairs are read from it.
  absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>
      outputs_in_alternate_memory_map;
  absl::flat_hash_map<const HloInstruction*,
                      std::vector<std::pair<int64_t, ShapeIndex>>>
      operands_in_alternate_memory_map;
  for (const auto& allocation : allocations) {
    const bool defines_alternate_memory_output =
        !allocation->is_copy_allocation() &&
        allocation->memory_space() == MemorySpace::kAlternate;
    if (defines_alternate_memory_output) {
      const auto& defining_position = allocation->defining_position();
      outputs_in_alternate_memory_map[defining_position.instruction].push_back(
          defining_position.index);
    }
    for (const auto& hlo_use : allocation->uses()) {
      operands_in_alternate_memory_map[hlo_use.instruction].emplace_back(
          hlo_use.operand_number, hlo_use.operand_index);
    }
  }
  const auto& instruction_sequence =
      hlo_live_range.flattened_instruction_sequence().instructions();
  float total_elapsed = 0.0;
  for (const HloInstruction* instruction : instruction_sequence) {
    // While ops themselves are skipped; their bodies appear in the flattened
    // sequence and are weighted by per-instruction trip counts below.
    if (instruction->opcode() == HloOpcode::kWhile) {
      continue;
    }
    std::vector<ShapeIndex> outputs_in_alternate_memory;
    if (auto it = outputs_in_alternate_memory_map.find(instruction);
        it != outputs_in_alternate_memory_map.end()) {
      outputs_in_alternate_memory = it->second;
    }
    std::vector<std::pair<int64_t, ShapeIndex>> operands_in_alternate_memory;
    if (auto it = operands_in_alternate_memory_map.find(instruction);
        it != operands_in_alternate_memory_map.end()) {
      operands_in_alternate_memory = it->second;
    }
    const float elapsed_per_invoke =
        cost_analysis_->GetInstructionElapsedInAlternateMemory(
            *instruction, operands_in_alternate_memory,
            outputs_in_alternate_memory);
    const float trip_count = cost_analysis_->CalculateNestTripCount(
        instruction, &cost_analysis_cache_);
    total_elapsed += trip_count * elapsed_per_invoke;
  }
  return total_elapsed;
}
}
} | #include "xla/service/memory_space_assignment/simulator.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
using memory_space_assignment::RuntimeSimulator;
// Pointer size used when sizing shapes on device.
constexpr int64_t kPointerSize = 8;
// Byte size of a shape, counting kPointerSize bytes per tuple pointer.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
// Fixture wiring HloCostAnalysis -> CostAnalysis -> RuntimeSimulator for a
// module, with flops_per_second set to 1 so estimated elapsed time equals
// the flop count.
class MemorySpaceAssignmentSimulatorTest : public HloTestBase {
 protected:
  absl::Status Initialize(const HloModule* module) {
    HloCostAnalysis::Options tpu_device_options;
    tpu_device_options.shape_size = ShapeSize;
    // 1 flop/s makes the expected elapsed time easy to compute by hand.
    tpu_device_options.set_flops_per_second(1);
    hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(tpu_device_options);
    TF_RETURN_IF_ERROR(
        module->entry_computation()->Accept(hlo_cost_analysis_.get()));
    hlo_cost_analysis_costs_ =
        std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
            *hlo_cost_analysis_);
    CostAnalysisOptions _options;
    TF_ASSIGN_OR_RETURN(
        cost_analysis_,
        CostAnalysis::Create(*hlo_cost_analysis_costs_, _options, *module));
    runtime_simulator_ =
        std::make_unique<xla::memory_space_assignment::RuntimeSimulator>(
            cost_analysis_.get());
    return absl::OkStatus();
  }
  std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
  std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
      hlo_cost_analysis_costs_;
  std::unique_ptr<CostAnalysis> cost_analysis_;
  std::unique_ptr<RuntimeSimulator> runtime_simulator_;
};
// Estimates the runtime of a simple counting while loop.  With
// flops_per_second = 1, the estimate reduces to the total flop count across
// all loop iterations.
TEST_F(MemorySpaceAssignmentSimulatorTest, SingleLayerNestedLoop) {
  absl::string_view hlo_string =
      R"(HloModule module, is_scheduled=true
  %body {
    %constant.1 = s32[] constant(1)
    %param = (s32[]) parameter(0)
    %count = s32[] get-tuple-element(%param), index=0
    %increment = s32[] add(s32[] %count, s32[] %constant.1)
    ROOT %loop_result = (s32[]) tuple(%increment)
  }
  %condition {
    %param = (s32[]) parameter(0)
    %constant.42 = s32[] constant(42)
    %condition_input = s32[] get-tuple-element(%param), index=0
    ROOT %greater = pred[] compare(s32[] %constant.42, s32[] %condition_input), direction=GT
  }
  ENTRY Entry {
    %dummy_input = s32[] parameter(0)
    %constant.0 = s32[] constant(0)
    ROOT %while = (s32[]) while(tuple(%constant.0)), condition=%condition, body=%body
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(Initialize(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
                          HloAliasAnalysis::Run(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_live_range,
                          HloLiveRange::Run(module->schedule(), *alias_analysis,
                                            module->entry_computation()));
  memory_space_assignment::AllocationSequence allocations;
  // NOTE(review): 84 presumably corresponds to 42 loop iterations times 2
  // flops per iteration (the add and the compare) at 1 flop/s — confirm
  // against HloCostAnalysis if this value ever changes.
  float expected_elapsed_time = 84;
  EXPECT_EQ(runtime_simulator_->ComputeEstimatedElapsedTime(*hlo_live_range,
                                                            allocations),
            expected_elapsed_time);
}
}
} |
1,995 | cpp | tensorflow/tensorflow | memory_space_assignment | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment.cc | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_MEMORY_SPACE_ASSIGNMENT_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_MEMORY_SPACE_ASSIGNMENT_H_
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
// Holds the buffer-to-chunk assignments produced by memory space assignment
// so they can be exported/applied after the pass runs.
class PresetAssignments {
 public:
  // Per-memory-space assignment info: total size plus the heap simulator
  // trace recorded for that space.
  struct AssignmentInformation {
    int64_t size;
    HeapSimulatorTrace heap_simulator_trace;
  };
  PresetAssignments() = default;
  void add_chunk(const HloPosition& position,
                 const HeapSimulator::Chunk& chunk) {
    chunks_.emplace_back(position, chunk);
  }
  void add_scoped_allocation_chunk(HloInstruction* instruction,
                                   const HeapSimulator::Chunk& chunk) {
    scoped_allocation_chunks_.emplace_back(instruction, chunk);
  }
  // Returns the info for the given memory space, creating it on first use.
  // Linear scan is fine: the number of memory spaces is small.
  AssignmentInformation* assignment_information_for_space(
      int64_t memory_space) {
    for (auto& space_and_info : assignment_info_) {
      if (space_and_info.first == memory_space) {
        return &space_and_info.second;
      }
    }
    assignment_info_.emplace_back(memory_space, AssignmentInformation());
    return &assignment_info_.back().second;
  }
  absl::Span<const std::pair<HloPosition, HeapSimulator::Chunk>> chunks()
      const {
    return chunks_;
  }
  absl::Span<const std::pair<HloInstruction*, HeapSimulator::Chunk>>
  scoped_allocation_chunks() const {
    return scoped_allocation_chunks_;
  }
  absl::Span<const std::pair<int64_t, AssignmentInformation>>
  assignment_informations() const {
    return assignment_info_;
  }
  // Debug strings populated during the pass (may be empty).
  std::string buffer_info_str() const { return buffer_info_str_; }
  std::string allocation_info_str() const { return allocation_info_str_; }
  std::string instruction_schedule_str() const {
    return instruction_schedule_str_;
  }
 private:
  std::vector<std::pair<HloPosition, HeapSimulator::Chunk>> chunks_;
  std::vector<std::pair<HloInstruction*, HeapSimulator::Chunk>>
      scoped_allocation_chunks_;
  std::vector<std::pair<int64_t, AssignmentInformation>> assignment_info_;
  std::string buffer_info_str_;
  std::string allocation_info_str_;
  std::string instruction_schedule_str_;
};
// Runs memory space (alternate-memory) assignment over an HloModule and
// produces the resulting PresetAssignments.
class MemorySpaceAssignment {
 public:
  // Statistics about the asynchronous copies introduced by the pass.
  struct AsyncCopyStats {
    int64_t max_outstanding_async_copies = 0;
    int64_t num_prefetches = 0;
    int64_t num_sliced_prefetches = 0;
    int64_t num_sliced_prefetch_slices = 0;
    int64_t prefetch_bytes = 0;
    int64_t num_evictions = 0;
    int64_t eviction_bytes = 0;
  };
  virtual ~MemorySpaceAssignment() = default;
  // Entry point: runs the whole pass on `module`.
  static absl::StatusOr<std::unique_ptr<PresetAssignments>> Run(
      HloModule* module, const HloLiveRange& hlo_live_range,
      const HloAliasAnalysis& alias_analysis, const Options& options);
  // Computes statistics over the async copies present in the module.
  absl::StatusOr<AsyncCopyStats> CalculateAsyncCopyStats() const;
  absl::Status VerifyAndExportHeapSimulatorTrace();
 protected:
  virtual absl::StatusOr<std::unique_ptr<PresetAssignments>>
  RunMemorySpaceAssignment(const HloLiveRange& hlo_live_range,
                           const HloAliasAnalysis& alias_analysis);
  // Populates allocations_; overridable so subclasses can plug in a
  // different allocation algorithm.
  virtual absl::Status FindAllocationSequence(
      const HloLiveRange& hlo_live_range,
      const HloAliasAnalysis& alias_analysis);
  const Options& options() const { return options_; }
  MemorySpaceAssignment(HloModule* module, const Options& options,
                        const HloLiveRange& hlo_live_range)
      : module_(module),
        options_(options),
        flattened_instructions_(hlo_live_range.flattened_instruction_sequence()
                                    .instructions()
                                    .begin(),
                                hlo_live_range.flattened_instruction_sequence()
                                    .instructions()
                                    .end()),
        computations_in_schedule_(),
        preset_assignments_(std::make_unique<PresetAssignments>()) {
    // Record which computations are covered by the schedule.
    for (const auto& computation_and_bound :
         hlo_live_range.computation_span_times()) {
      computations_in_schedule_.insert(computation_and_bound.first);
    }
  }
  AllocationSequence allocations_;
  HloModule* module() { return module_; }
 private:
  absl::Status Process(const HloLiveRange& hlo_live_range);
  absl::Status SimplifyGraph();
  absl::Status FixSchedule();
  absl::Status ExportAndColorBuffers();
  void ScheduleAsynchronousCopies();
  void RemoveAssignmentForInstruction(const HloInstruction* instruction);
  HloModule* module_;
  const Options& options_;
  std::vector<HloInstruction*> flattened_instructions_;
  absl::flat_hash_set<const HloComputation*> computations_in_schedule_;
  std::unique_ptr<PresetAssignments> preset_assignments_;
  std::vector<std::pair<HloPosition, HeapSimulator::Chunk>>
      alternate_memory_assignments_;
  std::vector<std::pair<HloInstruction*, HeapSimulator::Chunk>>
      scoped_memory_assignments_;
  int64_t alternate_memory_size_ = 0;
  absl::flat_hash_map<int64_t, std::vector<HloInstruction*>> schedule_after_;
  absl::flat_hash_map<int64_t, std::vector<HloInstruction*>> schedule_before_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/simulator.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
absl::Status InsertInstructionAndEnsureOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions);
// Inserts `new_instruction` (preceded by its transitive operands) into
// `new_sequence` unless it has already been inserted, in which case this is
// a no-op. Mutually recursive with
// InsertInstructionAndEnsureOperandsInserted.
absl::Status EnsureInstructionAndOperandsInserted(
    HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
    absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
  const bool already_inserted =
      inserted_instructions->contains(new_instruction);
  return already_inserted ? absl::OkStatus()
                          : InsertInstructionAndEnsureOperandsInserted(
                                new_instruction, new_sequence,
                                inserted_instructions);
}
// Unconditionally appends `new_instruction` to `new_sequence` after first
// ensuring every operand has been inserted (post-order), and records it in
// `inserted_instructions`. Callers must check for duplicates first (see
// EnsureInstructionAndOperandsInserted); inserting twice is an error.
absl::Status InsertInstructionAndEnsureOperandsInserted(
    HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
    absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
  for (HloInstruction* operand : new_instruction->operands()) {
    TF_RETURN_IF_ERROR(EnsureInstructionAndOperandsInserted(
        operand, new_sequence, inserted_instructions));
  }
  VLOG(4) << "inserting: " << new_instruction->ToShortString();
  new_sequence->push_back(new_instruction);
  // Fails if this instruction was already present in the inserted set.
  TF_RET_CHECK(inserted_instructions->insert(new_instruction).second);
  return absl::OkStatus();
}
std::string InstructionScheduleToString(const HloLiveRange& hlo_live_range) {
const absl::flat_hash_map<const HloInstruction*, HloLiveRange::LogicalTime>&
instruction_schedule = hlo_live_range.instruction_schedule();
std::vector<std::pair<int64_t, const HloInstruction*>> instructions;
instructions.reserve(instruction_schedule.size());
for (const auto& instruction : instruction_schedule) {
instructions.push_back({instruction.second, instruction.first});
}
std::string instruction_schedule_str = "\n";
absl::c_sort(instructions);
for (auto& instruction : instructions) {
absl::StrAppend(&instruction_schedule_str,
"LogicalTime: ", instruction.first, " ",
instruction.second->ToString(), "\n");
}
return instruction_schedule_str;
}
// Extends the producing (parent) allocation of `copy_allocation` so its data
// stays live until the copy completes. If the parent is itself a copy, pulls
// the parent's copy-done (and, to stay consistent, copy-start) schedule
// points early enough that the parent's data exists before this copy starts.
void EnsureParentAllocationIsAvailableForCopy(CopyAllocation* copy_allocation) {
  Allocation& parent_allocation = copy_allocation->mutable_prev_allocation();
  parent_allocation.Extend(copy_allocation->copy_done_schedule_before());
  if (parent_allocation.is_copy_allocation()) {
    auto parent_copy_allocation =
        tensorflow::down_cast<CopyAllocation*>(&parent_allocation);
    parent_copy_allocation->set_copy_done_schedule_before(
        std::min(parent_copy_allocation->copy_done_schedule_before(),
                 copy_allocation->start_time()));
    // Keep the parent's copy-start strictly before its (possibly moved)
    // copy-done point.
    parent_copy_allocation->set_copy_start_schedule_after(
        std::min(parent_copy_allocation->copy_start_schedule_after(),
                 parent_copy_allocation->copy_done_schedule_before() - 1));
  }
}
// Reschedules `copy_allocation` as a just-in-time copy for a single use at
// `use_time`: the copy starts one step before the use and completes at the
// use, then the parent allocation is extended to cover it.
void MakeCopyAllocationJitForSingleUse(CopyAllocation* copy_allocation,
                                       int64_t use_time) {
  const int64_t copy_start_time = use_time - 1;
  copy_allocation->set_start_time(copy_start_time);
  copy_allocation->set_copy_start_schedule_after(copy_start_time);
  copy_allocation->set_end_time(use_time);
  copy_allocation->set_copy_done_schedule_before(use_time);
  EnsureParentAllocationIsAvailableForCopy(copy_allocation);
}
// Logical schedule time of the instruction that performs `use`.
int64_t GetUseTime(const HloUse& use, const HloLiveRange& hlo_live_range) {
  const auto& schedule = hlo_live_range.instruction_schedule();
  return schedule.at(use.instruction);
}
// Rewrites every multi-use prefetch (copy into alternate memory) into a chain
// of just-in-time single-use prefetches: the original prefetch keeps only its
// first use, and a fresh CopyAllocation is appended for each remaining use,
// scheduled to start one step before that use.
void ProcessPrefetchesToAlternateMemory(AllocationSequence& allocations,
                                        const HloLiveRange& hlo_live_range) {
  std::vector<Allocation*> allocations_in_raw_pointers =
      GetAllocationSequenceInRawPointers(allocations);
  for (auto allocation : allocations_in_raw_pointers) {
    if (allocation->is_copy_allocation() && allocation->is_in_alternate_mem() &&
        !allocation->uses().empty()) {
      CopyAllocation* prefetch =
          tensorflow::down_cast<CopyAllocation*>(allocation);
      // Snapshot the uses before clearing them off the prefetch.
      std::vector<HloUse> uses = prefetch->uses();
      prefetch->clear_uses();
      // The original prefetch becomes a JIT prefetch for its first use only.
      prefetch->AddUse(uses[0]);
      MakeCopyAllocationJitForSingleUse(prefetch,
                                        GetUseTime(uses[0], hlo_live_range));
      // Each later use gets its own single-use JIT prefetch from the same
      // parent allocation.
      for (size_t use_index = 1; use_index < uses.size(); ++use_index) {
        const HloUse& use = uses[use_index];
        int64_t use_time = GetUseTime(use, hlo_live_range);
        auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
            prefetch->mutable_prev_allocation(), MemorySpace::kAlternate,
            prefetch->chunk(), use_time - 1, use_time, use_time);
        jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
        jit_single_use_prefetch->AddUse(use);
        EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
        allocations.push_back(std::move(jit_single_use_prefetch));
      }
    }
  }
}
// Reschedules `eviction` to happen immediately after its parent allocation
// begins: the copy starts at the parent's start time and is done one step
// later.
void MakeEvictionImmediate(CopyAllocation* eviction) {
  const int64_t parent_start = eviction->prev_allocation().start_time();
  eviction->set_start_time(parent_start);
  eviction->set_copy_start_schedule_after(parent_start);
  eviction->set_copy_done_schedule_before(parent_start + 1);
  eviction->Extend(parent_start + 1);
}
// Builds a map from each non-copy parent allocation to the eviction (copy
// into default memory) that spills it. Evictions whose parent is itself a
// copy allocation are skipped.
absl::flat_hash_map<Allocation*, CopyAllocation*> GetEvictionsMap(
    std::vector<Allocation*>& allocations) {
  absl::flat_hash_map<Allocation*, CopyAllocation*> result;
  for (Allocation* candidate : allocations) {
    if (!candidate->is_copy_allocation() || !candidate->is_in_default_mem()) {
      continue;
    }
    auto eviction = tensorflow::down_cast<CopyAllocation*>(candidate);
    Allocation& source = eviction->mutable_prev_allocation();
    if (!source.is_copy_allocation()) {
      result[&source] = eviction;
    }
  }
  return result;
}
// For values produced directly into alternate memory: shrinks the producing
// allocation to a single step, evicts it to default memory immediately, and
// prefetches it back just in time for every use that does not immediately
// follow the producer. Reuses an existing eviction for the buffer when one
// is already present in the sequence.
void ProcessBuffersProducedInAlternateMemory(
    AllocationSequence& allocations, const HloLiveRange& hlo_live_range) {
  std::vector<Allocation*> allocations_in_raw_pointers =
      GetAllocationSequenceInRawPointers(allocations);
  // Map from producing allocation to its (unique) eviction copy.
  absl::flat_hash_map<Allocation*, CopyAllocation*> evictions_map =
      GetEvictionsMap(allocations_in_raw_pointers);
  for (auto& [_, eviction] : evictions_map) {
    MakeEvictionImmediate(eviction);
  }
  VLOG(2) << "AllocationSequence after making spills immediate spills\n";
  XLA_LOG_LINES(2, AllocationSequenceToString(allocations, true));
  for (auto allocation : allocations_in_raw_pointers) {
    if (!allocation->is_copy_allocation() &&
        allocation->is_in_alternate_mem()) {
      // Snapshot the uses before clearing them off the allocation.
      std::vector<HloUse> uses = allocation->uses();
      allocation->clear_uses();
      // Keep the produced value in alternate memory for one step only.
      allocation->set_end_time(allocation->start_time() + 1);
      for (const HloUse& use : uses) {
        int64_t use_time = GetUseTime(use, hlo_live_range);
        if (allocation->start_time() + 1 == use_time) {
          // The use immediately follows the producer; no copy needed.
          allocation->AddUse(use);
          continue;
        }
        // Lazily create an immediate eviction if this buffer has none yet.
        if (!evictions_map.contains(allocation)) {
          auto eviction_unique_ptr = std::make_unique<CopyAllocation>(
              *allocation, MemorySpace::kDefault, std::nullopt,
              allocation->start_time(), allocation->start_time() + 1,
              allocation->start_time() + 1);
          eviction_unique_ptr->set_copy_start_schedule_after(
              allocation->start_time());
          evictions_map[allocation] = eviction_unique_ptr.get();
          allocations.push_back(std::move(eviction_unique_ptr));
        }
        // Prefetch back from the eviction just in time for this use.
        CopyAllocation* eviction = evictions_map[allocation];
        auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
            *eviction, MemorySpace::kAlternate, allocation->chunk(),
            use_time - 1, use_time, use_time);
        jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
        jit_single_use_prefetch->AddUse(use);
        EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
        allocations.push_back(std::move(jit_single_use_prefetch));
      }
    }
  }
}
// Converts `allocations` into "always spill" form: prefetches become
// just-in-time single-use copies, and buffers produced into alternate memory
// are spilled immediately and prefetched back per use. Logs the sequence
// before and after each stage at VLOG level 2, then restores sorted order.
void TransformAllocationSequenceToSpill(AllocationSequence& allocations,
                                        const HloLiveRange& hlo_live_range) {
  VLOG(2) << "InstructionSchedule before transform\n";
  XLA_LOG_LINES(2, InstructionScheduleToString(hlo_live_range));
  VLOG(2) << "AllocationSequence before transform\n";
  XLA_LOG_LINES(2, AllocationSequenceToString(allocations, true));
  ProcessPrefetchesToAlternateMemory(allocations, hlo_live_range);
  VLOG(2) << "AllocationSequence after processing prefetches\n";
  XLA_LOG_LINES(2, AllocationSequenceToString(allocations, true));
  ProcessBuffersProducedInAlternateMemory(allocations, hlo_live_range);
  VLOG(2) << "AllocationSequence after processing buffers produced in kAlt\n";
  XLA_LOG_LINES(2, AllocationSequenceToString(allocations, true));
  SortAllocationSequence(allocations);
}
}
// Walks all non-fusion computations and tallies async copy statistics:
// prefetch/eviction counts and bytes, sliced-prefetch counts, and the
// maximum number of copies simultaneously in flight. Both plain
// copy-start/copy-done pairs and async-wrapped slices are counted as copies.
absl::StatusOr<MemorySpaceAssignment::AsyncCopyStats>
MemorySpaceAssignment::CalculateAsyncCopyStats() const {
  AsyncCopyStats stats;
  int64_t current_copies = 0;
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
                      HloDataflowAnalysis::Run(*module_));
  // True iff `instruction` has opcode `async_opcode` (kAsyncStart/kAsyncDone)
  // and wraps a slice. The original repeated this three-clause test inline
  // three times; hoisted here for clarity.
  auto is_async_slice = [](const HloInstruction* instruction,
                           HloOpcode async_opcode) {
    return instruction->opcode() == async_opcode &&
           instruction->async_wrapped_instruction()->opcode() ==
               HloOpcode::kSlice;
  };
  for (const HloComputation* computation :
       module_->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kCopyStart ||
          is_async_slice(instruction, HloOpcode::kAsyncStart)) {
        current_copies++;
      } else if (instruction->opcode() == HloOpcode::kCopyDone ||
                 is_async_slice(instruction, HloOpcode::kAsyncDone)) {
        current_copies--;
        int64_t size =
            options_.size_fn(dataflow_analysis->GetUniqueValueAt(instruction));
        // Destination memory space distinguishes prefetch from eviction.
        if (instruction->shape().layout().memory_space() ==
            options_.alternate_memory_space) {
          ++stats.num_prefetches;
          stats.prefetch_bytes += size;
          if (is_async_slice(instruction, HloOpcode::kAsyncDone)) {
            ++stats.num_sliced_prefetch_slices;
          }
        } else {
          ++stats.num_evictions;
          stats.eviction_bytes += size;
        }
      } else if (instruction->IsCustomCall(kConcatBitcastCustomCall)) {
        // One concat-bitcast marks the completion of one sliced prefetch.
        ++stats.num_sliced_prefetches;
      }
      stats.max_outstanding_async_copies =
          std::max(stats.max_outstanding_async_copies, current_copies);
    }
  }
  return stats;
}
// Public entry point: runs memory space assignment over `module` (which must
// already have a schedule) and returns the resulting preset assignments.
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::Run(HloModule* module,
                           const HloLiveRange& hlo_live_range,
                           const HloAliasAnalysis& alias_analysis,
                           const Options& options) {
  CHECK(module->has_schedule());
  VLOG(3) << "Module before memory space assignment: ";
  XLA_VLOG_LINES(3, module->ToString());
  VLOG(3) << "Schedule: " << module->schedule().ToString();
  MemorySpaceAssignment memory_space_assignment(module, options,
                                                hlo_live_range);
  return memory_space_assignment.RunMemorySpaceAssignment(hlo_live_range,
                                                          alias_analysis);
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::RunMemorySpaceAssignment(
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis) {
TF_RETURN_IF_ERROR(FindAllocationSequence(hlo_live_range, alias_analysis));
if (options_.cost_ | #include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/instruction_hoister.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
namespace op = xla::testing::opcode_matchers;
using Chunk = HeapSimulator::Chunk;
using ::testing::_;
using ::testing::Return;
using ::testing::UnorderedElementsAre;
constexpr int64_t kPointerSize = 8;
constexpr float kAsyncCopyBandwidth = 100;
constexpr float kAlternateMemBandwidth = 1000;
constexpr float kBytesPerSecond = 100;
constexpr float kFlopsPerSecond = 1000;
constexpr float kTranscendentalsPerSecond = 10;
// Byte size of `shape`, counting each pointer as kPointerSize bytes.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, /*pointer_size=*/kPointerSize);
}
int64_t SizeFunction(const BufferValue& value) {
return ShapeSize(value.shape());
}
// Scoped-memory callback used by tests: reserves no per-instruction scoped
// alternate memory regardless of operand/output placement.
int64_t ReservedScopedMemoryFn(
    const HloInstruction* instruction,
    const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
        operands_in_alternate_memory,
    const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
  return 0;
}
// BufferIntervalComparator adapter that forwards LessThan to an arbitrary
// test-supplied MsaBufferIntervalCompare callable. The description/criteria
// strings are placeholders since the callable is opaque.
class TestBufferIntervalComparator : public BufferIntervalComparator {
 public:
  explicit TestBufferIntervalComparator(MsaBufferIntervalCompare compare_method)
      : BufferIntervalComparator(), compare_method_(compare_method) {}
  ~TestBufferIntervalComparator() override = default;
  std::string DescribeComparisonCriteria() const override {
    return "internal to test";
  }
  std::string CriteriaToString(
      const MsaBufferInterval& buffer_interval) override {
    return "internal to test";
  }
  bool LessThan(const MsaBufferInterval& lhs,
                const MsaBufferInterval& rhs) override {
    return compare_method_(lhs, rhs);
  }

 private:
  // Test-provided comparison; invoked for every pair of buffer intervals.
  MsaBufferIntervalCompare compare_method_;
};
class MemorySpaceAssignmentTestBase : public HloTestBase {
protected:
const int64_t kDefaultMemorySpace = 0;
const int64_t kAlternateMemorySpace = 1;
virtual bool allocate_across_sequential_calls() const { return false; }
// HLO cost-analysis options wired to the test's canonical hardware rates.
HloCostAnalysis::Options DefaultHloCostAnalysisOptions() {
  HloCostAnalysis::Options cost_options;
  cost_options.shape_size = ShapeSize;
  cost_options.set_transcendentals_per_second(kTranscendentalsPerSecond);
  cost_options.set_bytes_per_second(kBytesPerSecond);
  cost_options.set_flops_per_second(kFlopsPerSecond);
  return cost_options;
}
// Default MSA options for tests: 128-byte alternate memory, 8-byte
// alignment, verification on, unlimited outstanding copies.
Options DefaultMemorySpaceOptions() {
  Options msa_options;
  msa_options.alternate_memory_space = kAlternateMemorySpace;
  msa_options.max_size_in_bytes = 128;
  msa_options.alignment_in_bytes = 8;
  msa_options.max_outstanding_prefetches = -1;
  msa_options.max_outstanding_evictions = -1;
  msa_options.verify = true;
  msa_options.allocate_across_sequential_calls =
      allocate_across_sequential_calls();
  return msa_options;
}
// Cost-analysis options using the test's canonical bandwidth constants.
CostAnalysisOptions DefaultCostAnalysisOptions() {
  CostAnalysisOptions ca_options;
  ca_options.alternate_mem_bandwidth_bytes_per_second = kAlternateMemBandwidth;
  ca_options.async_copy_bandwidth_bytes_per_second = kAsyncCopyBandwidth;
  return ca_options;
}
// Copy of `options` with both outstanding-copy limits (prefetches and
// evictions) set to `max_async_copies`.
Options UpdateMaxAsyncCopies(Options options, int64_t max_async_copies) {
  options.max_outstanding_evictions = max_async_copies;
  options.max_outstanding_prefetches = max_async_copies;
  return options;
}
// Runs MSA with a cost-analysis-driven prefetch interval picker and a
// memory-boundedness buffer ordering, mirroring a production configuration.
// Each *_override parameter, when set, replaces the corresponding default
// options built by this fixture.
std::unique_ptr<PresetAssignments> AssignMemorySpaceUsingCostAnalysis(
    HloModule* module,
    std::optional<Options> memory_space_options_override = std::nullopt,
    std::optional<CostAnalysisOptions> cost_analysis_options_override =
        std::nullopt,
    std::optional<HloCostAnalysis::Options> hlo_cost_options_override =
        std::nullopt,
    std::optional<MsaSortOrderOverrides> optional_msa_sort_order_overrides =
        std::nullopt) {
  HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
  if (hlo_cost_options_override) {
    hlo_cost_options = *hlo_cost_options_override;
  }
  HloCostAnalysis hlo_cost_analysis(hlo_cost_options);
  // Accumulate per-instruction costs over every non-fusion computation.
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    TF_CHECK_OK(computation->Accept(&hlo_cost_analysis));
  }
  // NOTE(review): alias_analysis is not used below; kept because Run() can
  // CHECK-fail on invalid modules, which some tests may rely on.
  auto alias_analysis = HloAliasAnalysis::Run(module).value();
  Options memory_space_options = DefaultMemorySpaceOptions();
  if (memory_space_options_override) {
    memory_space_options = *memory_space_options_override;
  }
  CostAnalysisOptions cost_analysis_options = DefaultCostAnalysisOptions();
  if (cost_analysis_options_override) {
    cost_analysis_options = *cost_analysis_options_override;
  }
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  auto cost_analysis = CostAnalysis::Create(hlo_cost_analysis_costs,
                                            cost_analysis_options, *module)
                           .value();
  memory_space_options.cost_analysis = cost_analysis.get();
  // Construct the picker directly; the original wrapped it in a redundant
  // temporary plus copy construction.
  CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
      *cost_analysis, 0.8,
      1.5,
      10.0,
      memory_space_options.max_size_in_bytes);
  MsaSortOrderOverrides msa_sort_order_overrides;
  if (optional_msa_sort_order_overrides.has_value()) {
    msa_sort_order_overrides = optional_msa_sort_order_overrides.value();
  }
  MemoryBoundednessBufferIntervalComparator comparator(
      *cost_analysis, &cache_, msa_sort_order_overrides);
  return AssignMemorySpace(
      module, memory_space_options,
      [&comparator](const MsaBufferInterval& lhs,
                    const MsaBufferInterval& rhs) {
        return comparator.LessThan(lhs, rhs);
      },
      &prefetch_interval_picker);
}
// Convenience overload: hoists instructions, then runs MSA with an
// instruction-count-based prefetch interval picker and no custom buffer
// ordering.
std::unique_ptr<PresetAssignments> AssignMemorySpace(
    HloModule* module, std::optional<Options> options_override = std::nullopt,
    int64_t max_prefetch_interval = 10, int64_t min_prefetch_interval = 2) {
  InstructionHoister hoister;
  TF_CHECK_OK(hoister.Run(module).status());
  InstructionCountPrefetchIntervalPicker picker(min_prefetch_interval,
                                                max_prefetch_interval);
  return AssignMemorySpace(module, options_override,
                           /*buffer_interval_compare=*/{}, &picker);
}
// Runs MSA and converts a failing status into a test expectation failure,
// returning the preset assignments on success.
std::unique_ptr<PresetAssignments> AssignMemorySpace(
    HloModule* module, std::optional<Options> options_override,
    std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
    PrefetchIntervalPicker* prefetch_interval_picker) {
  auto result = AssignMemorySpaceAndReturnStatus(
      module, options_override, buffer_interval_compare,
      prefetch_interval_picker);
  TF_EXPECT_OK(result.status());
  return std::move(result.value());
}
// Core test helper: fills in MSA options (size function, alternate-memory
// eligibility), runs alias/live-range analyses and MemorySpaceAssignment::Run,
// then validates the result (parameters and root in default memory, exported
// chunks in alternate memory).
absl::StatusOr<std::unique_ptr<PresetAssignments>>
AssignMemorySpaceAndReturnStatus(
    HloModule* module, std::optional<Options> options_override,
    std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
    PrefetchIntervalPicker* prefetch_interval_picker) {
  auto size_fn = [](const BufferValue& buffer) {
    return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
  };
  // Everything is allowed in alternate memory except entry-computation
  // parameters.
  auto is_allowed_in_alternate_mem = [](const HloValue& value) {
    HloInstruction* instruction = value.instruction();
    HloComputation* computation = instruction->parent();
    bool in_entry_computation =
        (computation == computation->parent()->entry_computation());
    if (in_entry_computation &&
        instruction->opcode() == HloOpcode::kParameter) {
      return false;
    }
    return true;
  };
  // Skip the parameters-in-default-memory check when the input module already
  // places some parameter subshape in alternate memory.
  bool check_parameters_in_default_memory = true;
  for (const HloInstruction* parameter :
       module->entry_computation()->parameter_instructions()) {
    ShapeUtil::ForEachSubshape(
        parameter->shape(),
        [&](const Shape& subshape, const ShapeIndex& /*index*/) {
          if (subshape.has_layout() &&
              subshape.layout().memory_space() == kAlternateMemorySpace) {
            check_parameters_in_default_memory = false;
          }
        });
  }
  Options options = DefaultMemorySpaceOptions();
  if (options_override) {
    options = *options_override;
  }
  // Keep the adapter alive until the MSA run below completes.
  std::unique_ptr<TestBufferIntervalComparator> test_comparator;
  if (buffer_interval_compare.has_value()) {
    test_comparator = std::make_unique<TestBufferIntervalComparator>(
        *buffer_interval_compare);
    options.buffer_interval_comparator = test_comparator.get();
  }
  options.prefetch_interval_picker = prefetch_interval_picker;
  options.size_fn = size_fn;
  if (options.is_allowed_in_alternate_mem_fn == nullptr) {
    options.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
  }
  TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module));
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
                      HloLiveRange::Run(module->schedule(), *alias_analysis,
                                        module->entry_computation()));
  TF_ASSIGN_OR_RETURN(std::unique_ptr<PresetAssignments> preset_assignments,
                      MemorySpaceAssignment::Run(module, *hlo_live_range,
                                                 *alias_analysis, options));
  if (check_parameters_in_default_memory) {
    CheckParametersInDefaultMemory(module);
  }
  CheckRootInDefaultMemory(module);
  CheckPresetAssignments(preset_assignments.get());
  return preset_assignments;
}
void CheckPresetAssignments(const PresetAssignments* preset_assignments) {
std::set<HloPosition> positions_in_preset_assignments;
for (auto& position_and_chunk : preset_assignments->chunks()) {
HloPosition position = position_and_chunk.first;
EXPECT_EQ(positions_in_preset_assignments.find(position),
positions_in_preset_assignments.end());
positions_in_preset_assignments.insert(position);
const Shape& subshape =
ShapeUtil::GetSubshape(position.instruction->shape(), position.index);
EXPECT_EQ(subshape.layout().memory_space(), kAlternateMemorySpace)
<< "Exported position is not in alternate mem: "
<< position.ToString();
}
}
// Expects that no subshape of any entry-computation parameter was placed in
// alternate memory.
void CheckParametersInDefaultMemory(const HloModule* module) {
  const HloComputation* entry_computation = module->entry_computation();
  for (const HloInstruction* parameter :
       entry_computation->parameter_instructions()) {
    ShapeUtil::ForEachSubshape(
        parameter->shape(),
        [&](const Shape& subshape, const ShapeIndex& /*index*/) {
          if (subshape.has_layout()) {
            EXPECT_NE(subshape.layout().memory_space(), kAlternateMemorySpace)
                << "Parameter not in default memory: "
                << parameter->ToString();
          }
        });
  }
}
// Expects an array-shaped entry root to live in default memory; non-array
// roots are not checked.
void CheckRootInDefaultMemory(const HloModule* module) {
  const HloInstruction* root =
      module->entry_computation()->root_instruction();
  if (!root->shape().IsArray()) {
    return;
  }
  EXPECT_EQ(root->shape().layout().memory_space(), kDefaultMemorySpace);
}
// Peak counts of simultaneously in-flight async copies observed while
// walking a schedule (see CountMaximumOutstandingAsyncCopies).
struct OutstandingAsyncCopies {
  int64_t max_copies;      // All async copies: prefetches + evictions.
  int64_t max_prefetches;  // Copies into alternate memory.
  int64_t max_evictions;   // Copies back to default memory.
};
// Walks the entry computation's schedule and tracks how many async copies
// (total, prefetches, evictions) are simultaneously in flight, returning the
// maximum of each.
OutstandingAsyncCopies CountMaximumOutstandingAsyncCopies(
    const HloModule& module) {
  OutstandingAsyncCopies copies{0, 0, 0};
  int64_t current_copies = 0;
  int64_t current_prefetches = 0;
  int64_t current_evictions = 0;
  for (HloInstruction* instruction : module.schedule()
                                         .sequence(module.entry_computation())
                                         .instructions()) {
    if (instruction->opcode() == HloOpcode::kCopyStart) {
      current_copies++;
      // A copy-start whose destination (tuple element {0}) is in alternate
      // memory is a prefetch; otherwise it is an eviction.
      if (ShapeUtil::GetSubshape(instruction->shape(), {0})
              .layout()
              .memory_space() == kAlternateMemorySpace) {
        current_prefetches++;
      } else {
        current_evictions++;
      }
    } else if (instruction->opcode() == HloOpcode::kCopyDone) {
      current_copies--;
      if (instruction->shape().layout().memory_space() ==
          kAlternateMemorySpace) {
        current_prefetches--;
      } else {
        current_evictions--;
      }
    }
    copies.max_copies = std::max(copies.max_copies, current_copies);
    copies.max_prefetches =
        std::max(copies.max_prefetches, current_prefetches);
    // Bug fix: the original assigned this max to max_prefetches (clobbering
    // the prefetch maximum) and never updated max_evictions.
    copies.max_evictions = std::max(copies.max_evictions, current_evictions);
  }
  return copies;
}
// Returns the alternate-memory offset assigned to the buffer defined by
// `instruction` at `index`, or -1 if no chunk in `preset_assignments`
// matches any of the buffer's defining positions.
int64_t GetAlternateMemoryOffset(const PresetAssignments& preset_assignments,
                                 const HloInstruction* instruction,
                                 const ShapeIndex& index = {}) const {
  const HloModule* module = instruction->GetModule();
  auto alias_analysis = HloAliasAnalysis::Run(module).value();
  HloBuffer& buffer = alias_analysis->GetUniqueBufferAt(instruction, index);
  for (auto& pos_and_chunk : preset_assignments.chunks()) {
    for (auto& value : buffer.values()) {
      if (pos_and_chunk.first == value->defining_position()) {
        return pos_and_chunk.second.offset;
      }
    }
  }
  return -1;
}
// Builds a scheduled module where `tanh` is produced early but consumed only
// by the final add, while a web of intermediate binaries keeps alternate
// memory busy in between — pressuring MSA to evict tanh and prefetch it back.
std::unique_ptr<HloModule> CreateEvictAndPrefetchModule() {
  HloComputation::Builder builder(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  HloInstruction* p0 =
      builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
  HloInstruction* p1 =
      builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
  // tanh's only use is the last instruction (`add`), far from its producer.
  HloInstruction* tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
  HloInstruction* a = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
  HloInstruction* b = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
  HloInstruction* c = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
  HloInstruction* d = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
  // Pairwise products of {a, b, c, d} keep many values live at once.
  HloInstruction* e = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
  HloInstruction* f = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
  HloInstruction* g = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
  HloInstruction* h = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
  HloInstruction* i = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
  HloInstruction* j = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
  HloInstruction* k = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
  HloInstruction* l = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
  HloInstruction* m = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
  HloInstruction* n = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
  HloInstruction* o = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
  // The late use of tanh that forces the prefetch.
  HloInstruction* add = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  HloSchedule schedule(module.get());
  schedule.set_sequence(computation, {p0, p1, tanh, a, b, c, d, e, f, g, h, i,
                                      j, k, l, m, n, o, add});
  TF_CHECK_OK(module->set_schedule(schedule));
  return module;
}
CostAnalysis::Cache cache_;
};
// Test fixture parameterized over whether MSA may allocate across
// sequential calls.
class MemorySpaceAssignmentTest : public MemorySpaceAssignmentTestBase,
                                  public ::testing::WithParamInterface<bool> {
 protected:
  bool allocate_across_sequential_calls() const override { return GetParam(); }
};
// MSA on a module whose only instruction is a parameter: the parameter must
// keep its original (default-memory) layout.
TEST_P(MemorySpaceAssignmentTest, ParameterOnly) {
  HloComputation::Builder builder(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  HloInstruction* p0 =
      builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  HloSchedule schedule(module.get());
  schedule.set_sequence(computation, {p0});
  TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get());
  EXPECT_THAT(p0, op::ShapeWithLayout(shape));
}
// Small diamond (add/sub feeding mul): the intermediates should be placed in
// alternate memory at distinct offsets, while parameters and the root stay
// in default memory.
TEST_P(MemorySpaceAssignmentTest, Simple) {
  HloComputation::Builder builder(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  HloInstruction* p0 =
      builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
  HloInstruction* p1 =
      builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
  HloInstruction* add = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
  HloInstruction* sub = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
  HloInstruction* mul = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, sub));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  HloSchedule schedule(module.get());
  schedule.set_sequence(computation, {p0, p1, add, sub, mul});
  TF_CHECK_OK(module->set_schedule(schedule));
  auto preset_assignments = AssignMemorySpace(module.get());
  // Same dense layout but tagged with the alternate memory space.
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      {1, 0},
      {},
      1,
      0, kAlternateMemorySpace);
  EXPECT_THAT(p0, op::ShapeWithLayout(shape));
  EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  EXPECT_THAT(mul, op::ShapeWithLayout(shape));
  EXPECT_THAT(add, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(sub, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_EQ(preset_assignments->chunks().size(), 3);
  EXPECT_EQ(preset_assignments->assignment_informations().size(), 1);
  // Distinct buffers must not alias in alternate memory.
  EXPECT_NE(preset_assignments->chunks()[0].second.offset,
            preset_assignments->chunks()[1].second.offset);
}
// A long negate chain delays p1's use: MSA should issue an async copy
// (prefetch) of p1 into alternate memory, starting right after the
// parameters and completing just before the final add.
TEST_P(MemorySpaceAssignmentTest, NegateChain) {
  HloComputation::Builder builder(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  HloInstruction* p0 =
      builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
  HloInstruction* p1 =
      builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
  HloInstruction* negate0 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
  HloInstruction* negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
  HloInstruction* negate2 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
  HloInstruction* negate3 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
  HloInstruction* negate4 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
  HloInstruction* negate5 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
  HloInstruction* negate6 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
  HloInstruction* add = builder.AddInstruction(
      HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
  auto module = CreateNewVerifiedModule();
  HloComputation* computation = module->AddEntryComputation(builder.Build());
  HloSchedule schedule(module.get());
  schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
                                      negate3, negate4, negate5, negate6, add});
  TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get());
  // p1 should reach the add through a default->alternate async copy.
  EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
                                                       kDefaultMemorySpace,
                                                       op::Parameter(1))));
  EXPECT_THAT(p0, op::ShapeWithLayout(shape));
  EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      {1, 0},
      {},
      1,
      0, kAlternateMemorySpace);
  EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
  EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
  // The copy should start as early as possible (right after the parameters)
  // and finish as late as possible (right before the add).
  const HloInstructionSequence& sequence =
      module->schedule().sequence(computation);
  EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
  EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
  EXPECT_THAT(sequence.instructions()[2], op::CopyStart());
  EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
// With options.always_spill_to_default_memory enabled, prefetches should be
// issued "just in time": the CopyDone is scheduled one step before its use
// (the add) and the CopyStart two steps before, rather than overlapping the
// whole negate chain.
TEST_P(MemorySpaceAssignmentTest, AlwaysSpillJitPrefetchTest) {
  absl::string_view hlo_string = R"(
  HloModule module, is_scheduled=true

  ENTRY entry {
    p0 = f32[2,3]{1,0} parameter(0)
    p1 = f32[2,3]{1,0} parameter(1)
    negate0 = f32[2,3]{1,0} negate(p0)
    negate1 = f32[2,3]{1,0} negate(negate0)
    negate2 = f32[2,3]{1,0} negate(negate1)
    negate3 = f32[2,3]{1,0} negate(negate2)
    negate4 = f32[2,3]{1,0} negate(negate3)
    negate5 = f32[2,3]{1,0} negate(negate4)
    negate6 = f32[2,3]{1,0} negate(negate5)
    ROOT add = f32[2,3]{1,0} add(negate6, p1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  Options options = DefaultMemorySpaceOptions();
  options.always_spill_to_default_memory = true;
  AssignMemorySpace(module.get(), options);
  const HloInstructionSequence& sequence =
      module->schedule().sequence(module->entry_computation());
  for (int i = 0; i < sequence.instructions().size(); ++i) {
    VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
  }
  // Use the live-range analysis to check schedule positions relative to the
  // add, rather than hard-coding absolute sequence indices.
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                          HloAliasAnalysis::Run(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
                          HloLiveRange::Run(module->schedule(), *alias_analysis,
                                            module->entry_computation()));
  const HloInstruction* add = FindInstruction(module.get(), "add");
  const HloInstruction* cd = add->operand(1);
  // CopyDone is scheduled immediately before the add.
  EXPECT_THAT(cd, op::CopyDone());
  EXPECT_EQ(live_range->instruction_schedule().at(add),
            live_range->instruction_schedule().at(cd) + 1);
  const HloInstruction* cs = cd->operand(0);
  // CopyStart is scheduled immediately before the CopyDone.
  EXPECT_THAT(cs, op::CopyStart());
  EXPECT_EQ(live_range->instruction_schedule().at(add),
            live_range->instruction_schedule().at(cs) + 2);
  // p1 is still prefetched into the alternate memory space.
  EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
                                                       kDefaultMemorySpace,
                                                       op::Parameter(1))));
}
TEST_P(MemorySpaceAssignmentTest, AlwaysSpillPrefetchForSecondUseTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p0)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
add0 = f32[2,3]{1,0} add(negate5, negate0)
ROOT add1 = f32[2,3]{1,0} add(add0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* add1 = FindInstruction(module.get(), "add1");
const HloInstruction* cd1 = add1->operand(1);
EXPECT_THAT(cd1, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cd1) + 1);
const HloInstruction* cs1 = cd1->operand(0);
EXPECT_THAT(cs1, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cs1) + 2);
EXPECT_EQ(cd1->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* add0 = FindInstruction(module.get(), "add0");
const HloInstruction* cd0 = add0->operand(1);
EXPECT_THAT(cd0, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add0),
live_range->instruction_schedule().at(cd0) + 1);
const HloInstruction* cs0 = cd0->operand(0);
EXPECT_THAT(cs0, op::CopyStart() |
1,996 | cpp | tensorflow/tensorflow | best_fit_repacker | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker.cc | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_BEST_FIT_REPACKER_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_BEST_FIT_REPACKER_H_
#include <cstdint>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/memory_space_assignment/repacking.h"
namespace xla {
namespace memory_space_assignment {
// Repacker implementation backed by a global-decreasing-size best-fit heap.
// On a successful Repack(), each AllocationBlock's offset (and, for sliced
// blocks, its repacked slice data) is rewritten in place.
class MemorySpaceAssignmentBestFitRepacker
    : public MemorySpaceAssignmentRepacker {
 public:
  using BufferInterval =
      GlobalDecreasingSizeBestFitHeap<AllocationBlock>::BufferInterval;
  using BufferIntervalCompare =
      GlobalDecreasingSizeBestFitHeap<AllocationBlock>::BufferIntervalCompare;

  // Knobs for the best-fit repacking algorithm.
  struct BestFitRepackOptions {
    // If true, run a (fatal-on-failure) overlap check after repacking.
    bool validate = false;
    // Comparator determining packing order; nullptr selects the repacker's
    // default ordering.
    BufferIntervalCompare buffer_interval_compare = nullptr;
  };

  // Constructs a repacker with default options.
  MemorySpaceAssignmentBestFitRepacker(
      int64_t max_size, int64_t alignment,
      SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type)
      : MemorySpaceAssignmentRepacker(max_size, alignment),
        options_(BestFitRepackOptions()),
        slice_time_permutation_iterator_type_(
            slice_time_permutation_iterator_type) {}
  // Constructs a repacker with caller-supplied options.
  MemorySpaceAssignmentBestFitRepacker(
      int64_t max_size, int64_t alignment,
      SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type,
      BestFitRepackOptions options)
      : MemorySpaceAssignmentRepacker(max_size, alignment),
        options_(std::move(options)),
        slice_time_permutation_iterator_type_(
            slice_time_permutation_iterator_type) {}

  // Repacks `allocations`; returns true iff the packing fits in max_size.
  absl::StatusOr<bool> Repack(
      absl::Span<AllocationBlock*> allocations) override;

 private:
  BestFitRepackOptions options_;
  SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
// A block is considered sliced iff it carries original slicing metadata.
bool IsSliced(const AllocationBlock* block) {
  return static_cast<bool>(block->original_slice_data);
}
template <typename T>
std::vector<const AllocationBlock*> SortAllocationBlocks(const T& container) {
std::vector<const AllocationBlock*> result;
result.insert(result.end(), container.begin(), container.end());
absl::c_sort(
result, [](const AllocationBlock* lhs, const AllocationBlock* rhs) {
return std::make_tuple(lhs->inclusive_start_time, lhs->end_time,
lhs->initial_offset, lhs->size) <
std::make_tuple(rhs->inclusive_start_time, rhs->end_time,
rhs->initial_offset, rhs->size);
});
return result;
}
// Returns a raw pointer to the slicing metadata, or nullptr when absent.
const SlicedAllocationData* GetSlicedAllocationDataPointer(
    const std::optional<SlicedAllocationData>& sliced_allocation_data) {
  return sliced_allocation_data.has_value() ? &sliced_allocation_data.value()
                                            : nullptr;
}
// Best-fit heap that performs the actual repacking of MSA allocation blocks.
// Usage: construct, ImportAllocationBlocks(), then Repack(). On success each
// block's `offset` (and `repacked_slice_data` for sliced blocks) is updated
// in place.
class BestFitRepacker
    : public GlobalDecreasingSizeBestFitHeap<AllocationBlock> {
 public:
  BestFitRepacker(
      const memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
          BestFitRepackOptions& options,
      SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type,
      int64_t max_size, int64_t alignment)
      : GlobalDecreasingSizeBestFitHeap<AllocationBlock>(
            alignment, kCustom,
            // Fall back to the default ordering when the caller does not
            // supply a comparator.
            (options.buffer_interval_compare ? options.buffer_interval_compare
                                             : DefaultBufferIntervalCompare()),
            slice_time_permutation_iterator_type),
        validate_(options.validate),
        max_size_(max_size) {}

  // Registers `allocations` with the heap: builds one full BufferInterval per
  // block, threads colocation information through the intervals (only one
  // block per colocation set actually needs an allocation), and builds the
  // sliced buffer intervals for blocks that were originally sliced.
  void ImportAllocationBlocks(absl::Span<AllocationBlock*> allocations) {
    allocation_blocks_ = allocations;
    for (AllocationBlock* allocation_block : allocation_blocks_) {
      bool need_allocation = true;
      CHECK_NE(allocation_block->next_colocated, nullptr);
      // Walk the circular colocation list: if a colocated block already owns
      // an interval that needs allocation, attach this block to it instead of
      // allocating it separately.
      for (AllocationBlock* colocated = allocation_block->next_colocated;
           colocated != allocation_block;
           colocated = colocated->next_colocated) {
        auto aliased_it = full_buffer_interval_map_.find(colocated);
        if (aliased_it != full_buffer_interval_map_.end() &&
            aliased_it->second.need_allocation) {
          aliased_it->second.colocations.push_back(allocation_block);
          need_allocation = false;
          break;
        }
      }
      full_buffer_interval_map_.insert(
          std::make_pair(allocation_block,
                         BufferInterval{allocation_block,
                                        allocation_block->size,
                                        allocation_block->inclusive_start_time,
                                        allocation_block->end_time,
                                        {},
                                        need_allocation}));
    }
    // Second pass: wrap each full interval in a (possibly sliced) mutable
    // interval and register the interval used for free-chunk computation.
    for (AllocationBlock* allocation_block : allocation_blocks_) {
      BufferInterval& full_buffer_interval =
          full_buffer_interval_map_[allocation_block];
      SlicedBufferInterval& sliced_buffer_interval =
          sliced_buffer_interval_map_
              .insert(std::make_pair(
                  allocation_block, SlicedBufferInterval::CreateMutableInterval(
                                        full_buffer_interval)))
              .first->second;
      if (IsSliced(allocation_block)) {
        const SlicedAllocationData& original_slice_data =
            allocation_block->original_slice_data.value();
        CHECK(!original_slice_data.slices_sorted_by_offset.empty());
        sliced_buffer_interval.Slice(original_slice_data.SizesSortedByOffset());
        sliced_buffer_interval.UpdateInclusiveSliceStartTimes(
            original_slice_data.SortedInclusiveStartTimes());
      }
      buffer_intervals_[allocation_block] =
          sliced_buffer_interval.IntervalForMakeFreeChunks(
              sliced_buffer_interval.num_slices() - 1);
    }
    CHECK_EQ(allocation_blocks_.size(), buffer_intervals_.size());
    CHECK_EQ(allocation_blocks_.size(), full_buffer_interval_map_.size());
    CHECK_EQ(allocation_blocks_.size(), sliced_buffer_interval_map_.size());
    // Log summary statistics about what was imported (computed lazily so the
    // work only happens when VLOG(2) is enabled... NOTE(review): the lambda
    // is invoked unconditionally; only the streaming is gated by VLOG).
    VLOG(2) << [&]() -> std::string {
      int sliced_blocks = 0;
      int colocation_sets = 0;
      int colocation_sets_with_multiple_sliced_blocks = 0;
      absl::flat_hash_set<const AllocationBlock*> seen_blocks;
      for (const auto& allocation_and_buffer_interval : buffer_intervals_) {
        const AllocationBlock* block = allocation_and_buffer_interval.first;
        const BufferInterval& min_buffer_interval =
            allocation_and_buffer_interval.second;
        if (IsSliced(block)) {
          ++sliced_blocks;
        }
        if (seen_blocks.contains(block)) {
          continue;
        }
        seen_blocks.insert(block);
        ++colocation_sets;
        int num_sliced_colocations = (IsSliced(block) ? 1 : 0);
        for (const AllocationBlock* colocation :
             GetTransitiveColocations(min_buffer_interval)) {
          seen_blocks.insert(colocation);
          if (IsSliced(colocation)) {
            ++num_sliced_colocations;
          }
        }
        if (num_sliced_colocations > 1) {
          ++colocation_sets_with_multiple_sliced_blocks;
        }
      }
      return absl::StrCat(
          "Imported repacking stats: num_blocks=", allocation_blocks_.size(),
          "; num_sliced_blocks=", sliced_blocks,
          "; num_colocation_sets=", colocation_sets,
          "; num_colocation_sets_with_multiple_sliced_blocks=",
          colocation_sets_with_multiple_sliced_blocks);
    }();
  }

  // Default packing order: blocks whose colocation-extended live range is
  // longest come first (start minus extended end is most negative), ties
  // broken by larger size, then by the block itself for determinism.
  BufferIntervalCompare DefaultBufferIntervalCompare() const {
    return LessThanByKey([this](const BufferInterval& x) {
      const BufferInterval& full_buffer_interval =
          full_buffer_interval_map_.at(x.buffer);
      int64_t full_buffer_interval_end = full_buffer_interval.end;
      // Extend the end time to cover all transitive colocations.
      for (auto colocation : GetTransitiveColocations(x)) {
        full_buffer_interval_end =
            std::max(full_buffer_interval_end,
                     full_buffer_interval_map_.at(colocation).end);
      }
      return std::make_tuple(
          full_buffer_interval.start - full_buffer_interval_end,
          -full_buffer_interval.size, std::cref(*full_buffer_interval.buffer));
    });
  }

  // Records the chosen `chunks` for `allocation_block`: updates the heap size
  // and interval tree, and remembers the block's new offset (for sliced
  // blocks, the minimum offset across slices) plus its repacked slice data.
  void CommitChunks(const AllocationBlock* allocation_block,
                    const std::vector<Chunk>& chunks) {
    VLOG(3) << "Committing repack chunks for " << allocation_block->ToString();
    int64_t new_offset = -1;
    std::optional<SlicedAllocationData> repacked_slice_data = std::nullopt;
    if (IsSliced(allocation_block)) {
      const SlicedAllocationData& original_slice_data =
          allocation_block->original_slice_data.value();
      // One chunk per original slice.
      CHECK_EQ(chunks.size(),
               original_slice_data.slices_sorted_by_offset.size());
      repacked_slice_data = SlicedAllocationData();
      repacked_slice_data->slices_sorted_by_offset.reserve(chunks.size());
      std::vector<int64_t> sorted_inclusive_start_times =
          original_slice_data.SortedInclusiveStartTimes();
      for (int i = 0; i < chunks.size(); ++i) {
        const Chunk& chunk = chunks[i];
        int64_t start_time = sorted_inclusive_start_times[i];
        result_.heap_size = result_.UpdatedHeapSize(chunk);
        VLOG(3) << "Adding sliced chunk " << chunk.ToString() << " at ["
                << start_time << ", " << allocation_block->end_time << "]";
        interval_tree_.Add(start_time, allocation_block->end_time, chunk);
        new_offset = (new_offset == -1 ? chunk.offset
                                       : std::min(new_offset, chunk.offset));
        repacked_slice_data->slices_sorted_by_offset.push_back(
            AllocatedSlice({chunk.size, chunk.offset, start_time}));
      }
      // Re-establish the sorted-by-offset invariant of the slice list.
      absl::c_sort(repacked_slice_data->slices_sorted_by_offset,
                   [](const AllocatedSlice& lhs, const AllocatedSlice& rhs) {
                     return lhs.offset < rhs.offset;
                   });
    } else {
      CHECK_EQ(chunks.size(), 1);
      new_offset = chunks.front().offset;
      result_.heap_size = result_.UpdatedHeapSize(chunks.front());
      VLOG(3) << "Adding unsliced chunk " << chunks.front().ToString()
              << " at [" << allocation_block->inclusive_start_time << ", "
              << allocation_block->end_time << ")";
      interval_tree_.Add(allocation_block->inclusive_start_time,
                         allocation_block->end_time, chunks.front());
    }
    CHECK_NE(new_offset, -1);
    CHECK(!new_offsets_.contains(allocation_block));
    new_offsets_[allocation_block] = new_offset;
    if (repacked_slice_data.has_value()) {
      CHECK(IsSliced(allocation_block));
      CHECK(!new_repacked_slicing_.contains(allocation_block));
      new_repacked_slicing_[allocation_block] = *repacked_slice_data;
    }
  }

  // Per-colocation bookkeeping for sliced colocations of the block currently
  // being placed: its interval, its finder, and the chunks found so far.
  struct SlicedColocationData {
    SlicedBufferInterval* sliced_buffer_interval;
    SlicedAllocationFinder sliced_allocation_finder;
    std::vector<Chunk> chunks;
  };

  // Finds a placement for `min_buffer_interval`'s block that also works for
  // all of its transitive colocations (sliced colocations constrain which
  // offsets are acceptable), then commits the chunks for all of them.
  void FindAndCommitChunks(BufferInterval* min_buffer_interval) {
    const AllocationBlock* allocation_block = min_buffer_interval->buffer;
    SlicedBufferInterval& sliced_buffer_interval =
        sliced_buffer_interval_map_.at(allocation_block);
    int64_t max_colocation_size = GetMaxColocationSize(*min_buffer_interval);
    // Build a finder per sliced colocation (sorted for determinism).
    absl::flat_hash_map<const AllocationBlock*, SlicedColocationData>
        sliced_buffer_map;
    for (auto colocation :
         SortAllocationBlocks(GetTransitiveColocations(*min_buffer_interval))) {
      if (IsSliced(colocation)) {
        SlicedBufferInterval& colocation_sliced_buffer_interval =
            sliced_buffer_interval_map_.at(colocation);
        SlicedAllocationFinder sliced_colocation_finder =
            CreateSlicedAllocationFinder(
                colocation_sliced_buffer_interval, max_colocation_size,
                /*preferred_offset=*/-1,
                SliceTimePermutationIterator::CreateForRepack(
                    slice_time_permutation_iterator_type(),
                    GetSlicedAllocationDataPointer(
                        colocation->original_slice_data)),
                &SlicedAllocationFinder::AllOffsetsAllowed);
        sliced_buffer_map.insert(std::make_pair(
            colocation,
            SlicedColocationData{&colocation_sliced_buffer_interval,
                                 std::move(sliced_colocation_finder),
                                 /*chunks=*/{}}));
      }
    }
    // An offset is only acceptable if every sliced colocation can also be
    // placed at that offset; successful placements are cached in
    // `sliced_buffer_map` so they can be committed below.
    auto is_offset_allowed = [this, &sliced_buffer_map](int64_t offset) {
      for (auto& block_and_colocation_data : sliced_buffer_map) {
        SlicedColocationData& sliced_colocation_data =
            block_and_colocation_data.second;
        auto colocation_chunks =
            sliced_colocation_data.sliced_allocation_finder.FindForOffset(
                offset);
        colocation_chunks = PostProcessFindChunkCandidatesResult(
            *sliced_colocation_data.sliced_buffer_interval,
            std::move(colocation_chunks));
        if (colocation_chunks.empty()) {
          return false;
        }
        sliced_colocation_data.chunks = std::move(colocation_chunks);
      }
      return true;
    };
    SlicedAllocationFinder finder = CreateSlicedAllocationFinder(
        sliced_buffer_interval, max_colocation_size, /*preferred_offset=*/-1,
        SliceTimePermutationIterator::CreateForRepack(
            slice_time_permutation_iterator_type(),
            GetSlicedAllocationDataPointer(
                allocation_block->original_slice_data)),
        is_offset_allowed);
    std::vector<Chunk> chunks = PostProcessFindChunkCandidatesResult(
        sliced_buffer_interval, finder.Find());
    int64_t min_offset =
        absl::c_min_element(chunks, [](const Chunk& lhs, const Chunk& rhs) {
          return lhs.offset < rhs.offset;
        })->offset;
    CommitChunks(allocation_block, chunks);
    // Commit every colocation at the same placement: sliced colocations use
    // the chunks cached by is_offset_allowed, unsliced ones use a single
    // chunk at the minimum offset.
    for (auto colocation : GetTransitiveColocations(*min_buffer_interval)) {
      if (IsSliced(colocation)) {
        CommitChunks(colocation, sliced_buffer_map.at(colocation).chunks);
      } else {
        const BufferInterval& colocation_full_buffer_interval =
            full_buffer_interval_map_[colocation];
        CommitChunks(colocation,
                     {Chunk::FromOffsetSize(
                         min_offset, colocation_full_buffer_interval.size)});
      }
    }
  }

  // Chunk placement is handled entirely by CommitChunks(); the base-class
  // hook must never be reached.
  void AddToChunkMap(const AllocationBlock* buffer, Chunk chunk) override {
    LOG(FATAL) << "We should never get here.";
  }

  // Places every interval that still needs allocation (in sorted order) and
  // returns the resulting heap layout.
  absl::StatusOr<Result> Finish() override {
    std::vector<BufferInterval> sorted_buffer_intervals =
        GetSortedBufferIntervals();
    for (auto& buffer_interval : sorted_buffer_intervals) {
      if (!buffer_interval.need_allocation) {
        continue;
      }
      FindAndCommitChunks(&buffer_interval);
    }
    Result result;
    result.heap_size = result_.heap_size;
    result.heap_results.emplace_back(result_);
    return result;
  }

  // A chunk tagged with its live range, used only by DebuggingValidate().
  struct TimedChunk {
    std::string id;
    const AllocationBlock* block;
    int64_t start_inclusive;
    int64_t end_inclusive;
    Chunk chunk;

    // True iff the two chunks overlap both in time and in memory.
    bool Overlaps(const TimedChunk& timed_chunk) {
      if (timed_chunk.start_inclusive > end_inclusive ||
          timed_chunk.end_inclusive < start_inclusive) {
        return false;
      }
      return chunk.OverlapsWith(timed_chunk.chunk);
    }
  };

  // O(n^2) sanity check that no two committed (slice) chunks overlap; logs
  // every violation and then dies if any was found.
  void DebuggingValidate() {
    std::vector<TimedChunk> timed_chunks;
    for (const AllocationBlock* block : allocation_blocks_) {
      if (IsSliced(block)) {
        for (int i = 0;
             i < block->repacked_slice_data->slices_sorted_by_offset.size();
             ++i) {
          const AllocatedSlice& slice =
              block->repacked_slice_data->slices_sorted_by_offset[i];
          timed_chunks.push_back(
              TimedChunk{absl::StrCat(((int64_t)block), "_slice_", i), block,
                         slice.inclusive_start_time, block->end_time,
                         Chunk::FromOffsetSize(slice.offset, slice.size)});
        }
      } else {
        timed_chunks.push_back(
            TimedChunk{absl::StrCat(((int64_t)block)), block,
                       block->inclusive_start_time, block->end_time,
                       Chunk::FromOffsetSize(block->offset, block->size)});
      }
    }
    bool overlap_found = false;
    for (int i = 0; i < timed_chunks.size(); ++i) {
      for (int j = i + 1; j < timed_chunks.size(); ++j) {
        if (timed_chunks[i].Overlaps(timed_chunks[j])) {
          overlap_found = true;
          LOG(ERROR) << "Allocation block overlap\n"
                     << "     " << timed_chunks[i].block->ToString()
                     << "\n    " << timed_chunks[j].block->ToString();
        }
      }
    }
    if (overlap_found) {
      LOG(FATAL) << "Allocation overlap found";
    }
  }

  // Runs the packing and, if the result fits in max_size_, writes the new
  // offsets (and slice data) back into the allocation blocks. Returns false
  // without mutating the blocks when the packing does not fit.
  bool Repack() {
    TF_CHECK_OK(Finish().status());
    bool success = result_.heap_size <= max_size_;
    if (!success) {
      VLOG(1) << "Repacking unsuccessful with heap size " << result_.heap_size;
      return false;
    }
    for (AllocationBlock* block : allocation_blocks_) {
      CHECK(new_offsets_.contains(block));
      block->offset = new_offsets_[block];
      if (!IsSliced(block)) {
        continue;
      }
      CHECK(new_repacked_slicing_.contains(block));
      block->repacked_slice_data = std::move(new_repacked_slicing_[block]);
    }
    if (validate_) {
      DebuggingValidate();
    }
    if (VLOG_IS_ON(2)) {
      for (AllocationBlock* block : allocation_blocks_) {
        VLOG(2) << "AllocationBlock after repacking: " << block->ToString();
      }
    }
    VLOG(1) << "Repacking successful with heap size " << result_.heap_size;
    return true;
  }

 private:
  // Whether to run DebuggingValidate() after a successful repack.
  bool validate_ = false;
  // Maximum heap size a successful packing may use.
  int64_t max_size_;

  // The blocks being repacked; set by ImportAllocationBlocks().
  absl::Span<AllocationBlock*> allocation_blocks_;
  // Full live-range interval per block (colocations attached).
  absl::flat_hash_map<const AllocationBlock*, BufferInterval>
      full_buffer_interval_map_;
  // Sliced view of each block's interval (one slice for unsliced blocks).
  absl::flat_hash_map<const AllocationBlock*, SlicedBufferInterval>
      sliced_buffer_interval_map_;
  // Results staged by CommitChunks(), written back to blocks in Repack().
  absl::flat_hash_map<const AllocationBlock*, int64_t> new_offsets_;
  absl::flat_hash_map<const AllocationBlock*, SlicedAllocationData>
      new_repacked_slicing_;
};
}
namespace memory_space_assignment {
// Repacks `allocations` using a best-fit heap; returns true iff the packing
// fits within the configured maximum size. On success, offsets (and slice
// data, where present) are written back into the blocks in place.
absl::StatusOr<bool> MemorySpaceAssignmentBestFitRepacker::Repack(
    absl::Span<AllocationBlock*> allocations) {
  BestFitRepacker repacker(options_, slice_time_permutation_iterator_type_,
                           max_size_, alignment_);
  repacker.ImportAllocationBlocks(allocations);
  return repacker.Repack();
}
}
} | #include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <cstdint>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "tsl/platform/test.h"
namespace xla {
// Fixture providing a repacker (max_size=100, alignment=1, validation on) and
// a factory for AllocationBlocks with stable addresses.
class MemorySpaceAssignmentBestFitRepackerTest : public ::testing::Test {
 protected:
  MemorySpaceAssignmentBestFitRepackerTest()
      : repacker_(100, 1, SliceTimePermutationIterator::Ty::kAll, options_) {}

  // Creates a block with the given live range and size; the block's id is its
  // creation index and it is initially colocated only with itself.
  AllocationBlock* MakeAllocationBlock(int64_t start_time, int64_t end_time,
                                       int64_t size,
                                       int64_t initial_offset = -1) {
    allocation_blocks_.push_back(
        {start_time, end_time, size, /*offset=*/-1, initial_offset,
         static_cast<int64_t>(allocation_blocks_.size())});
    AllocationBlock* block = &allocation_blocks_.back();
    block->next_colocated = block;
    return block;
  }

  // std::list keeps element addresses stable across push_back, so the
  // AllocationBlock* handed out above (and the colocation pointers the tests
  // set up) stay valid.
  std::list<AllocationBlock> allocation_blocks_;
  memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
      BestFitRepackOptions options_{/*validate=*/true,
                                    /*buffer_interval_compare=*/nullptr};
  memory_space_assignment::MemorySpaceAssignmentBestFitRepacker repacker_;
};
// Two overlapping blocks: the larger, longer-lived one lands at offset 0 and
// the smaller one is stacked on top of it.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Simple) {
  std::vector<AllocationBlock*> blocks = {MakeAllocationBlock(10, 20, 10),
                                          MakeAllocationBlock(5, 25, 15)};
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(blocks)));
  EXPECT_EQ(blocks[0]->offset, 15);
  EXPECT_EQ(blocks[1]->offset, 0);
}
// Two colocated blocks (circularly linked) must receive the same offset even
// though their live ranges are disjoint; a third overlapping block packs
// beneath them.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Colocation) {
  std::vector<AllocationBlock*> allocation_blocks;
  allocation_blocks.push_back(MakeAllocationBlock(0, 2, 10));
  allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
  // Link the first two blocks into one colocation set.
  allocation_blocks[0]->next_colocated = allocation_blocks[1];
  allocation_blocks[1]->next_colocated = allocation_blocks[0];
  allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  // Colocated blocks share offset 15; the large block takes offset 0.
  EXPECT_EQ(allocation_blocks[0]->offset, 15);
  EXPECT_EQ(allocation_blocks[1]->offset, 15);
  EXPECT_EQ(allocation_blocks[2]->offset, 0);
}
// When the blocks cannot fit within the fixture's max_size of 100, Repack()
// returns false and leaves every block's offset untouched (-1).
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, TooLarge) {
  std::vector<AllocationBlock*> allocation_blocks;
  allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
  allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
  allocation_blocks.push_back(MakeAllocationBlock(15, 20, 10));
  allocation_blocks.push_back(MakeAllocationBlock(12, 22, 50));
  allocation_blocks.push_back(MakeAllocationBlock(10, 18, 20));
  EXPECT_FALSE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  // Failure must not mutate any block.
  EXPECT_EQ(allocation_blocks[0]->offset, -1);
  EXPECT_EQ(allocation_blocks[1]->offset, -1);
  EXPECT_EQ(allocation_blocks[2]->offset, -1);
  EXPECT_EQ(allocation_blocks[3]->offset, -1);
  EXPECT_EQ(allocation_blocks[4]->offset, -1);
}
// Colocated blocks of different sizes still share one offset; the packing
// must reserve enough room for the larger member of the colocation set.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, ColocationDifferentSizes) {
  std::vector<AllocationBlock*> allocation_blocks;
  allocation_blocks.push_back(MakeAllocationBlock(0, 2, 5));
  allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
  // Blocks 0 (size 5) and 1 (size 10) form one colocation set.
  allocation_blocks[0]->next_colocated = allocation_blocks[1];
  allocation_blocks[1]->next_colocated = allocation_blocks[0];
  allocation_blocks.push_back(MakeAllocationBlock(9, 11, 2));
  allocation_blocks.push_back(MakeAllocationBlock(1, 2, 2));
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  EXPECT_EQ(allocation_blocks[0]->offset, 0);
  EXPECT_EQ(allocation_blocks[1]->offset, 0);
  EXPECT_EQ(allocation_blocks[2]->offset, 10);
  EXPECT_EQ(allocation_blocks[3]->offset, 5);
}
// Mixes sliced and unsliced blocks and checks the exact offsets and repacked
// slice data produced. A custom comparator pins the packing order to block
// creation order so the expectations are deterministic.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, RepackedSlicesFit) {
  std::vector<AllocationBlock*> allocation_blocks;
  allocation_blocks.push_back(MakeAllocationBlock(0, 15, 2));
  allocation_blocks.push_back(MakeAllocationBlock(11, 21, 3));
  allocation_blocks.push_back(MakeAllocationBlock(16, 25, 4));
  // Blocks 2-4 are sliced; slice offsets start unassigned (-1).
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, -1, 16}, AllocatedSlice{2, -1, 22}}});
  allocation_blocks.push_back(MakeAllocationBlock(26, 33, 4));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, -1, 26}, AllocatedSlice{2, -1, 30}}});
  allocation_blocks.push_back(MakeAllocationBlock(19, 25, 2));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{1, -1, 19}, AllocatedSlice{1, -1, 22}}});
  allocation_blocks.push_back(MakeAllocationBlock(26, 29, 2));
  // Pack the blocks in creation order to make the outcome deterministic.
  absl::flat_hash_map<AllocationBlock*, int> sort_keys;
  for (int i = 0; i < allocation_blocks.size(); ++i) {
    sort_keys[allocation_blocks[i]] = i;
  }
  options_.buffer_interval_compare = LessThanByKey(
      [sort_keys](const memory_space_assignment::
                      MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
        return sort_keys.at(x.buffer);
      });
  repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
      100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  // Unsliced blocks get offsets but no repacked slice data.
  EXPECT_EQ(allocation_blocks[0]->offset, 0);
  EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[1]->offset, 2);
  EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
  // Sliced blocks get both an offset (minimum over slices) and per-slice
  // placements.
  EXPECT_EQ(allocation_blocks[2]->offset, 0);
  ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 0, 16}, AllocatedSlice{2, 2, 22}}})));
  EXPECT_EQ(allocation_blocks[3]->offset, 0);
  ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 0, 26}, AllocatedSlice{2, 2, 30}}})));
  EXPECT_EQ(allocation_blocks[4]->offset, 4);
  ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{1, 4, 22}, AllocatedSlice{1, 5, 19}}})));
  EXPECT_EQ(allocation_blocks[5]->offset, 2);
  EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
// After repacking, the size<->start-time association of a sliced block must
// match the original: here the larger slice originally started earlier, so in
// the repacked result the larger slice must again have the earlier inclusive
// start time.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
       SliceTimePermutationsMatchOriginalSizeTimeMapping) {
  std::vector<AllocationBlock*> allocation_blocks;
  allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
  allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 2));
  // Sliced block: size-2 slice starts at t=5, size-1 slice at t=11.
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, 2, 5}, AllocatedSlice{1, 4, 11}}});
  allocation_blocks.push_back(MakeAllocationBlock(5, 15, 2, 6));
  // Pack in creation order for determinism.
  absl::flat_hash_map<AllocationBlock*, int> sort_keys;
  for (int i = 0; i < allocation_blocks.size(); ++i) {
    sort_keys[allocation_blocks[i]] = i;
  }
  options_.buffer_interval_compare = LessThanByKey(
      [sort_keys](const memory_space_assignment::
                      MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
        return sort_keys.at(x.buffer);
      });
  repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
      100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  ASSERT_TRUE(allocation_blocks[1]->repacked_slice_data.has_value());
  ASSERT_EQ(
      allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset.size(),
      2);
  const AllocatedSlice& slice_with_smaller_offset =
      allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[0];
  const AllocatedSlice& slice_with_larger_offset =
      allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[1];
  // The slice at the smaller offset is the bigger one...
  ASSERT_GT(slice_with_smaller_offset.size, slice_with_larger_offset.size);
  const AllocatedSlice& larger_slice = slice_with_smaller_offset;
  const AllocatedSlice& smaller_slice = slice_with_larger_offset;
  // ...and, as in the original slicing, the bigger slice starts earlier.
  ASSERT_LT(larger_slice.inclusive_start_time,
            smaller_slice.inclusive_start_time);
}
// Companion to the test above: checks the concrete repacked slice data for a
// sliced block whose smaller slice originally starts first, ensuring the
// size-to-start-time mapping survives repacking unchanged.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
       SliceTimePermutationsMatchOriginalSizeTimeMapping2) {
  std::vector<AllocationBlock*> allocation_blocks;
  // Two unsliced blocks occupying disjoint time ranges at offset 0.
  allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
  allocation_blocks.push_back(MakeAllocationBlock(11, 20, 2, 4));
  // Sliced block: size-1 slice starts at time 5 (offset 1), size-2 slice at
  // time 11 (offset 2).
  allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 1));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{1, 1, 5}, AllocatedSlice{2, 2, 11}}});
  // Process blocks in insertion order.
  absl::flat_hash_map<AllocationBlock*, int> sort_keys;
  for (int i = 0; i < allocation_blocks.size(); ++i) {
    sort_keys[allocation_blocks[i]] = i;
  }
  options_.buffer_interval_compare = LessThanByKey(
      [sort_keys](const memory_space_assignment::
                      MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
        return sort_keys.at(x.buffer);
      });
  repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
      100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  // Unsliced blocks can share offset 0 because their lifetimes don't overlap.
  EXPECT_EQ(allocation_blocks[0]->offset, 0);
  EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[1]->offset, 0);
  EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
  // The sliced block moves to offset 2; slice sizes keep their original start
  // times (size 1 at time 5, size 2 at time 11), only offsets shift.
  EXPECT_EQ(allocation_blocks[2]->offset, 2);
  ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{1, 2, 5}, AllocatedSlice{2, 3, 11}}})));
}
// Exercises repacking of two sliced allocations that are colocated with each
// other (linked via next_colocated in both directions): both must land at the
// same offset and receive consistent repacked slice data.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, SlicedColocationsFit) {
  std::vector<AllocationBlock*> allocation_blocks;
  // Three unsliced blocks with overlapping lifetimes.
  allocation_blocks.push_back(MakeAllocationBlock(0, 12, 2));
  allocation_blocks.push_back(MakeAllocationBlock(0, 8, 2));
  allocation_blocks.push_back(MakeAllocationBlock(5, 11, 2));
  // Block 3: sliced, total size 5; offsets are -1 (unassigned) initially.
  allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, -1, 15}, AllocatedSlice{3, -1, 18}}});
  // Block 4: sliced, total size 4; colocated with block 3 (linked both ways).
  allocation_blocks.push_back(MakeAllocationBlock(9, 14, 4));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, -1, 9}, AllocatedSlice{2, -1, 12}}});
  allocation_blocks.back()->next_colocated = allocation_blocks[3];
  allocation_blocks[3]->next_colocated = allocation_blocks.back();
  // Block 5: unsliced, overlaps block 3 in time.
  allocation_blocks.push_back(MakeAllocationBlock(15, 17, 5));
  // Process blocks in insertion order.
  absl::flat_hash_map<AllocationBlock*, int> sort_keys;
  for (int i = 0; i < allocation_blocks.size(); ++i) {
    sort_keys[allocation_blocks[i]] = i;
  }
  options_.buffer_interval_compare = LessThanByKey(
      [sort_keys](const memory_space_assignment::
                      MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
        return sort_keys.at(x.buffer);
      });
  repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
      100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)))
  EXPECT_EQ(allocation_blocks[0]->offset, 0);
  EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[1]->offset, 2);
  EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[2]->offset, 4);
  ASSERT_FALSE(allocation_blocks[2]->repacked_slice_data.has_value());
  // The colocated sliced blocks both land at offset 2, with per-block slice
  // offsets assigned relative to that base.
  EXPECT_EQ(allocation_blocks[3]->offset, 2);
  ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 2, 15}, AllocatedSlice{3, 4, 18}}})));
  EXPECT_EQ(allocation_blocks[4]->offset, 2);
  ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 2, 9}, AllocatedSlice{2, 4, 12}}})));
  EXPECT_EQ(allocation_blocks[5]->offset, 4);
  EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
// Checks that slice-time permutations respect the original size-to-time
// mapping even when the sliced allocation is colocated with another sliced
// allocation.
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
       SlicedColocationsPermutationsMatchOriginalSizeTimeMapping) {
  std::vector<AllocationBlock*> allocation_blocks;
  // Two small unsliced blocks in disjoint time windows.
  allocation_blocks.push_back(MakeAllocationBlock(1, 5, 2));
  allocation_blocks.push_back(MakeAllocationBlock(11, 15, 2));
  // Block 2: sliced, total size 5; size-2 slice starts at time 6 (offset 2),
  // size-3 slice at time 1 (offset 4).
  allocation_blocks.push_back(MakeAllocationBlock(1, 10, 5));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, 2, 6}, AllocatedSlice{3, 4, 1}}});
  // Block 3: sliced, colocated with block 2 (linked both ways).
  allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
  allocation_blocks.back()->original_slice_data = SlicedAllocationData(
      {{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}});
  allocation_blocks.back()->next_colocated = allocation_blocks[2];
  allocation_blocks[2]->next_colocated = allocation_blocks.back();
  // Process blocks in insertion order.
  absl::flat_hash_map<AllocationBlock*, int> sort_keys;
  for (int i = 0; i < allocation_blocks.size(); ++i) {
    sort_keys[allocation_blocks[i]] = i;
  }
  options_.buffer_interval_compare = LessThanByKey(
      [sort_keys](const memory_space_assignment::
                      MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
        return sort_keys.at(x.buffer);
      });
  repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
      100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
  EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
  EXPECT_EQ(allocation_blocks[0]->offset, 0);
  EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[1]->offset, 0);
  EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
  EXPECT_EQ(allocation_blocks[2]->offset, 2);
  ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
  // NOTE(review): the EXPECT_EQ below inspects allocation_blocks[3], yet the
  // surrounding lines are validating allocation_blocks[2], and the identical
  // block-3 expectation is repeated verbatim further down. This looks like a
  // copy-paste slip — presumably block 2's repacked slice data was intended
  // here. TODO: confirm and substitute block 2's expected SlicedAllocationData.
  EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
  EXPECT_EQ(allocation_blocks[3]->offset, 2);
  ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
  EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
            (SlicedAllocationData(
                {{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
}
} |
1,997 | cpp | tensorflow/tensorflow | memory_bound_loop_optimizer | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer.cc | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_MEMORY_BOUND_LOOP_OPTIMIZER_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_MEMORY_BOUND_LOOP_OPTIMIZER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
// Optimizes alternate-memory placement for HLO values used inside a
// memory-bound loop. The optimizer models one loop iteration (plus the
// previous and next iterations for values that cross iteration boundaries),
// classifies each buffer into an allocation type, and assigns allocations
// subject to a per-instruction remaining-memory budget.
class MemoryBoundLoopOptimizer {
 public:
  // Aggregated view of one HLO buffer as observed from a single loop
  // iteration.
  struct LoopValue {
    // How the value may be placed in alternate memory.
    enum class AllocationType {
      // Defined and consumed entirely within one iteration.
      kTemporary,
      // Produced in one iteration and consumed at/after the producing point
      // of the next iteration.
      kLoopCarriedDependence,
      // Defined before the loop; used identically in every iteration.
      kPinned,
      // Defined before the loop; can be prefetched ahead of its uses.
      kPrefetch,
      kUnsupported
    };
    // Human-readable name for an AllocationType (for logging).
    static std::string AllocationTypeToString(AllocationType allocation_type);
    std::string ToString() const;
    bool IsAllocationTypeSupported() const;
    // All HloValues belonging to the underlying HloBuffer.
    std::vector<const HloValue*> hlo_values;
    // Defining position before the loop, if any (set only when the value has
    // no position inside the loop or previous iteration).
    std::optional<HloPosition> header_position;
    // (instruction index within the loop, position) pairs for positions in
    // the previous iteration.
    std::vector<std::pair<int64_t, HloPosition>> prev_iteration_positions;
    // (instruction index within the loop, position) pairs for positions in
    // the current iteration.
    std::vector<std::pair<int64_t, HloPosition>> loop_positions;
    // (instruction index within the loop, use) pairs for uses in the current
    // iteration.
    std::vector<std::pair<int64_t, HloUse>> loop_uses;
    // (instruction index within the loop, use) pairs for uses in the next
    // iteration.
    std::vector<std::pair<int64_t, HloUse>> next_iteration_uses;
    AllocationType allocation_type;
    // Buffer size in bytes (via the configured size function).
    int64_t size;
    // Estimated bytes saved by placing this value in alternate memory.
    float savings;
    // savings / size; used to prioritize values.
    float savings_per_byte;
    // Allocations assigned by the optimizer for this value.
    AllocationSequence allocations;
  };
  // Factory: constructs the optimizer for the loop spanning instruction
  // indices [loop_start, loop_end) and runs Initialize().
  static absl::StatusOr<std::unique_ptr<MemoryBoundLoopOptimizer>> Create(
      int loop_start, int loop_end, uint64_t alternate_memory_size,
      const MemoryBoundLoopOptimizerOptions& options,
      const HloLiveRange& hlo_live_range,
      const HloAliasAnalysis& alias_analysis_,
      const CostAnalysis& cost_analysis,
      const BufferValue::SizeFunction& size_function,
      const ReservedScopedMemoryFunction& reserved_scoped_memory_fn);
  // Runs the optimization: sorts values, allocates them, post-processes.
  void Optimize();
  // Estimates the per-iteration execution time after optimization.
  float CalculateExecutionTime() const;
  const std::vector<LoopValue>& loop_values() const { return loop_values_; }
  std::vector<LoopValue>& loop_values() { return loop_values_; }
  // Remaining alternate-memory budget per loop instruction index.
  const std::vector<int64_t>& remaining_memory() const {
    return remaining_memory_;
  }
  int loop_start() const { return loop_start_; }
  int loop_end() const { return loop_end_; }
  int loop_size() const { return loop_size_; }
 private:
  // Scratch state threaded through prefetch allocation.
  struct AllocatePrefetchesContext {
    // Values eligible for prefetching.
    absl::Span<LoopValue*> values;
    // Indices into `values`; ordering determined by the allocator.
    std::vector<int> value_indices;
    // Per-instruction bandwidth idle time available for copies.
    std::vector<float> bandwidth_idle_times;
    // Per-instruction memory consumed by already-planned prefetches.
    std::vector<int64_t> additional_memory_used;
  };
  MemoryBoundLoopOptimizer(
      int loop_start, int loop_end, uint64_t alternate_memory_size,
      const MemoryBoundLoopOptimizerOptions& options,
      const HloLiveRange& hlo_live_range,
      const HloAliasAnalysis& alias_analysis_,
      const CostAnalysis& cost_analysis,
      const BufferValue::SizeFunction& size_function,
      const ReservedScopedMemoryFunction& reserved_scoped_memory_fn);
  // Builds instruction-index maps for this/prev/next iterations, computes the
  // per-instruction memory budget, and creates LoopValues for all buffers.
  absl::Status Initialize();
  // Creates a LoopValue for `buffer` if its positions/uses make it eligible.
  void MaybeCreateLoopValue(const HloBuffer& buffer,
                            const HloComputation* loop_computation);
  void SortLoopValues();
  void PostProcess();
  void AllocateLoopValues();
  bool AllocateBetween(int64_t begin_idx, int64_t end_idx, int64_t size);
  bool AllocateTemporary(LoopValue& value);
  bool AllocatePinned(LoopValue& value);
  bool AllocatePrefetches(absl::Span<LoopValue*> values);
  bool AllocatePrefetch(int value_index, AllocatePrefetchesContext& context);
  void AddAllLoopPositionsAndUses(LoopValue& value,
                                  bool allocate_next_iteration_uses);
  float GetBandwidthIdleTime(int idx) const;
  float GetBandwidthIdleTime(
      int idx,
      const absl::flat_hash_map<const HloInstruction*,
                                std::vector<std::pair<int64_t, ShapeIndex>>>&
          additional_uses_in_alternate_mem,
      const absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>&
          additional_positions_in_alternate_mem) const;
  float GetInstructionElapsed(int idx) const;
  int loop_start_;
  int loop_end_;
  // loop_end_ - loop_start_.
  int loop_size_;
  uint64_t alternate_memory_size_;
  MemoryBoundLoopOptimizerOptions options_;
  const HloLiveRange& hlo_live_range_;
  const HloAliasAnalysis& alias_analysis_;
  const CostAnalysis& cost_analysis_;
  BufferValue::SizeFunction size_function_;
  // Maps each instruction of the respective iteration to its index within the
  // loop body ([0, loop_size_)).
  absl::flat_hash_map<const HloInstruction*, int64_t> instructions_in_loop_;
  absl::flat_hash_map<const HloInstruction*, int64_t>
      instructions_in_prev_iteration_;
  absl::flat_hash_map<const HloInstruction*, int64_t>
      instructions_in_next_iteration_;
  std::vector<LoopValue> loop_values_;
  // Remaining alternate-memory bytes per loop instruction index.
  std::vector<int64_t> remaining_memory_;
  absl::flat_hash_map<const HloInstruction*,
                      std::vector<std::pair<int64_t, ShapeIndex>>>
      uses_in_alternate_mem_;
  absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>
      positions_in_alternate_mem_;
  const ReservedScopedMemoryFunction& reserved_scoped_memory_fn_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace memory_space_assignment {
namespace {
// Looks up `instruction` in `instructions_to_index` and returns its index, or
// std::nullopt if the instruction is not present in the map.
std::optional<int64_t> GetInstructionIndex(
    const HloInstruction* instruction,
    const absl::flat_hash_map<const HloInstruction*, int64_t>&
        instructions_to_index) {
  if (auto it = instructions_to_index.find(instruction);
      it != instructions_to_index.end()) {
    return it->second;
  }
  return std::nullopt;
}
}
// Factory for MemoryBoundLoopOptimizer: constructs the optimizer via the
// private constructor (hence WrapUnique) and runs Initialize(), propagating
// any initialization error to the caller.
absl::StatusOr<std::unique_ptr<MemoryBoundLoopOptimizer>>
MemoryBoundLoopOptimizer::Create(
    int loop_start, int loop_end, uint64_t alternate_memory_size,
    const MemoryBoundLoopOptimizerOptions& options,
    const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
    const CostAnalysis& cost_analysis,
    const BufferValue::SizeFunction& size_function,
    const ReservedScopedMemoryFunction& reserved_scoped_memory_fn) {
  std::unique_ptr<MemoryBoundLoopOptimizer> optimizer =
      absl::WrapUnique(new MemoryBoundLoopOptimizer(
          loop_start, loop_end, alternate_memory_size, options, hlo_live_range,
          alias_analysis, cost_analysis, size_function,
          reserved_scoped_memory_fn));
  TF_RETURN_IF_ERROR(optimizer->Initialize());
  return std::move(optimizer);
}
// Private constructor: stores references and configuration. All real setup
// (instruction maps, memory budget, loop values) happens in Initialize().
MemoryBoundLoopOptimizer::MemoryBoundLoopOptimizer(
    int loop_start, int loop_end, uint64_t alternate_memory_size,
    const MemoryBoundLoopOptimizerOptions& options,
    const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
    const CostAnalysis& cost_analysis,
    const BufferValue::SizeFunction& size_function,
    const ReservedScopedMemoryFunction& reserved_scoped_memory_fn)
    : loop_start_(loop_start),
      loop_end_(loop_end),
      loop_size_(loop_end - loop_start),
      alternate_memory_size_(alternate_memory_size),
      options_(options),
      hlo_live_range_(hlo_live_range),
      alias_analysis_(alias_analysis),
      cost_analysis_(cost_analysis),
      size_function_(size_function),
      reserved_scoped_memory_fn_(reserved_scoped_memory_fn) {}
// Builds per-iteration instruction-index maps, computes the per-instruction
// alternate-memory budget (total size minus reserved scoped memory), and
// creates LoopValues for every buffer touched inside the loop.
absl::Status MemoryBoundLoopOptimizer::Initialize() {
  const auto& instruction_sequence =
      hlo_live_range_.flattened_instruction_sequence().instructions();
  VLOG(3) << "MemoryBoundLoopOptimizer::Initialize, loop start: " << loop_start_
          << ", loop end: " << loop_end_ << ", loop size: " << loop_size_;
  const HloComputation* loop_computation = nullptr;
  // The previous/next iterations occupy the loop_size_-sized windows directly
  // before/after the current iteration in the flattened schedule.
  int prev_iteration_start = loop_start_ - loop_size_;
  int next_iteration_start = loop_start_ + loop_size_;
  for (int i = 0; i < loop_size_; ++i) {
    const HloInstruction* loop_inst = instruction_sequence[loop_start_ + i];
    instructions_in_loop_[loop_inst] = i;
    const HloInstruction* prev_iteration_inst =
        instruction_sequence[prev_iteration_start + i];
    instructions_in_prev_iteration_[prev_iteration_inst] = i;
    const HloInstruction* next_iteration_inst =
        instruction_sequence[next_iteration_start + i];
    instructions_in_next_iteration_[next_iteration_inst] = i;
    VLOG(3) << " inst in loop [" << (i) << "]: " << loop_inst->name();
    if (!loop_computation) {
      loop_computation = loop_inst->parent();
    } else {
      // All loop instructions must belong to the same computation.
      TF_RET_CHECK(loop_computation == loop_inst->parent());
    }
    // Budget at this instruction: alternate memory size minus memory reserved
    // for scoped use (queried with empty operand/output sets).
    remaining_memory_.push_back(
        alternate_memory_size_ -
        reserved_scoped_memory_fn_(loop_inst,
                                   {},
                                   {}));
  }
  // Collect the unique buffers produced or consumed by loop instructions.
  std::set<const HloBuffer*> buffers_to_process;
  for (const auto& [instruction, idx] : instructions_in_loop_) {
    // Factory producing a per-instruction ForEachSubshape callback that
    // records the unique buffer at each array-shaped subshape.
    auto maybe_add_buffer = [&](const HloInstruction* instruction) {
      return [this, &buffers_to_process, instruction](const Shape& subshape,
                                                      const ShapeIndex& index) {
        if (!subshape.IsArray()) {
          return;
        }
        const HloBuffer& buffer =
            alias_analysis_.GetUniqueBufferAt(instruction, index);
        if (buffers_to_process.find(&buffer) == buffers_to_process.end()) {
          buffers_to_process.insert(&buffer);
        }
      };
    };
    ShapeUtil::ForEachSubshape(instruction->shape(),
                               maybe_add_buffer(instruction));
    for (const HloInstruction* operand : instruction->operands()) {
      ShapeUtil::ForEachSubshape(operand->shape(), maybe_add_buffer(operand));
    }
  }
  for (const HloBuffer* buffer : buffers_to_process) {
    MaybeCreateLoopValue(*buffer, loop_computation);
  }
  return absl::OkStatus();
}
// Classifies `buffer` into a LoopValue by partitioning its positions/uses
// into the current, previous, and next iterations. Buffers with no positions
// or uses inside the loop — or with positions in the previous iteration —
// are rejected (the tentatively pushed LoopValue is popped off again).
void MemoryBoundLoopOptimizer::MaybeCreateLoopValue(
    const HloBuffer& buffer, const HloComputation* loop_computation) {
  // Tentatively create the LoopValue; removed at the end if ineligible.
  loop_values_.push_back({});
  LoopValue& loop_value = loop_values_.back();
  float pos_bytes = 0;
  float use_bytes = 0;
  bool has_footer_consumer = false;
  for (const HloValue* value : buffer.values()) {
    // Categorize each position by which iteration window it falls in.
    for (const HloPosition& position : value->positions()) {
      if (position.instruction->opcode() == HloOpcode::kGetTupleElement) {
        continue;
      }
      std::optional<int64_t> loop_index =
          GetInstructionIndex(position.instruction, instructions_in_loop_);
      std::optional<int64_t> prev_iteration_index;
      if (loop_index) {
        loop_value.loop_positions.push_back({*loop_index, position});
        VLOG(3) << "Pos match: " << position.instruction->name() << " at "
                << *loop_index;
      } else if ((prev_iteration_index = GetInstructionIndex(
                      position.instruction, instructions_in_prev_iteration_))) {
        loop_value.prev_iteration_positions.push_back(
            {*prev_iteration_index, position});
        VLOG(3) << "Pos match (prev iteration): "
                << position.instruction->name() << " at "
                << *prev_iteration_index;
      } else if (loop_value.prev_iteration_positions.empty() &&
                 loop_value.loop_positions.empty() &&
                 position.instruction->parent() == loop_computation &&
                 !loop_value.header_position) {
        // First position seen before the loop in the same computation.
        loop_value.header_position = position;
      }
      if (loop_index || prev_iteration_index) {
        // Accumulate bytes written at positions inside the loop window.
        float bytes_accessed = cost_analysis_.base_costs().OutputBytesAccessed(
            *position.instruction, position.index);
        pos_bytes += bytes_accessed;
        VLOG(3) << " accessed: " << bytes_accessed;
      }
    }
    // Categorize each use: current iteration, next iteration, or footer.
    for (const HloUse& use : value->GetUses()) {
      if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
        continue;
      }
      std::optional<int64_t> loop_index =
          GetInstructionIndex(use.instruction, instructions_in_loop_);
      std::optional<int64_t> next_iteration_index;
      if (loop_index) {
        loop_value.loop_uses.push_back({*loop_index, use});
        VLOG(3) << "Use match: " << use.instruction->name() << " at "
                << *loop_index;
      } else if ((next_iteration_index = GetInstructionIndex(
                      use.instruction, instructions_in_next_iteration_))) {
        loop_value.next_iteration_uses.push_back({*next_iteration_index, use});
        VLOG(3) << "Use match (next iteration): " << use.instruction->name()
                << " at " << *next_iteration_index;
      } else if (!loop_value.loop_positions.empty() ||
                 !loop_value.loop_uses.empty()) {
        // A use outside the loop windows consumes a value the loop produces.
        has_footer_consumer = true;
      }
      if (loop_index || next_iteration_index) {
        // Accumulate bytes read at uses inside the loop window.
        float bytes_accessed = cost_analysis_.base_costs().OperandBytesAccessed(
            *use.instruction, use.operand_number, use.operand_index);
        use_bytes += bytes_accessed;
        VLOG(3) << " accessed: " << bytes_accessed;
      }
    }
  }
  // Eligible: touched inside the loop and not defined in the prev iteration.
  if ((!loop_value.loop_positions.empty() || !loop_value.loop_uses.empty()) &&
      loop_value.prev_iteration_positions.empty()) {
    loop_value.size = size_function_(**buffer.values().begin());
    VLOG(3) << "Size: " << loop_value.size;
    loop_value.allocation_type = LoopValue::AllocationType::kUnsupported;
    auto position_compare = [](const std::pair<int64_t, HloPosition>& a,
                               const std::pair<int64_t, HloPosition>& b) {
      return a.first < b.first;
    };
    auto use_compare = [](const std::pair<int64_t, HloUse>& a,
                          const std::pair<int64_t, HloUse>& b) {
      return a.first < b.first;
    };
    // Sort by instruction index so front()/back() below are the earliest and
    // latest occurrences.
    absl::c_sort(loop_value.loop_positions, position_compare);
    absl::c_sort(loop_value.prev_iteration_positions, position_compare);
    absl::c_sort(loop_value.loop_uses, use_compare);
    absl::c_sort(loop_value.next_iteration_uses, use_compare);
    // Decide the allocation type from where positions/uses occur.
    if (!loop_value.loop_positions.empty()) {
      if (loop_value.next_iteration_uses.empty() &&
          !loop_value.loop_uses.empty()) {
        loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
      } else if (!loop_value.next_iteration_uses.empty()) {
        if (loop_value.next_iteration_uses.back().first >=
            loop_value.loop_positions.front().first) {
          // Next-iteration use overlaps the defining point: loop-carried.
          loop_value.allocation_type =
              LoopValue::AllocationType::kLoopCarriedDependence;
        } else {
          loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
        }
      }
    } else if (loop_value.header_position && !loop_value.loop_uses.empty()) {
      if (loop_value.loop_uses.size() ==
              loop_value.next_iteration_uses.size() &&
          loop_value.loop_uses.front().first ==
              loop_value.next_iteration_uses.front().first) {
        // Used identically every iteration: keep it pinned.
        loop_value.allocation_type = LoopValue::AllocationType::kPinned;
      } else if (loop_value.next_iteration_uses.empty() ||
                 loop_value.next_iteration_uses.back().first <
                     loop_value.loop_uses.front().first) {
        loop_value.allocation_type = LoopValue::AllocationType::kPrefetch;
      }
    }
    VLOG(3) << "Allocation type "
            << LoopValue::AllocationTypeToString(loop_value.allocation_type);
    VLOG(3) << "Pos bytes: " << pos_bytes << " use bytes: " << use_bytes;
    // Savings = bytes no longer accessed in default memory; discount the
    // buffer size once per required transfer to/from default memory.
    float savings = pos_bytes + use_bytes;
    if (loop_value.header_position) {
      savings -= loop_value.size;
    }
    if (!loop_value.loop_positions.empty() && has_footer_consumer) {
      savings -= loop_value.size;
    }
    loop_value.savings = savings;
    loop_value.savings_per_byte = savings / loop_value.size;
    VLOG(3) << "Savings: " << loop_value.savings;
    VLOG(3) << "Savings per byte: " << loop_value.savings_per_byte;
    for (const HloValue* value : buffer.values()) {
      VLOG(3) << value->ToString();
    }
    loop_value.hlo_values = buffer.values();
  } else {
    // Ineligible buffer: discard the tentative LoopValue.
    loop_values_.pop_back();
  }
}
// Runs the full optimization pipeline in order: prioritize values, assign
// allocations, then finalize the results.
void MemoryBoundLoopOptimizer::Optimize() {
  SortLoopValues();
  AllocateLoopValues();
  PostProcess();
}
float MemoryBoundLoopOptimizer::CalculateExecutionTime() const {
std::vector<std::pair<const CopyAllocation*, float>> prefetches;
for (const LoopValue& value : loop_values_) {
if (!value.allocations.empty() &&
value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(
{static_cast<const CopyAllocation*>(value.allocations.back().get()),
cost_analysis_.GetAsyncCopyElapsed(
value.hlo_values.front()->shape())});
}
}
auto get_effective_done_time =
[&](int64_t copy_start_schedule_after,
int64_t copy_done_schedule_before) -> int64_t {
if (copy_start_schedule_after == loop_size_ - 1 &&
copy_done_schedule_before == 0) {
return 2 * loop_size_;
}
if (copy_start_schedule_after + 1 >= copy_done_schedule_before) {
return copy_done_schedule_before + loop_size_;
}
return copy_done_schedule_before;
};
absl::c_sort(
prefetches, [&](const std::pair<const CopyAllocation*, float>& a,
const std::pair<const CopyAllocation*, float>& b) {
return std::forward_as_tuple(
a.first->copy_start_schedule_after(),
get_effective_done_time(
a.first->copy_start_schedule_after(),
a.first->copy_done_schedule_before())) <
std::forward_as_tuple(b.first->copy_start_schedule_after(),
get_effective_done_time(
b.first->copy_start_schedule_after(),
b.first->copy_done_schedule_before()));
});
std::vector<std::optional<int>> required_prefetch_completions(loop_size_);
for (int i = 0; i < prefetches.size(); ++i) {
const auto& [prefetch, elapsed] = prefetches[i];
int required_prefetch_completion = i;
if (prefetch->copy_start_schedule_after() == loop_size_ - 1 &&
prefetch->copy_done_schedule_before() == 0) {
required_prefetch_completion -= 2 * prefetches.size();
} else if (prefetch->copy_start_schedule_after() + 1 >=
prefetch->copy_done_schedule_before()) {
required_prefetch_completion -= prefetches.size();
}
VLOG(3) << "Prefetch #" << i << " (elapsed " << elapsed
<< "): " << prefetch->ToString();
if (required_prefetch_completions[prefetch->copy_done_schedule_before()]) {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
std::max(
*required_prefetch_completions[prefetch
->copy_done_schedule_before()],
required_prefetch_completion);
} else {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
required_prefetch_completion;
}
VLOG(4)
<< "Required completion at " << prefetch->copy_done_schedule_before()
<< " = "
<< *required_prefetch_completions[prefetch
->copy_done_schedule_before()];
}
float result;
std::vector<float> bandwidth_idle_times;
std::vector<float> instructions_elapsed;
bandwidth_idle_times.reserve(loop_size_);
instructions_elapsed.reserve(loop_size_);
for (int i = 0; i < loop_size_; ++i) {
bandwidth_idle_times.push_back(GetBandwidthIdleTime(i));
instructions_elapsed.push_back(GetInstructionElapsed(i));
}
const int kNumIterations = 3;
std::vector<float> prefetch_remaining_elapsed_times(prefetches.size() *
kNumIterations);
int prefetch_start_index = 0;
int prefetch_done_index = 0;
int prefetch_completed_index = 0;
for (int iteration = 0; iteration < kNumIterations; ++iteration) {
float total_elapsed = 0;
float total_bandwidth_idle_time = 0;
float total_critical_prefetch = 0;
for (int i = 0; i < loop_size_; ++i) {
std::optional<int> required_prefetch_completion =
required_prefetch_completions[i];
if (required_prefetch_completion) {
int required_prefetch_done_index =
iteration * static_cast<int>(prefetches.size()) +
*required_prefetch_completion;
VLOG(4) << "Prefetch #"
<< ((*required_prefetch_completion + prefetches.size()) % | #include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
constexpr int64_t kPointerSize = 8;
// Returns the byte size of `shape`, counting kPointerSize bytes per pointer.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
// BufferValue::SizeFunction adapter around ShapeSize for the optimizer.
int64_t SizeFunction(const BufferValue& value) {
  return ShapeSize(value.shape());
}
// Default ReservedScopedMemoryFunction for tests: reserves no scoped memory,
// so the optimizer sees the full alternate-memory budget at every
// instruction.
int64_t ReservedScopedMemoryFn(
    const HloInstruction* instruction,
    const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
        operands_in_alternate_memory,
    const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
  return 0;
}
class MemoryBoundLoopOptimizerTest : public HloTestBase {
public:
MemoryBoundLoopOptimizerTest() = default;
protected:
const int64_t kAlternateMemorySpace = 1;
const int64_t kDefaultMemorySpace = 0;
  // Configures cost/analysis machinery for `module`: optimizer options,
  // HloCostAnalysis (with test-scale flops/bandwidth), CostAnalysis, alias
  // analysis, and the live range. Must run before creating an optimizer.
  absl::Status Initialize(const HloModule* module,
                          uint64_t alternate_memory_size = 256) {
    HloCostAnalysis::Options options;
    MemoryBoundLoopOptimizerOptions optimizer_options;
    optimizer_options.set_enabled(true);
    optimizer_options.set_desired_copy_ratio(0.7);
    optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
    optimizer_options.set_min_num_iterations(3.0);
    options_.memory_bound_loop_optimizer_options = optimizer_options;
    // Small bandwidth/flops numbers keep elapsed-time estimates easy to
    // reason about in tests.
    cost_analysis_options_.alternate_mem_bandwidth_bytes_per_second = 128;
    cost_analysis_options_.async_copy_bandwidth_bytes_per_second = 32;
    cost_analysis_options_.pipeline_overhead_window_size_mib = 1;
    options.shape_size = ShapeSize;
    options.set_flops_per_second(16);
    options.set_bytes_per_second(32);
    options.set_transcendentals_per_second(16);
    hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
    // Populate the cost analysis by visiting the entry computation.
    TF_RETURN_IF_ERROR(
        module->entry_computation()->Accept(hlo_cost_analysis_.get()));
    hlo_cost_analysis_costs_ =
        std::make_unique<HloCostAnalysisCosts>(*hlo_cost_analysis_);
    TF_ASSIGN_OR_RETURN(cost_analysis_,
                        CostAnalysis::Create(*hlo_cost_analysis_costs_,
                                             cost_analysis_options_, *module));
    TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module));
    TF_ASSIGN_OR_RETURN(live_range_,
                        HloLiveRange::Run(module->schedule(), *alias_analysis_,
                                          module->entry_computation()));
    return absl::OkStatus();
  }
  // Initializes the analyses for `module` and builds a MemoryBoundLoopOptimizer
  // for the loop spanning instruction indices [loop_start, loop_end). The
  // returned pointer is owned by the fixture (optimizer_).
  absl::StatusOr<MemoryBoundLoopOptimizer*> CreateOptimizer(
      int loop_start, int loop_end, const HloModule* module,
      uint64_t alternate_memory_size = 256,
      const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
          ReservedScopedMemoryFn) {
    TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
    // Fresh options object for the optimizer itself (independent of the copy
    // stored on options_ by Initialize()).
    MemoryBoundLoopOptimizerOptions optimizer_options;
    optimizer_options.set_enabled(true);
    optimizer_options.set_desired_copy_ratio(0.7);
    optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
    TF_ASSIGN_OR_RETURN(
        optimizer_,
        MemoryBoundLoopOptimizer::Create(
            loop_start, loop_end, alternate_memory_size, optimizer_options,
            *live_range_, *alias_analysis_, *cost_analysis_, SizeFunction,
            reserved_scoped_memory_fn));
    return optimizer_.get();
  }
  // Expands the templated loop snippet `hlo_loop_str` into a full HLO module
  // string, parses/verifies it, and builds an optimizer over the detected loop
  // bounds. Outputs the loop start index and the optimizer via parameters and
  // returns the owning module.
  absl::StatusOr<std::unique_ptr<HloModule>> ParseAndCreateOptimizer(
      absl::string_view hlo_loop_str, uint64_t alternate_memory_size,
      int& loop_start_idx, MemoryBoundLoopOptimizer** optimizer,
      const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
          ReservedScopedMemoryFn) {
    int loop_end_idx;
    TF_ASSIGN_OR_RETURN(
        std::string module_str,
        ParseAndCreateModuleString(hlo_loop_str, loop_start_idx, loop_end_idx));
    TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
                        ParseAndReturnVerifiedModule(module_str));
    TF_ASSIGN_OR_RETURN(
        *optimizer,
        CreateOptimizer(loop_start_idx, loop_end_idx, module.get(),
                        alternate_memory_size, reserved_scoped_memory_fn));
    return std::move(module);
  }
// Converts a loop description using $opN / $paramN / $prev_opN placeholders
// into a complete, scheduled HLO module string that unrolls the loop body
// three times (prev / current / next iteration). On success, loop_start_idx
// and loop_end_idx are set to the logical instruction indices delimiting the
// middle (current) iteration.
//
// NOTE(review): the string literals on the StrAppend calls below were
// truncated in the previous revision (the "  // " fragments inside them had
// been stripped along with comments); they are reconstructed here so the
// literals are terminated and each emitted HLO line carries its logical
// instruction index as an HLO comment.
absl::StatusOr<std::string> ParseAndCreateModuleString(
    absl::string_view hlo_loop_str, int& loop_start_idx, int& loop_end_idx) {
  // Logs regexp submatches (and their byte offsets) for debugging.
  auto log_submatches = [&](const absl::string_view* matches, int count) {
    for (int i = 0; i < count; ++i) {
      if (matches[i].data() == nullptr) {
        VLOG(4) << "Submatch[" << i << "] = nullptr";
      } else {
        VLOG(4) << "Submatch[" << i << "] = " << matches[i]
                << " (idx: " << (matches[i].data() - hlo_loop_str.data())
                << ")";
      }
    }
  };
  // Collect the loop ops ($opN = <type> ...) in order.
  RE2 op_re("\\$op([0-9]+) += +(\\S+).*");
  std::vector<absl::string_view> ops;
  std::vector<absl::string_view> op_types;
  int begin_pos = 0;
  absl::string_view submatch[3];
  while (op_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
                     RE2::UNANCHORED, submatch, 3)) {
    log_submatches(submatch, 3);
    int op_num;
    if (!absl::SimpleAtoi(submatch[1], &op_num)) {
      return InvalidArgument("Op name expects to contain a number, found %s.",
                             submatch[1]);
    }
    // Ops must be numbered consecutively starting from 0.
    if (op_num != ops.size()) {
      return InvalidArgument("Op number expected to be %d found %d.",
                             op_types.size(), op_num);
    }
    ops.push_back(submatch[0]);
    op_types.push_back(submatch[2]);
    begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
  }
  // Collect the parameter types (<type> $paramN). Parameters may be
  // referenced in any order, so grow the vector as needed.
  RE2 param_re("([[:alnum:]]+\\[\\S*\\]) +\\$param([0-9]+)");
  std::vector<absl::string_view> param_types;
  begin_pos = 0;
  while (param_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
                        RE2::UNANCHORED, submatch, 3)) {
    log_submatches(submatch, 3);
    int param_num;
    if (!absl::SimpleAtoi(submatch[2], &param_num)) {
      return InvalidArgument(
          "Param name expects to contain a number, found %s.", submatch[2]);
    }
    while (param_num >= param_types.size()) {
      param_types.push_back({});
    }
    param_types[param_num] = submatch[1];
    begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
  }
  // Capture the operands of the (optional) ROOT tuple.
  RE2 root_re("ROOT \\$root += +tuple\\((.*)\\)");
  absl::string_view root_values;
  if (root_re.Match(hlo_loop_str, 0, hlo_loop_str.size(), RE2::UNANCHORED,
                    submatch, 2)) {
    log_submatches(submatch, 2);
    root_values = submatch[1];
  }
  for (absl::string_view op_type : op_types) {
    VLOG(4) << "op_type: " << op_type;
  }
  for (absl::string_view param_type : param_types) {
    VLOG(4) << "param_type: " << param_type;
  }
  std::string hlo_string = R"(
HloModule module, is_scheduled=true

ENTRY Entry {
)";
  int total_instructions = 0;
  // Parameters for the three unrolled iterations. Each emitted line carries
  // its logical instruction index as a trailing HLO comment.
  for (absl::string_view param_prefix : {"prev_", "", "next_"}) {
    for (int i = 0; i < param_types.size(); ++i) {
      int parameter_number = total_instructions;
      absl::StrAppend(&hlo_string, "  ", param_prefix, "param", i, " = ",
                      param_types[i], " parameter(", parameter_number,
                      ")  // ", total_instructions++, "\n");
    }
  }
  // Values produced by the iteration before "prev" are fed in as parameters.
  for (int i = 0; i < op_types.size(); ++i) {
    int parameter_number = total_instructions;
    absl::StrAppend(&hlo_string, "  ", "prev_prev_op", i, " = ", op_types[i],
                    " parameter(", parameter_number, ")  // ",
                    total_instructions++, "\n");
  }
  std::string new_root_values;
  // Emits one unrolled copy of the loop body with the given placeholder
  // replacements and accumulates the matching root-tuple operands.
  auto print_ops =
      [&](const std::vector<std::pair<const absl::string_view, std::string>>&
              replacements) {
        for (int i = 0; i < ops.size(); ++i) {
          absl::StrAppend(&hlo_string, "  ",
                          absl::StrReplaceAll(ops[i], replacements), "  // ",
                          total_instructions++, "\n");
        }
        if (!root_values.empty()) {
          absl::StrAppend(&new_root_values,
                          new_root_values.empty() ? "" : ", ",
                          absl::StrReplaceAll(root_values, replacements));
        }
      };
  std::vector<std::pair<const absl::string_view, std::string>>
      prev_replacements;
  prev_replacements.push_back({"$prev_op", "prev_prev_op"});
  prev_replacements.push_back({"$op", "prev_op"});
  prev_replacements.push_back({"$param", "prev_param"});
  absl::StrAppend(&hlo_string, "  // Previous iteration:\n");
  print_ops(prev_replacements);
  loop_start_idx = total_instructions;
  std::vector<std::pair<const absl::string_view, std::string>> replacements;
  replacements.push_back({"$", ""});
  absl::StrAppend(&hlo_string, "  // Loop body:\n");
  print_ops(replacements);
  loop_end_idx = total_instructions;
  std::vector<std::pair<const absl::string_view, std::string>>
      next_replacements;
  next_replacements.push_back({"$prev_op", "op"});
  next_replacements.push_back({"$op", "next_op"});
  next_replacements.push_back({"$param", "next_param"});
  absl::StrAppend(&hlo_string, "  // Next iteration:\n");
  print_ops(next_replacements);
  absl::StrAppend(&hlo_string, "  ROOT root = tuple(", new_root_values,
                  ")\n");
  absl::StrAppend(&hlo_string, "}");
  VLOG(1) << hlo_string;
  return hlo_string;
}
// Runs memory space assignment on `module` using the fixture's options and
// returns the resulting preset assignments. Cost analysis is initialized
// lazily on first use so repeated calls reuse it.
absl::StatusOr<std::unique_ptr<PresetAssignments>> RunMsa(
    HloModule* module, uint64_t alternate_memory_size = 256) {
  options_.max_size_in_bytes = alternate_memory_size;
  options_.alignment_in_bytes = 8;
  options_.verify = true;
  options_.alternate_memory_space = kAlternateMemorySpace;
  if (!cost_analysis_) {
    TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
  }
  CostAnalysis::Cache cache;
  MemoryBoundednessBufferIntervalComparator comparator(*cost_analysis_,
                                                       &cache);
  options_.buffer_interval_comparator = &comparator;
  // Construct the picker directly; the previous code copy-constructed it
  // from a redundant temporary of the same type.
  CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
      *cost_analysis_, /*min_overlap_to_async_copy_ratio=*/0.8,
      /*preferred_overlap_to_async_copy_ratio=*/1.5,
      /*max_overlap_to_mem_size_async_copy_ratio=*/10.0,
      /*mem_size_bytes=*/alternate_memory_size);
  options_.prefetch_interval_picker = &prefetch_interval_picker;
  auto size_fn = [](const BufferValue& buffer) {
    return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);
  };
  options_.size_fn = size_fn;
  // Entry-computation parameters must stay in default memory; everything
  // else is eligible for alternate memory.
  auto is_allowed_in_alternate_mem = [](const HloValue& value) {
    HloInstruction* instruction = value.instruction();
    HloComputation* computation = instruction->parent();
    bool in_entry_computation =
        (computation == computation->parent()->entry_computation());
    if (in_entry_computation &&
        instruction->opcode() == HloOpcode::kParameter) {
      return false;
    }
    return true;
  };
  options_.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
  // No limit on outstanding async copies.
  options_.max_outstanding_prefetches = -1;
  options_.max_outstanding_evictions = -1;
  options_.allocate_across_sequential_calls = true;
  options_.cost_analysis = cost_analysis_.get();
  std::unique_ptr<PresetAssignments> preset_assignments =
      MemorySpaceAssignment::Run(module, *live_range_, *alias_analysis_,
                                 options_)
          .value();
  return preset_assignments;
}
// Verifies that the allocations chosen by the loop optimizer agree with the
// module MSA actually produced: operand memory spaces must match, and for
// copy allocations the copy-start must be scheduled after the expected
// instruction of the expected unrolled iteration.
absl::Status VerifyMsaEquivalence(
    HloModule* module, bool expect_unsupported_allocations = false) {
  // (instruction number, operand number) -> optimizer allocation covering
  // that use. Instruction names are expected to be "op<N>".
  absl::flat_hash_map<std::pair<int, int>, const Allocation*> allocation_map;
  for (const MemoryBoundLoopOptimizer::LoopValue& value :
       optimizer_->loop_values()) {
    // Unsupported allocation types are only tolerated if the caller opted in
    // via expect_unsupported_allocations (checked below).
    if (!value.IsAllocationTypeSupported()) {
      continue;
    }
    for (const auto& allocation : value.allocations) {
      for (const HloUse& use : allocation->uses()) {
        absl::string_view inst_name = use.instruction->name();
        TF_RET_CHECK(absl::StartsWith(inst_name, "op"));
        int inst_number;
        TF_RET_CHECK(absl::SimpleAtoi(inst_name.substr(2), &inst_number));
        allocation_map[{inst_number, use.operand_number}] = allocation.get();
      }
    }
  }
  // Instruction-name prefix for each of the three unrolled iterations.
  auto get_inst_prefix_in_iter = [](int iteration) {
    switch (iteration) {
      case 0:
        return "prev_";
      case 1:
        return "";
      case 2:
        return "next_";
      default:
        LOG(FATAL) << "Invalid iteration " << iteration;
        return "INVALID";
    }
  };
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module));
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> live_range,
                      HloLiveRange::Run(module->schedule(), *alias_analysis,
                                        module->entry_computation()));
  const auto& flattened_instructions =
      live_range->flattened_instruction_sequence().instructions();
  // Only the middle and next iterations are checked.
  for (int iteration = 1; iteration < 3; ++iteration) {
    for (int inst_number = 0; inst_number < optimizer_->loop_size();
         ++inst_number) {
      HloInstruction* inst = FindInstruction(
          module, absl::StrCat(get_inst_prefix_in_iter(iteration), "op",
                               inst_number));
      // Each op is expected to have (at least) two operands to verify.
      for (int operand_number = 0; operand_number < 2; ++operand_number) {
        const HloInstruction* operand = inst->operand(operand_number);
        LOG(INFO) << inst->name() << ", operand " << operand_number;
        if (!allocation_map.contains({inst_number, operand_number})) {
          // A use without an allocation is only acceptable when the caller
          // expects unsupported allocation types.
          TF_RET_CHECK(expect_unsupported_allocations);
          continue;
        }
        const Allocation* allocation =
            allocation_map.at({inst_number, operand_number});
        if (!allocation->is_copy_allocation()) {
          // Non-copy allocation: the operand feeds the use directly and its
          // layout must carry the allocation's memory space.
          EXPECT_NE(operand->opcode(), HloOpcode::kCopyDone);
          int expected_memory_space =
              allocation->memory_space() == MemorySpace::kDefault
                  ? kDefaultMemorySpace
                  : kAlternateMemorySpace;
          EXPECT_EQ(operand->shape().layout().memory_space(),
                    expected_memory_space);
        } else {
          // Copy allocation: must target alternate memory and feed the use
          // through a copy-done.
          EXPECT_EQ(allocation->memory_space(), MemorySpace::kAlternate);
          TF_RET_CHECK(operand->opcode() == HloOpcode::kCopyDone);
          const CopyAllocation* copy_allocation =
              static_cast<const CopyAllocation*>(allocation);
          if (copy_allocation->copy_done_schedule_before() != inst_number) {
            // This use is not the one the copy is scheduled against; it must
            // then not be the allocation's first use.
            EXPECT_NE(allocation->uses().front(),
                      (HloUse{inst, operand_number}));
            continue;
          }
          // Determine which unrolled iteration the copy-start should fall
          // in: copies that wrap around the loop boundary start one (or two)
          // iterations earlier.
          int expected_copy_start_iteration = iteration;
          if (copy_allocation->copy_start_schedule_after() ==
                  optimizer_->loop_size() &&
              copy_allocation->copy_done_schedule_before() == 0) {
            expected_copy_start_iteration -= 2;
          } else if (copy_allocation->copy_start_schedule_after() + 1 >=
                     copy_allocation->copy_done_schedule_before()) {
            expected_copy_start_iteration -= 1;
          }
          if (expected_copy_start_iteration >= 0) {
            const HloInstruction* expected_copy_start_schedule_after =
                FindInstruction(
                    module,
                    absl::StrCat(
                        get_inst_prefix_in_iter(
                            expected_copy_start_iteration),
                        "op", copy_allocation->copy_start_schedule_after()));
            LOG(INFO) << "Expected copy start schedule after: "
                      << expected_copy_start_schedule_after->name();
            const HloInstruction* copy_start = operand->operand(0);
            TF_RET_CHECK(copy_start->opcode() == HloOpcode::kCopyStart);
            // Find the last "real" instruction (skipping copies, GTEs and
            // parameters) scheduled before the copy-start and compare it to
            // the expected one.
            int copy_start_idx =
                live_range->instruction_schedule().at(copy_start);
            const HloInstruction* copy_start_schedule_after = nullptr;
            for (int i = copy_start_idx - 1; i >= 0; --i) {
              HloOpcode opcode = flattened_instructions.at(i)->opcode();
              if (opcode != HloOpcode::kCopyStart &&
                  opcode != HloOpcode::kCopyDone &&
                  opcode != HloOpcode::kGetTupleElement &&
                  opcode != HloOpcode::kParameter) {
                copy_start_schedule_after = flattened_instructions.at(i);
                break;
              }
            }
            TF_RET_CHECK(copy_start_schedule_after != nullptr);
            EXPECT_EQ(copy_start_schedule_after,
                      expected_copy_start_schedule_after);
          }
        }
      }
    }
  }
  return absl::OkStatus();
}
 private:
  // MSA options shared by the helper methods above.
  Options options_;
  CostAnalysisOptions cost_analysis_options_;
  // Cost-model plumbing; presumably created by Initialize() — confirm.
  std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
  std::unique_ptr<HloCostAnalysisCosts> hlo_cost_analysis_costs_;
  std::unique_ptr<CostAnalysis> cost_analysis_;
  std::unique_ptr<HloAliasAnalysis> alias_analysis_;
  std::unique_ptr<HloLiveRange> live_range_;
  // Optimizer instance inspected by VerifyMsaEquivalence().
  std::unique_ptr<MemoryBoundLoopOptimizer> optimizer_;
};
// Verifies that the loop-invariant param0 ends with a prefetch (copy
// allocation) and that every use of every loop op is claimed by exactly one
// allocation.
TEST_F(MemoryBoundLoopOptimizerTest, SimplePrefetch) {
  absl::string_view hlo_loop_str = R"(
    $op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
    $op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
    $op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
    $op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
    $op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
    ROOT $root = tuple($op4, $param0)
  )";
  int loop_start_idx;
  MemoryBoundLoopOptimizer* optimizer;
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndCreateOptimizer(hlo_loop_str,
                                                  /*alternate_memory_size=*/128,
                                                  loop_start_idx, &optimizer));
  optimizer->Optimize();
  // Every use must be claimed by exactly one allocation.
  absl::flat_hash_set<HloUse> uses_seen;
  for (const MemoryBoundLoopOptimizer::LoopValue& value :
       optimizer->loop_values()) {
    LOG(INFO) << value.ToString();
    const HloInstruction* defining_inst =
        value.hlo_values.front()->defining_position().instruction;
    // The loop-invariant parameter should be served by a prefetch.
    if (defining_inst->name() == "param0") {
      EXPECT_TRUE(value.allocations.back()->is_copy_allocation());
    }
    for (const auto& allocation : value.allocations) {
      for (const HloUse& use : allocation->uses()) {
        EXPECT_TRUE(uses_seen.insert(use).second) << use.ToString();
      }
    }
  }
  // Both operands of every loop op must be covered by some allocation.
  for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
    HloInstruction* inst =
        module->entry_computation()->GetInstructionWithName(inst_name);
    for (int64_t operand_idx : {0, 1}) {
      EXPECT_TRUE(uses_seen.contains(HloUse{inst, operand_idx})) << inst_name;
    }
  }
}
// When a reserved-scoped-memory function claims the entire alternate memory
// budget for every instruction, no allocation may land in alternate memory.
TEST_F(MemoryBoundLoopOptimizerTest, ReservedScopedMemory) {
  absl::string_view hlo_loop_str = R"(
    $op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
    $op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
    $op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
    $op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
    $op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
    ROOT $root = tuple($op4, $param0)
  )";
  int loop_start_idx;
  MemoryBoundLoopOptimizer* optimizer;
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndCreateOptimizer(
          hlo_loop_str,
          /*alternate_memory_size=*/128, loop_start_idx, &optimizer,
          // Reserve all 128 bytes for every instruction, leaving no room
          // for any buffer in alternate memory.
          [](const HloInstruction*,
             const absl::flat_hash_set<std::pair<int, ShapeIndex>>&,
             const absl::flat_hash_set<ShapeIndex>&) { return 128; }));
  optimizer->Optimize();
  for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
       optimizer->loop_values()) {
    LOG(INFO) << "Loop value: " << loop_value.ToString();
    for (const auto& allocation : loop_value.allocations) {
      // Nothing may be placed in the alternate memory space.
      ASSERT_NE(static_cast<int64_t>(allocation->memory_space()),
                kAlternateMemorySpace);
    }
  }
}
// Regression-style test: runs full MSA on a module containing a tuple
// parameter accessed via get-tuple-element and only checks that the run
// succeeds (TF_ASSERT_OK_AND_ASSIGN on RunMsa).
TEST_F(MemoryBoundLoopOptimizerTest, GetTupleElement) {
  absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true

ENTRY entry {
  p0 = f32[1,4] parameter(0)
  p1 = f32[1,4] parameter(1)
  p2 = f32[1,4] parameter(2)
  p3 = f32[1,4] parameter(3)
  p4 = f32[1,4] parameter(4)
  p5 = f32[1,4] parameter(5)
  p6 = f32[1,4] parameter(6)
  tupleparam = (f32[1,4], f32[1,4]) parameter(7)

  op1 = tanh(p0)
  op2 = tanh(p1)
  op3 = tanh(op2)
  op4 = add(op1, op3)

  op5 = tanh(p2)
  op6 = tanh(p3)
  op7 = tanh(op6)
  op8 = add(op5, op7)

  op9 = tanh(p4)
  op10 = tanh(p5)
  op11 = tanh(op10)
  op12 = add(op9, op11)

  op13 = tanh(p6)
  gte = get-tuple-element(tupleparam), index=1
  op14 = tanh(gte)
  op15 = tanh(op14)
  op16 = add(op13, op15)
  ROOT root = tuple(tupleparam, op4, op8, op12, op16)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  VLOG(1) << "Original module:\n"
          << module->ToString(HloPrintOptions::ShortParsable());
  // Success of RunMsa is the assertion.
  TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments, RunMsa(module.get()));
}
// With an alternate-memory budget of zero, every allocation must stay in
// default memory while still covering every use exactly once.
TEST_F(MemoryBoundLoopOptimizerTest, NoAlternateMem) {
  absl::string_view hlo_loop_str = R"(
    $op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
    $op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
    $op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
    $op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
    $op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
    ROOT $root = tuple($op4, $param0)
  )";
  int loop_start_idx;
  MemoryBoundLoopOptimizer* optimizer;
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndCreateOptimizer(hlo_loop_str,
                                                  /*alternate_memory_size=*/0,
                                                  loop_start_idx, &optimizer));
  optimizer->Optimize();
  absl::flat_hash_set<HloUse> uses_seen;
  for (const MemoryBoundLoopOptimizer::LoopValue& value :
       optimizer->loop_values()) {
    LOG(INFO) << value.ToString();
    for (const auto& allocation : value.allocations) {
      // With no budget, alternate memory must never be used.
      EXPECT_EQ(allocation->memory_space(), MemorySpace::kDefault);
      for (const HloUse& use : allocation->uses()) {
        EXPECT_TRUE(uses_seen.insert(use).second) << use.ToString();
      }
    }
  }
  // Both operands of every loop op must still be covered.
  for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
    HloInstruction* inst =
        module->entry_computation()->GetInstructionWithName(inst_name);
    for (int64_t operand_idx : {0, 1}) {
      EXPECT_TRUE(uses_seen.contains(HloUse{inst, operand_idx})) << inst_name;
    }
  }
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithOverlap) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndCreateOptimizer(hlo_loop_str,
512,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 3);
bool seen_overlap = false;
bool seen_nonoverlap = false;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op14") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 14);
EXPECT_EQ(prefetch->copy_start_schedule_after(), 0);
} else {
ASSERT_EQ(use.instruction->name(), "op1");
EXPECT_EQ(prefetch->copy_done_schedule_before(), 1);
if (prefetch->copy_start_schedule_after() == 0 |
1,998 | cpp | tensorflow/tensorflow | prefetch_interval_picker | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker.cc | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_PREFETCH_INTERVAL_PICKER_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_PREFETCH_INTERVAL_PICKER_H_
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/shape.h"
#include "xla/util.h"
namespace xla {
namespace memory_space_assignment {
// Abstract policy interface that memory space assignment uses to decide when
// an asynchronous copy (prefetch) into alternate memory may start and end
// relative to a buffer's use, and to iterate over candidate prefetch start
// times.
class PrefetchIntervalPicker {
 public:
  PrefetchIntervalPicker() = default;
  virtual ~PrefetchIntervalPicker() = default;
  // True if a buffer of `shape` can live in alternate memory over
  // [start_time, end_time] without requiring an async copy.
  virtual bool CanAllocateInAlternateMemoryNoCopy(const Shape& shape,
                                                  int64_t start_time,
                                                  int64_t end_time) const = 0;
  // Preferred logical time at which an eviction starting at start_time
  // should complete, capped at latest_end_time.
  virtual int64_t PreferredEvictionEndTime(const Shape& shape,
                                           int64_t start_time,
                                           int64_t latest_end_time) const = 0;
  // Latest logical time a prefetch may start and still complete before
  // end_time. `use` (may be null) lets cost-aware implementations account
  // for the use instruction's speedup when its operand is already in
  // alternate memory.
  virtual int64_t LatestPrefetchStartTime(const Shape& shape,
                                          int64_t start_time, int64_t end_time,
                                          const HloUse* use) const = 0;
  // Preferred prefetch start time within the given bounds.
  virtual int64_t PreferredPrefetchStartTime(
      const Shape& shape, int64_t earliest_prefetch_start_time,
      int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const = 0;
  // Clamps a proposed prefetch end time; the default accepts the proposal
  // unchanged.
  virtual int64_t LatestPrefetchEndTime(
      int64_t original_prefetch_end_time,
      int64_t proposed_prefetch_end_time) const {
    return proposed_prefetch_end_time;
  }
  // Estimated logical time at which a prefetch started at start_time would
  // finish, no later than end_time.
  virtual int64_t EstimatedPrefetchEndTime(const Shape& shape,
                                           int64_t start_time,
                                           int64_t end_time) const = 0;
  // Elapsed "time" between two logical times, in implementation-defined
  // units (instruction counts or estimated seconds).
  virtual float GetLogicalIntervalElapsed(int64_t start_time,
                                          int64_t end_time) const = 0;
  // Starts iterating over candidate prefetch start times for `use` within
  // (start_time, end_time), optionally seeded with a preferred time.
  virtual void Begin(const HloUse& use, int64_t start_time, int64_t end_time,
                     std::optional<int64_t> preferred_time) = 0;
  // Returns the next candidate prefetch start time. Only valid while
  // Done() is false.
  virtual int64_t Next() = 0;
  // True once no candidate prefetch start times remain.
  virtual bool Done() const = 0;
  // Latest logical time the iteration can yield.
  virtual int64_t latest_time() const = 0;
  // Informs the picker which retry this is, so implementations can relax
  // their heuristics on later attempts.
  virtual void SetRetryNumber(int retry_number) {
    retry_number_ = retry_number;
  }
  int retry_number() const { return retry_number_; }
  // Human-readable iteration state, for debugging.
  virtual std::string ToDebugString() const = 0;
  virtual std::string ToNoCopyDebugString(const Shape& shape,
                                          int64_t start_time,
                                          int64_t end_time) const = 0;
  // Optional estimate of how much the buffer interval would benefit from
  // alternate memory; nullopt means "no opinion".
  virtual std::optional<float> BufferIntervalAlternateMemoryBenefit(
      const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval)
      const {
    return std::nullopt;
  }

 protected:
  // Maps each instruction to its logical time in the schedule; populated by
  // subclasses.
  const absl::flat_hash_map<const HloInstruction*, int64_t>*
      instruction_schedule_ = nullptr;
  int retry_number_ = 0;
};
// Prefetch interval picker that measures intervals purely in instruction
// counts: an async copy must overlap at least min_overlap_count and at most
// max_overlap_count instructions.
class InstructionCountPrefetchIntervalPicker : public PrefetchIntervalPicker {
 public:
  InstructionCountPrefetchIntervalPicker(int64_t min_overlap_count,
                                         int64_t max_overlap_count)
      : min_overlap_count_(min_overlap_count),
        max_overlap_count_(max_overlap_count) {}
  bool CanAllocateInAlternateMemoryNoCopy(const Shape& shape,
                                          int64_t start_time,
                                          int64_t end_time) const override;
  int64_t PreferredEvictionEndTime(const Shape& shape, int64_t start_time,
                                   int64_t latest_end_time) const override;
  int64_t LatestPrefetchStartTime(const Shape& shape, int64_t start_time,
                                  int64_t end_time,
                                  const HloUse* use) const override;
  int64_t PreferredPrefetchStartTime(const Shape& shape,
                                     int64_t earliest_prefetch_start_time,
                                     int64_t latest_prefetch_start_time,
                                     int64_t prefetch_end_time) const override;
  int64_t EstimatedPrefetchEndTime(const Shape& shape, int64_t start_time,
                                   int64_t end_time) const override;
  float GetLogicalIntervalElapsed(int64_t start_time,
                                  int64_t end_time) const override;
  void Begin(const HloUse& use, int64_t start_time, int64_t end_time,
             std::optional<int64_t> preferred_time) override;
  int64_t Next() override;
  bool Done() const override;
  int64_t latest_time() const override;
  std::string ToDebugString() const override;
  std::string ToNoCopyDebugString(const Shape& shape, int64_t start_time,
                                  int64_t end_time) const override;

 private:
  // Minimum / maximum number of instructions a copy may overlap.
  int64_t min_overlap_count_;
  int64_t max_overlap_count_;
  // Iteration state set by Begin() and advanced by Next().
  int64_t end_time_;
  int64_t current_prefetch_time_;
};
// Prefetch interval picker driven by the CostAnalysis cost model: intervals
// are measured in estimated elapsed time, and overlap windows are expressed
// as ratios of the async copy's elapsed time.
class CostAnalysisPrefetchIntervalPicker : public PrefetchIntervalPicker {
 public:
  // If `shape_override` is non-null it is used instead of the actual buffer
  // shape when estimating async copy time.
  CostAnalysisPrefetchIntervalPicker(
      const CostAnalysis& cost_analysis, float min_overlap_to_async_copy_ratio,
      float preferred_overlap_to_async_copy_ratio,
      float max_overlap_to_mem_size_async_copy_ratio, int64_t mem_size_bytes,
      const Shape* shape_override = nullptr);
  bool CanAllocateInAlternateMemoryNoCopy(const Shape& shape,
                                          int64_t start_time,
                                          int64_t end_time) const override;
  int64_t PreferredEvictionEndTime(const Shape& shape, int64_t start_time,
                                   int64_t latest_end_time) const override;
  int64_t LatestPrefetchEndTime(
      int64_t original_prefetch_end_time,
      int64_t proposed_prefetch_end_time) const override;
  int64_t LatestPrefetchStartTime(const Shape& shape, int64_t start_time,
                                  int64_t end_time,
                                  const HloUse* use) const override;
  int64_t PreferredPrefetchStartTime(const Shape& shape,
                                     int64_t earliest_prefetch_start_time,
                                     int64_t latest_prefetch_start_time,
                                     int64_t prefetch_end_time) const override;
  int64_t EstimatedPrefetchEndTime(const Shape& shape, int64_t start_time,
                                   int64_t end_time) const override;
  float GetLogicalIntervalElapsed(int64_t start_time,
                                  int64_t end_time) const override;
  void Begin(const HloUse& use, int64_t start_time, int64_t end_time,
             std::optional<int64_t> preferred_time) override;
  int64_t Next() override;
  bool Done() const override;
  int64_t latest_time() const override;
  void SetRetryNumber(int retry_number) override;
  std::string ToDebugString() const override;
  std::string ToNoCopyDebugString(const Shape& shape, int64_t start_time,
                                  int64_t end_time) const override;
  std::optional<float> BufferIntervalAlternateMemoryBenefit(
      const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval)
      const override;

 private:
  // Smallest while-nest level seen in [start_time, end_time] — TODO confirm
  // against the definition (outside this chunk).
  int GetMinWhileNestLevel(int64_t start_time, int64_t end_time) const;
  // Upper bound on how long a buffer may occupy alternate memory.
  float GetMaxElapsedInAlternateMemory(float async_copy_elapsed) const;

  // Prefix sums of per-logical-time elapsed cost (built in the ctor).
  std::vector<float> elapsed_time_cumsum_;
  // Per logical time: while-loop nest level and full computation nest level.
  std::vector<int> while_nest_level_;
  std::vector<int> computation_nest_level_;
  // Per logical time: most recent earlier time at a shallower while nest
  // level (-1 if none).
  std::vector<int> while_nest_level_change_;
  const CostAnalysis& cost_analysis_;
  float min_overlap_to_async_copy_ratio_;
  float preferred_overlap_to_async_copy_ratio_;
  float max_async_copy_elapsed_;
  // Per-iteration state established by Begin().
  float async_copy_elapsed_;
  float inst_elapsed_reduction_;
  int64_t end_logical_time_;
  int64_t earliest_prefetch_time_;
  int64_t latest_prefetch_time_;
  bool using_increasing_prefetch_time_iterator_ = true;
  int64_t increasing_prefetch_time_iterator_;
  int64_t decreasing_prefetch_time_iterator_;
  // Execution-count multiplier per while nest level.
  std::vector<float> while_execution_counts_;
  // Optional shape used in place of the real one for copy cost estimates.
  std::optional<Shape> shape_override_;
};
}
}
#endif
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace memory_space_assignment {
namespace {
// Multiplier applied per retry to the preferred eviction overlap, giving
// later eviction attempts progressively more slack (see
// PreferredEvictionEndTime).
const float kEvictionRetryMultiplier = 2.0;
// Number of intervals explored by the decreasing-interval iterator —
// NOTE(review): its use is outside this chunk; confirm.
const int kNumExploredDecreasingIntervals = 100;
}
bool InstructionCountPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  // A buffer may stay in alternate memory without a copy only if its live
  // range spans no more than the configured maximum instruction count.
  const int64_t interval_length = end_time - start_time;
  return interval_length <= max_overlap_count_;
}
int64_t InstructionCountPrefetchIntervalPicker::PreferredEvictionEndTime(
    const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
  // Aim to complete the eviction min_overlap_count_ instructions after it
  // starts, but never past the latest permissible end time.
  const int64_t preferred_end = start_time + min_overlap_count_;
  return std::min(preferred_end, latest_end_time);
}
int64_t InstructionCountPrefetchIntervalPicker::LatestPrefetchStartTime(
    const Shape& shape, int64_t start_time, int64_t end_time,
    const HloUse* use) const {
  // Keep at least min_overlap_count_ instructions between the prefetch
  // start and the use time.
  const int64_t latest_start = end_time - min_overlap_count_;
  return latest_start;
}
int64_t InstructionCountPrefetchIntervalPicker::PreferredPrefetchStartTime(
    const Shape& shape, int64_t earliest_prefetch_start_time,
    int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
  // Start as early as permitted, but never overlap more than
  // max_overlap_count_ instructions before the prefetch end.
  const int64_t capped_start = prefetch_end_time - max_overlap_count_;
  return std::max(earliest_prefetch_start_time, capped_start);
}
// The instruction-count picker has no cost model, so the prefetch is simply
// assumed to complete by `end_time`; `shape` and `start_time` are unused.
int64_t InstructionCountPrefetchIntervalPicker::EstimatedPrefetchEndTime(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  return end_time;
}
float InstructionCountPrefetchIntervalPicker::GetLogicalIntervalElapsed(
    int64_t start_time, int64_t end_time) const {
  // Elapsed "time" is the number of instructions strictly between the two
  // logical times.
  const int64_t instruction_count = end_time - start_time - 1;
  return static_cast<float>(instruction_count);
}
void InstructionCountPrefetchIntervalPicker::Begin(
const HloUse& use, int64_t start_time, int64_t end_time,
std::optional<int64_t> preferred_time) {
end_time_ = end_time;
const Shape& shape = ShapeUtil::GetSubshape(
use.instruction->operand(use.operand_number)->shape(), use.operand_index);
if (preferred_time) {
current_prefetch_time_ = *preferred_time;
} else {
current_prefetch_time_ =
PreferredPrefetchStartTime(shape, start_time, end_time, end_time);
}
}
// Returns the current candidate prefetch time and advances the iterator.
// Must not be called once Done() returns true.
int64_t InstructionCountPrefetchIntervalPicker::Next() {
  // Fixed CHECK message: the failure fires when Done() is true, not false.
  CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
                    "Done() is true";
  return current_prefetch_time_++;
}
bool InstructionCountPrefetchIntervalPicker::Done() const {
  // Iteration stops once fewer than min_overlap_count_ instructions remain
  // between the candidate prefetch time and the use.
  const int64_t remaining = end_time_ - current_prefetch_time_;
  return remaining <= min_overlap_count_;
}
int64_t InstructionCountPrefetchIntervalPicker::latest_time() const {
  // Last candidate the iterator can yield before Done() becomes true.
  const int64_t last_candidate = end_time_ - min_overlap_count_ - 1;
  return last_candidate;
}
std::string InstructionCountPrefetchIntervalPicker::ToDebugString() const {
  // Number of instructions between the current candidate time and the use.
  return absl::StrCat("Overlapped HLOs = ", end_time_ - current_prefetch_time_);
}
std::string InstructionCountPrefetchIntervalPicker::ToNoCopyDebugString(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  // For no-copy allocations, the overlap is the entire live range.
  return absl::StrCat("Overlapped HLOs = ", end_time - start_time)
}
CostAnalysisPrefetchIntervalPicker::CostAnalysisPrefetchIntervalPicker(
    const CostAnalysis& cost_analysis, float min_overlap_to_async_copy_ratio,
    float preferred_overlap_to_async_copy_ratio,
    float max_overlap_to_mem_size_async_copy_ratio, int64_t mem_size_bytes,
    const Shape* shape_override)
    : while_nest_level_(
          cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
      computation_nest_level_(
          cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
      cost_analysis_(cost_analysis),
      min_overlap_to_async_copy_ratio_(min_overlap_to_async_copy_ratio),
      preferred_overlap_to_async_copy_ratio_(
          preferred_overlap_to_async_copy_ratio),
      // Cap: time to copy a buffer of mem_size_bytes (modeled as an S32
      // array), scaled by the configured ratio.
      max_async_copy_elapsed_(
          cost_analysis_.GetAsyncCopyElapsed(
              ShapeUtil::MakeShape(S32, {mem_size_bytes / 4})) *
          max_overlap_to_mem_size_async_copy_ratio),
      shape_override_(shape_override ? std::optional(*shape_override)
                                     : std::nullopt) {
  instruction_schedule_ =
      &cost_analysis_.hlo_live_range().instruction_schedule();
  // Per-logical-time elapsed cost, weighted by while-nest execution counts.
  std::vector<float> instructions_elapsed_time(
      instruction_schedule_->size() + 1, 0.0);
  int max_while_nest_level = 0;
  for (const auto& instruction_and_logical_time : *instruction_schedule_) {
    const HloInstruction* instruction = instruction_and_logical_time.first;
    int64_t logical_time = instruction_and_logical_time.second;
    // Grow the vectors if the schedule contains larger logical times than
    // the initial sizing assumed.
    if (logical_time >= instructions_elapsed_time.size()) {
      instructions_elapsed_time.resize(logical_time + 1, 0.0);
      while_nest_level_.resize(logical_time + 1, 0);
    }
    int while_nest_level = cost_analysis_.CalculateComputationNestLevel(
        instruction_and_logical_time.first, /*while_only=*/true);
    while_nest_level_[logical_time] = while_nest_level;
    max_while_nest_level = std::max(max_while_nest_level, while_nest_level);
    int computation_nest_level = cost_analysis_.CalculateComputationNestLevel(
        instruction_and_logical_time.first, /*while_only=*/false);
    computation_nest_level_[logical_time] = computation_nest_level;
    // While/conditional ops themselves contribute no elapsed time.
    if (instruction->opcode() == HloOpcode::kWhile ||
        instruction->opcode() == HloOpcode::kConditional) {
      continue;
    }
    float elapsed_time = cost_analysis_.GetInstructionElapsed(
        *instruction_and_logical_time.first);
    instructions_elapsed_time[logical_time] =
        elapsed_time * cost_analysis_.GetWhileNestMultiplier(while_nest_level);
  }
  // Prefix sums of the per-time elapsed costs.
  float cumsum = 0.0;
  elapsed_time_cumsum_.reserve(instructions_elapsed_time.size());
  for (float elapsed_time : instructions_elapsed_time) {
    cumsum += elapsed_time;
    elapsed_time_cumsum_.push_back(cumsum);
  }
  // Build while_nest_level_change_: for each logical time, the most recent
  // earlier time at a shallower while-nest level (-1 if none).
  const int64_t size = instructions_elapsed_time.size();
  CHECK_EQ(size, while_nest_level_.size());
  std::vector<int> most_recent_by_level(while_nest_level_.size(), -1);
  int prev_nest_level = 0;
  int change_idx = -1;
  while_nest_level_change_.reserve(size);
  for (int i = 0; i < size; ++i) {
    int nest_level = while_nest_level_[i];
    if (nest_level != prev_nest_level) {
      prev_nest_level = nest_level;
      change_idx = -1;
      for (int smaller_level = 0; smaller_level < nest_level; smaller_level++) {
        change_idx = std::max(change_idx, most_recent_by_level[smaller_level]);
      }
    }
    most_recent_by_level[nest_level] = i;
    while_nest_level_change_.push_back(change_idx);
  }
  // Cache the execution-count multiplier for each while nest level.
  for (int i = 0; i <= max_while_nest_level; ++i) {
    while_execution_counts_.push_back(cost_analysis_.GetWhileNestMultiplier(i));
  }
}
float CostAnalysisPrefetchIntervalPicker::GetMaxElapsedInAlternateMemory(
    float async_copy_elapsed) const {
  // NOTE(review): `async_copy_elapsed` is currently ignored — the cap
  // depends only on the memory-size ratio computed in the constructor.
  return max_async_copy_elapsed_;
}
bool CostAnalysisPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
const Shape& shape, int64_t start_time, int64_t end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
float logical_interval_elapsed =
GetLogicalIntervalElapsed(start_time, end_time);
return GetMaxElapsedInAlternateMemory(async_copy_elapsed) >
logical_interval_elapsed;
}
int64_t CostAnalysisPrefetchIntervalPicker::PreferredEvictionEndTime(
const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
shape_override_ ? *shape_override_ : shape);
int64_t end_time;
for (end_time = start_time + 1; end_time <= latest_end_time; ++end_time) {
float logical_interval_elapsed =
GetLogicalIntervalElapsed(start_time, end_time);
if (logical_interval_elapsed >=
(1 + kEvictionRetryMultiplier * retry_number_) *
preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed) {
break;
}
}
return end_time;
}
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchStartTime(
    const Shape& shape, int64_t start_time, int64_t end_time,
    const HloUse* use) const {
  // Elapsed time of the async copy for the (possibly overridden) shape.
  const float copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  // If the use is known, credit the speedup of executing it with the
  // operand already resident in alternate memory.
  float use_elapsed_reduction = 0.0f;
  if (use != nullptr) {
    const float default_elapsed =
        cost_analysis_.GetInstructionElapsed(*use->instruction);
    const float alternate_elapsed =
        cost_analysis_.GetInstructionElapsedInAlternateMemory(
            *use->instruction,
            {std::make_pair(use->operand_number, use->operand_index)},
            /*outputs_in_alternate_mem=*/{});
    use_elapsed_reduction = default_elapsed - alternate_elapsed;
  }
  const int end_nest_level = computation_nest_level_[end_time];
  const float min_interval = min_overlap_to_async_copy_ratio_ * copy_elapsed;
  // Walk backwards from just before the use until we reach a time at the
  // same computation nest level that leaves enough overlap. May return
  // start_time - 1 when no such time exists, matching the original.
  int64_t candidate = end_time - 1;
  while (candidate >= start_time) {
    const bool same_nest_level =
        computation_nest_level_[candidate] == end_nest_level;
    const bool enough_overlap =
        GetLogicalIntervalElapsed(candidate, end_time) +
            use_elapsed_reduction >=
        min_interval;
    if (same_nest_level && enough_overlap) {
      break;
    }
    --candidate;
  }
  return candidate;
}
// Picks the start time whose elapsed interval to the prefetch end is closest
// to preferred_overlap_to_async_copy_ratio_ * copy time, considering only
// times at the same computation nest level as the prefetch end.
int64_t CostAnalysisPrefetchIntervalPicker::PreferredPrefetchStartTime(
    const Shape& shape, int64_t earliest_prefetch_start_time,
    int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  int64_t preferred_prefetch_start_time = earliest_prefetch_start_time;
  float preferred_interval =
      preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed;
  // The earliest allowed start is the fallback candidate.
  float best_interval = GetLogicalIntervalElapsed(earliest_prefetch_start_time,
                                                  prefetch_end_time);
  int end_nest_level = computation_nest_level_[prefetch_end_time];
  for (int64_t prefetch_start_time = earliest_prefetch_start_time + 1;
       prefetch_start_time <= latest_prefetch_start_time;
       ++prefetch_start_time) {
    float interval =
        GetLogicalIntervalElapsed(prefetch_start_time, prefetch_end_time);
    // Keep the candidate closest to the preferred interval length.
    if (computation_nest_level_[prefetch_start_time] == end_nest_level &&
        std::abs(preferred_interval - interval) <
            std::abs(preferred_interval - best_interval)) {
      best_interval = interval;
      preferred_prefetch_start_time = prefetch_start_time;
    }
  }
  return preferred_prefetch_start_time;
}
// Clamps a proposed prefetch end time back (downwards) to the nearest time
// that is at the same computation nest level as the original end time.
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchEndTime(
    int64_t original_prefetch_end_time,
    int64_t proposed_prefetch_end_time) const {
  const int64_t required_nest_level =
      computation_nest_level_[original_prefetch_end_time];
  int64_t result = proposed_prefetch_end_time;
  while (computation_nest_level_[result] != required_nest_level) {
    --result;
  }
  return result;
}
// Returns the first logical time after `start_time` at which the elapsed
// logical interval covers the async copy duration, capped at `end_time`.
int64_t CostAnalysisPrefetchIntervalPicker::EstimatedPrefetchEndTime(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  const float copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  // Advance until the copy fits, or we run out of room just before end_time.
  int64_t candidate = start_time + 1;
  while (candidate < end_time &&
         GetLogicalIntervalElapsed(start_time, candidate) < copy_elapsed) {
    ++candidate;
  }
  return candidate;
}
// Initializes the picker's iteration state for `use` over the window
// [start_time, end_time]: computes the earliest/latest admissible prefetch
// start times and positions the two zig-zag iterators (increasing and
// decreasing) at the starting candidate, then advances once via Next().
void CostAnalysisPrefetchIntervalPicker::Begin(
    const HloUse& use, int64_t start_time, int64_t end_time,
    std::optional<int64_t> preferred_time) {
  // Shape of the operand subvalue being prefetched for this use.
  const Shape& shape = ShapeUtil::GetSubshape(
      use.instruction->operand(use.operand_number)->shape(), use.operand_index);
  // Elapsed time of the async copy (shape override takes precedence).
  async_copy_elapsed_ = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  float elapsed_time = cost_analysis_.GetInstructionElapsed(*use.instruction);
  float elapsed_time_in_alternate_mem =
      cost_analysis_.GetInstructionElapsedInAlternateMemory(
          *use.instruction,
          {std::make_pair(use.operand_number, use.operand_index)},
          {});
  // Speedup of the using instruction when the operand is in alternate memory.
  inst_elapsed_reduction_ = elapsed_time - elapsed_time_in_alternate_mem;
  end_logical_time_ = end_time;
  int end_nest_level = computation_nest_level_[end_logical_time_];
  // min_interval is only reported in the VLOG below; the latest-start search
  // applies the same bound internally via LatestPrefetchStartTime.
  float min_interval = min_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
  latest_prefetch_time_ =
      LatestPrefetchStartTime(shape, start_time, end_time, &use);
  float max_interval = GetMaxElapsedInAlternateMemory(async_copy_elapsed_);
  // Advance the earliest prefetch time until it is at the end's nest level
  // and the interval no longer exceeds the maximum allowed overlap.
  for (earliest_prefetch_time_ = start_time;
       earliest_prefetch_time_ < latest_prefetch_time_ &&
       (computation_nest_level_[earliest_prefetch_time_] != end_nest_level ||
        max_interval < GetLogicalIntervalElapsed(earliest_prefetch_time_,
                                                 end_logical_time_));
       ++earliest_prefetch_time_) {
  }
  // Empty window: park the iterators in the exhausted position.
  if (earliest_prefetch_time_ > latest_prefetch_time_) {
    increasing_prefetch_time_iterator_ = earliest_prefetch_time_;
    decreasing_prefetch_time_iterator_ = latest_prefetch_time_;
    CHECK(Done());
    return;
  }
  int64_t starting_prefetch_time;
  // Honor an explicit preference if it does not exceed the latest admissible
  // start; otherwise compute the preferred start from the overlap ratio.
  if (preferred_time && *preferred_time <= latest_prefetch_time_) {
    starting_prefetch_time = *preferred_time;
  } else {
    starting_prefetch_time =
        PreferredPrefetchStartTime(shape, earliest_prefetch_time_,
                                   latest_prefetch_time_, end_logical_time_);
  }
  float preferred_interval =
      preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
  VLOG(4) << "Interval min/max/preferred = " << min_interval << " "
          << max_interval << " " << preferred_interval
          << " prefetch time earliest/latest/starting = "
          << earliest_prefetch_time_ << " " << latest_prefetch_time_ << " "
          << starting_prefetch_time;
  // Both iterators start at the same candidate; Next() alternates directions.
  increasing_prefetch_time_iterator_ = starting_prefetch_time;
  decreasing_prefetch_time_iterator_ = starting_prefetch_time;
  using_increasing_prefetch_time_iterator_ = true;
  Next();
}
int64_t CostAnalysisPrefetchIntervalPicker::Next() {
CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
"Done() is false";
if (using_increasing_prefetch_time_iterator_) {
int64_t prefetch_time = increasing_prefetch_time_iterator_++;
while (increasing_prefetch_time_iterator_ <= latest_prefetch_time_ &&
computation_nest_level_[increasing_prefetch_time_iterator_] !=
computation_nest_level_[end_logical_time_]) {
++increasing_prefetch_time_iterator_;
}
if (decreasing_prefetch_time_iterator_ >= earliest_prefetch_time_) {
using_increasing_prefetch_time_iterator_ = false;
}
return prefetch_time;
} else {
int64_t prefetch_time = decreasing_prefetch_time_iterator_--; | #include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <cstdint>
#include <optional>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
constexpr int64_t kPointerSize = 8;
// Shape-size function handed to HloCostAnalysis: byte size of `shape`
// assuming kPointerSize-byte pointers.
int64_t ShapeSize(const Shape& shape) {
  const int64_t size_in_bytes = ShapeUtil::ByteSizeOf(shape, kPointerSize);
  return size_in_bytes;
}
using CostAnalysisPrefetchIntervalPickerTest = HloTestBase;
// Verifies the zig-zag order in which the picker proposes prefetch start
// times on a straight-line chain of negates: Next() starts at the preferred
// time and alternates between later and earlier candidates until exhausted.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrder) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true
  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    d = f32[2,4] negate(c)
    e = f32[2,4] negate(d)
    f = f32[2,4] negate(e)
    g = f32[2,4] negate(f)
    h = f32[2,4] negate(g)
    i = f32[2,4] negate(h)
    j = f32[2,4] negate(i)
    k = f32[2,4] negate(j)
    l = f32[2,4] negate(k)
    m = f32[2,4] negate(l)
    n = f32[2,4] negate(m)
    o = f32[2,4] negate(n)
    p = f32[2,4] negate(o)
    q = f32[2,4] negate(p)
    r = f32[2,4] negate(q)
    s = f32[2,4] negate(r)
    t = f32[2,4] negate(s)
    u = f32[2,4] negate(t)
    ROOT v = f32[2,4] add(u, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  // Args after the cost analysis: min and preferred overlap-to-async-copy
  // ratios, then the two window-sizing parameters (see picker constructor).
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      1.0,
      2.0,
      4.0,
      32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  // The use is param0 feeding the root add (operand 1).
  const HloUse use{root, 1, {}};
  interval_picker.Begin(use, 0, 22, std::nullopt);
  // Expected order alternates around the preferred start time.
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 15);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 16);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 14);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 17);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 13);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 18);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 12);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 11);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 10);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 9);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
  // A window too small to overlap the copy yields no candidates at all.
  interval_picker.Begin(use, 19, 22, std::nullopt);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
}
// Same zig-zag ordering check, but with a while loop in the middle of the
// schedule: candidates inside the loop body (deeper nest level) must be
// skipped, and the while-nest multiplier stretches the logical intervals.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrderWhile) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true
  while_condition {
    param1 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }
  while_body {
    param2 = (f32[2,4]) parameter(0)
    gte2 = f32[2,4] get-tuple-element(param2), index=0
    add = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add)
  }
  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    d = f32[2,4] negate(c)
    e = f32[2,4] negate(d)
    f = f32[2,4] negate(e)
    g = f32[2,4] negate(f)
    h = f32[2,4] negate(g)
    i = f32[2,4] negate(h)
    j = f32[2,4] negate(i)
    k = f32[2,4] negate(j)
    l = f32[2,4] negate(k)
    m = f32[2,4] negate(l)
    n = f32[2,4] negate(m)
    o = f32[2,4] negate(n)
    p = f32[2,4] negate(o)
    q = f32[2,4] negate(p)
    tuple = (f32[2,4]) tuple(q)
    while = (f32[2,4]) while(tuple), condition=while_condition, body=while_body
    gte1 = f32[2,4] get-tuple-element(while), index=0
    r = f32[2,4] negate(gte1)
    s = f32[2,4] negate(r)
    t = f32[2,4] negate(s)
    u = f32[2,4] negate(t)
    ROOT v = f32[2,4] add(u, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      1.0,
      2.0,
      12.0,
      32);
  // Instructions inside the while body are weighted by the nest multiplier.
  EXPECT_EQ(cost_analysis->GetWhileNestMultiplier(1), 5.0);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, 1, {}};
  interval_picker.Begin(use, 0, 31, std::nullopt);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 25);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 26);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 18);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 27);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 17);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
}
// With nested while loops between the producer and the use, the latest
// admissible prefetch start must stay outside the loops (at the entry
// computation's nest level), hence the small expected start time.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, NestedWhile) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true
  while_condition.2 {
    param1 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }
  while_body.2 {
    param2 = (f32[2,4]) parameter(0)
    gte2 = f32[2,4] get-tuple-element(param2), index=0
    add = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add)
  }
  while_condition.1 {
    param3 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }
  while_body.1 {
    param4 = (f32[2,4]) parameter(0)
    gte1 = f32[2,4] get-tuple-element(param4), index=0
    add1 = f32[2,4] add(gte1, gte1)
    tuple1 = (f32[2,4]) tuple(add1)
    while = (f32[2,4]) while(tuple1), condition=while_condition.2, body=while_body.2
    gte2 = f32[2,4] get-tuple-element(while), index=0
    add2 = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add2)
  }
  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    tuple = (f32[2,4]) tuple(c)
    while = (f32[2,4]) while(tuple), condition=while_condition.1, body=while_body.1
    gte1 = f32[2,4] get-tuple-element(while), index=0
    ROOT root = f32[2,4] add(gte1, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      1.0,
      2.0,
      12.0,
      32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, 1, {}};
  const Shape& shape = root->operand(1)->shape();
  // Args 2 and 3 are start_time and end_time of the search window.
  EXPECT_EQ(interval_picker.LatestPrefetchStartTime(shape, 0,
                                                    23, &use),
            4);
}
// Two back-to-back conditionals: the latest prefetch start for an operand of
// the second conditional must land before the first conditional begins (its
// body is at a deeper nest level), hence the strict upper bound of 5.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, ConsecutiveConditionals) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true
  true_computation.0 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg1 = f32[3]{0} negate(gte)
  }
  false_computation.0 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg2 = f32[3]{0} negate(gte)
  }
  true_computation.1 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg1 = f32[3]{0} negate(gte)
  }
  false_computation.1 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg2 = f32[3]{0} negate(gte)
  }
  ENTRY entry {
    p0 = f32[3]{0} parameter(0)
    p1 = f32[3]{0} parameter(1)
    p2 = pred[] parameter(2)
    tuple0 = (f32[3]{0}) tuple(p0)
    tuple1 = (f32[3]{0}) tuple(p1)
    conditional0 = f32[3]{0} conditional(p2, tuple0, tuple0), true_computation=true_computation.0, false_computation=false_computation.0
    conditional1 = f32[3]{0} conditional(p2, tuple1, tuple1), true_computation=true_computation.1, false_computation=false_computation.1
    ROOT tuple2 = (f32[3]{0}, f32[3]{0}) tuple(conditional0, conditional1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      1.0,
      2.0,
      12.0,
      32);
  LOG(INFO) << module->ToString();
  HloInstruction* conditional1 =
      module->entry_computation()->GetInstructionWithName("conditional1");
  // The use is the tuple operand (index {0}) of the second conditional.
  const HloUse use{conditional1, 1, {0}};
  const Shape& shape =
      module->entry_computation()->parameter_instruction(0)->shape();
  // Args 2 and 3 are start_time and end_time of the search window.
  EXPECT_LT(interval_picker.LatestPrefetchStartTime(shape, 0,
                                                    11, &use),
            5);
}
// When the only instruction in the window (a tanh, made artificially slow by
// the elapsed-time override) dominates the schedule, the window is tiny but
// the picker must still propose at least one candidate rather than none.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, EarliestLatestWindowTooSmall) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true
  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    negate = f32[2,4] negate(param0)
    tanh = f32[2,4] tanh(param0)
    ROOT add = f32[2,4] add(tanh, negate)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  // Make tanh 20x as expensive as everything else so it dominates the window.
  cost_analysis->SetOverrideForGetInstructionElapsed(
      [](const HloInstruction& hlo) {
        if (hlo.opcode() == HloOpcode::kTanh) {
          return 20.0;
        }
        return 1.0;
      });
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      1.0,
      2.0,
      12.0,
      32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, 1, {}};
  interval_picker.Begin(use, 1, 3, std::nullopt);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_FALSE(interval_picker.Done());
  EXPECT_EQ(interval_picker.Next(), 1);
  EXPECT_TRUE(interval_picker.Done());
}
}
}
} |
1,999 | cpp | tensorflow/tensorflow | schedule_aware_collective_ops_cse | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse.cc | third_party/xla/xla/service/spmd/schedule_aware_collective_ops_cse_test.cc | #ifndef XLA_SERVICE_SPMD_SCHEDULE_AWARE_COLLECTIVE_OPS_CSE_H_
#define XLA_SERVICE_SPMD_SCHEDULE_AWARE_COLLECTIVE_OPS_CSE_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that CSEs equivalent collective ops (all-gathers,
// collective-permutes, and all-reduce-based all-gather patterns), taking the
// schedule into account: two collectives are only merged when the surviving
// result would not have to stay live for too long.
class ScheduleAwareCollectiveOpsCSE : public HloModulePass {
 public:
  // distance_threshold: maximum allowed schedule distance (measured in
  //   instruction "height") between a new collective and the earliest user of
  //   an equivalent earlier collective for the two to be CSE'd.
  // for_replicas: if true, operate on cross-replica collectives (those
  //   without a channel id); otherwise on cross-partition collectives (those
  //   with a channel id).
  explicit ScheduleAwareCollectiveOpsCSE(int64_t distance_threshold,
                                         bool for_replicas)
      : distance_threshold_(distance_threshold), for_replicas_(for_replicas) {}

  ~ScheduleAwareCollectiveOpsCSE() override = default;
  absl::string_view name() const override {
    return "schedule-aware-collective-cse";
  }
  using HloPassInterface::Run;
  // Runs the pass on every computation in `module`; returns true iff any
  // instruction was replaced.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 private:
  int64_t distance_threshold_;
  bool for_replicas_;
};
}
#endif
#include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsAddingOnlyDegenerateDimensions(const HloInstruction* inst) {
if (inst->opcode() != HloOpcode::kBitcast &&
inst->opcode() != HloOpcode::kReshape) {
return false;
}
const Shape& in_shape = inst->operand(0)->shape();
const Shape& out_shape = inst->shape();
return ShapeUtil::ElementsIn(in_shape) == ShapeUtil::ElementsIn(out_shape) &&
ShapeUtil::DimensionsUnmodifiedByReshape(in_shape, out_shape).size() ==
in_shape.rank();
}
// Walks up through a chain of degenerate-dimension-adding reshapes/bitcasts
// and returns the first instruction that is not one of them.
const HloInstruction* PassthroughDegenerateAddingReshapes(
    const HloInstruction* inst) {
  const HloInstruction* current = inst;
  while (true) {
    if (!IsAddingOnlyDegenerateDimensions(current)) {
      return current;
    }
    current = current->operand(0);
  }
}
// Collective-permutes are CSE'd regardless of schedule distance; every other
// collective must respect the distance threshold.
bool ShouldConsiderSchedule(HloInstruction* hlo) {
  const bool is_collective_permute =
      hlo->opcode() == HloOpcode::kCollectivePermute;
  return !is_collective_permute;
}
// Returns `hlo` if it is a collective this pass may CSE, else nullptr.
// Eligible ops: collective-permute, unconstrained all-gather, and array-shaped
// all-reduce over a broadcast + dynamic-update-slice (an all-gather in
// disguise). `for_replicas` selects cross-replica (no channel id) vs
// cross-partition (channel id present) ops.
HloInstruction* MayConsiderCollective(HloInstruction* hlo, bool for_replicas) {
  auto* channel_instr = DynCast<HloChannelInstruction>(hlo);
  if (channel_instr == nullptr) {
    return nullptr;
  }
  // Cross-replica ops carry no channel id; skip ops of the wrong mode.
  if (for_replicas == channel_instr->channel_id().has_value()) {
    return nullptr;
  }
  if (hlo->opcode() == HloOpcode::kCollectivePermute) {
    return hlo;
  }
  auto* collective = DynCast<HloCollectiveInstruction>(hlo);
  if (collective == nullptr) {
    return nullptr;
  }
  if (collective->constrain_layout()) {
    return nullptr;
  }
  if (collective->opcode() == HloOpcode::kAllGather) {
    return collective;
  }
  if (collective->opcode() == HloOpcode::kAllReduce &&
      collective->shape().IsArray()) {
    const HloInstruction* operand = collective->operand(0);
    const bool is_masked_gather =
        operand->opcode() == HloOpcode::kDynamicUpdateSlice &&
        operand->operand(0)->opcode() == HloOpcode::kBroadcast;
    return is_masked_gather ? collective : nullptr;
  }
  return nullptr;
}
// Performs schedule-aware CSE of eligible collectives inside `comp`.
//
// Every eligible collective is keyed by its operand (looking through chains
// of degenerate-dimension-adding reshapes). When a later collective is
// identical to a cached earlier one, the later one is replaced -- unless the
// earlier collective's earliest user is scheduled more than
// `distance_threshold` (in instruction height) after the new collective, in
// which case the new collective is cached in place of the old one instead.
//
// Returns true iff any instruction was replaced.
absl::StatusOr<bool> RunOnComputation(HloComputation* comp, bool for_replicas,
                                      int64_t distance_threshold) {
  bool changed = false;
  // height[hlo] = length of the longest chain of users below `hlo` (0 for
  // instructions without users); used as a proxy for schedule distance.
  absl::flat_hash_map<const HloInstruction*, int64_t> height;
  auto ordered_hlos = comp->MakeInstructionPostOrder();
  int64_t max_height = 0;
  for (auto it = ordered_hlos.rbegin(); it != ordered_hlos.rend(); ++it) {
    auto hlo = *it;
    int64_t h = 0;
    for (auto user : hlo->users()) {
      // One more than the tallest user. Note: `std::max(h, height[user]) + 1`
      // would be wrong -- it adds 1 per user instead of taking the max.
      h = std::max(h, height[user] + 1);
    }
    max_height = std::max(max_height, h);
    height[hlo] = h;
  }
  // Height of the lowest (i.e. latest-scheduled) user of `hlo`.
  auto lowest_user_height = [&](const HloInstruction* hlo) {
    int64_t lowest = height[hlo];
    for (auto user : hlo->users()) {
      lowest = std::min(lowest, height[user]);
    }
    return lowest;
  };
  // Maps a reshape-normalized operand to candidate collectives consuming it.
  absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
      operand_to_collective;
  for (HloInstruction* hlo : ordered_hlos) {
    HloInstruction* coll = MayConsiderCollective(hlo, for_replicas);
    if (!coll) {
      continue;
    }
    auto& earlier_colls =
        operand_to_collective[PassthroughDegenerateAddingReshapes(
            coll->operand(0))];
    bool found = false;
    int64_t coll_height = height[coll];
    // Iterate by reference: a cached entry may be overwritten with `coll`
    // below. A by-value loop variable would make that assignment a dead store.
    for (HloInstruction*& earlier_coll : earlier_colls) {
      if (!ShapeUtil::Equal(earlier_coll->shape(), coll->shape())) {
        continue;
      }
      HloInstruction* coll_operand = coll->mutable_operand(0);
      // Temporarily rebase `coll` onto the earlier collective's operand so
      // the identity check ignores the reshape chains between the operands.
      TF_RETURN_IF_ERROR(
          coll->ReplaceOperandWith(0, earlier_coll->mutable_operand(0)));
      if (!earlier_coll->IdenticalIgnoringChannelIdValues(*coll)) {
        TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
        continue;
      }
      found = true;
      if (ShouldConsiderSchedule(coll) &&
          lowest_user_height(earlier_coll) > coll_height + distance_threshold) {
        // CSE'ing would keep the earlier result live for too long; undo the
        // rebase and remember the newer collective for future matches.
        TF_RETURN_IF_ERROR(coll->ReplaceOperandWith(0, coll_operand));
        earlier_coll = coll;
        continue;
      }
      changed = true;
      VLOG(1) << "Replacing " << coll->ToString() << " with "
              << earlier_coll->ToString();
      TF_RETURN_IF_ERROR(coll->ReplaceAllUsesWith(earlier_coll));
      break;
    }
    if (!found) {
      earlier_colls.push_back(coll);
    }
  }
  return changed;
}
}
// Runs the CSE pass over every computation on the given execution threads.
// Returns true iff any computation was changed.
absl::StatusOr<bool> ScheduleAwareCollectiveOpsCSE::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool any_changed = false;
  for (HloComputation* computation :
       module->computations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(
        bool computation_changed,
        RunOnComputation(computation, for_replicas_, distance_threshold_));
    any_changed |= computation_changed;
  }
  return any_changed;
}
} | #include "xla/service/spmd/schedule_aware_collective_ops_cse.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace spmd {
namespace {
// Fixture that parses an HLO module, runs ScheduleAwareCollectiveOpsCSE over
// it in cross-partition mode (for_replicas=false), and returns the
// transformed module.
class CollectiveOpsCseTest : public HloTestBase {
 public:
  absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
      absl::string_view hlo_module, int64_t distance_threshold = 100) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
                                         hlo_module, GetModuleConfigForTest()));
    HloPassPipeline pipeline("all-gather-cse");
    // false => cross-partition collectives (those with a channel id).
    pipeline.AddPass<ScheduleAwareCollectiveOpsCSE>(distance_threshold,
                                                    false);
    TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }
};
// Two identical all-gathers over the same operand (differing only in channel
// id) must be CSE'd into one.
TEST_F(CollectiveOpsCseTest, SimpleCseAllGather) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[1,8]{1,0} parameter(0)
  ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
    channel_id=1, use_global_device_ids=true
  ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  // Both tuple operands now point at the same surviving all-gather.
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// Two identical collective-permutes over the same operand must be CSE'd.
TEST_F(CollectiveOpsCseTest, SimpleCseCollectivePermute) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[2,8]{1,0} parameter(0)
  cp1 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
    channel_id=0
  cp2 = s32[2,8]{1,0} collective-permute(param0), source_target_pairs={{0,1},{1,0}},
    channel_id=1
  ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(cp1, cp2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// The pass must look through degenerate-dimension-adding reshapes when
// keying all-gathers by operand, so these two are CSE'd.
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughAllGather) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  rshp = s32[1,8]{1,0} reshape(param0)
  rshp2 = s32[1,8]{1,0} reshape(param0)
  ag1 = s32[2,8]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[2,8]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
    channel_id=1, use_global_device_ids=true
  ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// Same reshape-lookthrough behavior, but for collective-permutes.
TEST_F(CollectiveOpsCseTest, SimpleCseReshapeLookthroughCollectivePermute) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  rshp = s32[1,8]{1,0} reshape(param0)
  rshp2 = s32[1,8]{1,0} reshape(param0)
  cp1 = s32[1,8]{1,0} collective-permute(rshp), source_target_pairs={{0,1},{1,0}},
    channel_id=0
  cp2 = s32[1,8]{1,0} collective-permute(rshp2), source_target_pairs={{0,1},{1,0}},
    channel_id=1
  ROOT tuple = (s32[1,8]{1,0}, s32[1,8]{1,0}) tuple(cp1, cp2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// Reshapes that modify real (non-degenerate) dimensions must NOT be looked
// through, so these all-gathers remain distinct.
TEST_F(CollectiveOpsCseTest, SimpleNoCseInvalidReshapes) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  rshp = s32[2,4]{1,0} reshape(param0)
  rshp2 = s32[2,4]{1,0} reshape(param0)
  ag1 = s32[4,4]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={0},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[4,4]{1,0} all-gather(rshp2), replica_groups={{0,1}}, dimensions={0},
    channel_id=1, use_global_device_ids=true
  ROOT tuple = (s32[4,4]{1,0}, s32[4,4]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  // No CSE: the two all-gathers survive as separate operands.
  EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
// CSE also applies when the all-gathers gather along a non-zero dimension.
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDim) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[1,8]{1,0} parameter(0)
  ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
    dimensions={1}, channel_id=1, use_global_device_ids=true
  ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// Reshape lookthrough combined with gathering along dimension 1.
TEST_F(CollectiveOpsCseTest, SimpleCseDifferentDimReshapeLookthrough) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  rshp = s32[1,8]{1,0} reshape(param0)
  rshp2 = s32[1,8]{1,0} reshape(param0)
  ag1 = s32[1,16]{1,0} all-gather(rshp), replica_groups={{0,1}}, dimensions={1},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[1,16]{1,0} all-gather(rshp2), replica_groups={{0,1}},
    dimensions={1}, channel_id=1, use_global_device_ids=true
  ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_EQ(tuple->operand(0), tuple->operand(1));
}
// All-gathers that differ in use_global_device_ids / replica grouping are not
// identical and must not be CSE'd.
TEST_F(CollectiveOpsCseTest, NoCseGlobalDevice) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[1,8]{1,0} parameter(0)
  ag1 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={0},
    channel_id=0, use_global_device_ids=true
  ag2 = s32[2,8]{1,0} all-gather(param0), replica_groups={{0},{1}}, dimensions={0},
    channel_id=1, use_global_device_ids=false
  ROOT tuple = (s32[2,8]{1,0}, s32[2,8]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
// One all-gather has a channel id (cross-partition) and the other does not
// (cross-replica); the pass runs in cross-partition mode, so they must not be
// CSE'd with each other.
TEST_F(CollectiveOpsCseTest, NoCseChannelIdMismatch) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY entry {
  param0 = s32[1,8]{1,0} parameter(0)
  ag1 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}}, dimensions={1},
    channel_id=0
  ag2 = s32[1,16]{1,0} all-gather(param0), replica_groups={{0,1}},
    dimensions={1}
  ROOT tuple = (s32[1,16]{1,0}, s32[1,16]{1,0}) tuple(ag1, ag2)
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  HloInstruction* tuple = module->entry_computation()->root_instruction();
  EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);
  EXPECT_EQ(tuple->operand_count(), 2);
  EXPECT_NE(tuple->operand(0), tuple->operand(1));
}
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.